Per +Milan Zamazal <mzamazal(a)redhat.com>'s comment, adding +devel <devel(a)ovirt.org>.
On Wed, Jul 24, 2019 at 11:32 AM Michal Skrivanek <michal.skrivanek(a)redhat.com> wrote:
On 24 Jul 2019, at 10:24, Amit Bawer <abawer(a)redhat.com> wrote:
Thanks, applied the fixed patch.
Now I am being punished for choosing not to work with SSL/TLS in Vdsm when
trying to "Run" a VM.
- Any known workaround for this?
That’s part of the ongoing fixes. Please don’t discuss this privately; this
belongs on the devel@ list. Many people are struggling with the same issues
while they’re working on their areas, and we need complete visibility.
2019-07-24 04:04:54,610-0400 INFO (vm/01de706d) [virt.vm] (vmId='01de706d-ee4c-484f-a17f-6b3355adf047') <?xml version='1.0' encoding='utf-8'?>
<domain xmlns:ns0="http://ovirt.org/vm/tune/1.0" xmlns:ovirt-vm="http://ovirt.org/vm/1.0" type="kvm">
    <name>vm1</name>
    <uuid>01de706d-ee4c-484f-a17f-6b3355adf047</uuid>
    <memory>1048576</memory>
    <currentMemory>1048576</currentMemory>
    <iothreads>1</iothreads>
    <maxMemory slots="16">4194304</maxMemory>
    <vcpu current="1">16</vcpu>
    <sysinfo type="smbios">
        <system>
            <entry name="manufacturer">oVirt</entry>
            <entry name="product">RHEL</entry>
            <entry name="version">8.0-0.44.el8</entry>
            <entry name="serial">e5825ba8-473e-4821-829a-bc6dbbe79617</entry>
            <entry name="uuid">01de706d-ee4c-484f-a17f-6b3355adf047</entry>
        </system>
    </sysinfo>
    <clock adjustment="0" offset="variable">
        <timer name="rtc" tickpolicy="catchup" />
        <timer name="pit" tickpolicy="delay" />
        <timer name="hpet" present="no" />
    </clock>
    <features>
        <acpi />
    </features>
    <cpu match="exact">
        <model>SandyBridge</model>
        <topology cores="1" sockets="16" threads="1" />
        <numa>
            <cell cpus="0-15" id="0" memory="1048576" />
        </numa>
    </cpu>
    <cputune />
    <devices>
        <input bus="usb" type="tablet" />
        <channel type="unix">
            <target name="ovirt-guest-agent.0" type="virtio" />
            <source mode="bind" path="/var/lib/libvirt/qemu/channels/01de706d-ee4c-484f-a17f-6b3355adf047.ovirt-guest-agent.0" />
        </channel>
        <channel type="unix">
            <target name="org.qemu.guest_agent.0" type="virtio" />
            <source mode="bind" path="/var/lib/libvirt/qemu/channels/01de706d-ee4c-484f-a17f-6b3355adf047.org.qemu.guest_agent.0" />
        </channel>
        <rng model="virtio">
            <backend model="random">/dev/urandom</backend>
            <alias name="ua-117e932d-e465-40d6-9005-bf7d7eb22023" />
        </rng>
        <graphics autoport="yes" passwd="*****" passwdValidTo="1970-01-01T00:00:01" port="-1" tlsPort="-1" type="spice">
            <channel mode="secure" name="main" />
            <channel mode="secure" name="inputs" />
            <channel mode="secure" name="cursor" />
            <channel mode="secure" name="playback" />
            <channel mode="secure" name="record" />
            <channel mode="secure" name="display" />
            <channel mode="secure" name="smartcard" />
            <channel mode="secure" name="usbredir" />
            <listen network="vdsm-ovirtmgmt" type="network" />
        </graphics>
        <controller index="0" model="piix3-uhci" type="usb" />
        <controller index="0" model="virtio-scsi" type="scsi">
            <driver iothread="1" />
            <alias name="ua-73437643-3c01-4763-b8ef-c4275527606b" />
        </controller>
        <graphics autoport="yes" keymap="en-us" passwd="*****" passwdValidTo="1970-01-01T00:00:01" port="-1" type="vnc">
            <listen network="vdsm-ovirtmgmt" type="network" />
        </graphics>
        <memballoon model="virtio">
            <stats period="5" />
            <alias name="ua-93d69b50-e010-4879-80db-713ee4a11727" />
        </memballoon>
        <controller index="0" ports="16" type="virtio-serial">
            <alias name="ua-cd3edc3f-25c5-4fef-b555-cedf45c86adc" />
        </controller>
        <video>
            <model heads="1" ram="65536" type="qxl" vgamem="16384" vram="8192" />
            <alias name="ua-d660095f-dfe6-4f2c-aa76-c3525fb6244e" />
        </video>
        <channel type="spicevmc">
            <target name="com.redhat.spice.0" type="virtio" />
        </channel>
        <disk device="cdrom" snapshot="no" type="file">
            <driver error_policy="report" name="qemu" type="raw" />
            <source file="" startupPolicy="optional">
                <seclabel model="dac" relabel="no" type="none" />
            </source>
            <target bus="ide" dev="hdc" />
            <readonly />
            <alias name="ua-917aea8a-bc4b-4d15-8580-a0c442e925ee" />
        </disk>
        <disk device="disk" snapshot="no" type="file">
            <target bus="scsi" dev="sda" />
            <source file="/rhev/data-center/mnt/10.35.1.6:_exports_data/f49de997-9fb7-4ef8-82bd-f5b97ba31fb0/images/7dee6442-1838-48dd-892a-86fb96a85737/da070fc0-4af5-406e-bf2b-2cf4d89eb276">
                <seclabel model="dac" relabel="no" type="none" />
            </source>
            <driver cache="none" error_policy="stop" io="threads" name="qemu" type="raw" />
            <alias name="ua-7dee6442-1838-48dd-892a-86fb96a85737" />
            <address bus="0" controller="0" target="0" type="drive" unit="0" />
            <boot order="1" />
            <serial>7dee6442-1838-48dd-892a-86fb96a85737</serial>
        </disk>
    </devices>
    <pm>
        <suspend-to-disk enabled="no" />
        <suspend-to-mem enabled="no" />
    </pm>
    <os>
        <type arch="x86_64" machine="pc-i440fx-rhel7.6.0">hvm</type>
        <smbios mode="sysinfo" />
    </os>
    <metadata>
        <ns0:qos />
        <ovirt-vm:vm>
            <ovirt-vm:minGuaranteedMemoryMb type="int">1024</ovirt-vm:minGuaranteedMemoryMb>
            <ovirt-vm:clusterVersion>4.4</ovirt-vm:clusterVersion>
            <ovirt-vm:custom />
            <ovirt-vm:device devtype="disk" name="sda">
                <ovirt-vm:poolID>dab8cf3a-a969-11e9-84eb-080027624b78</ovirt-vm:poolID>
                <ovirt-vm:volumeID>da070fc0-4af5-406e-bf2b-2cf4d89eb276</ovirt-vm:volumeID>
                <ovirt-vm:imageID>7dee6442-1838-48dd-892a-86fb96a85737</ovirt-vm:imageID>
                <ovirt-vm:domainID>f49de997-9fb7-4ef8-82bd-f5b97ba31fb0</ovirt-vm:domainID>
            </ovirt-vm:device>
            <ovirt-vm:launchPaused>false</ovirt-vm:launchPaused>
            <ovirt-vm:resumeBehavior>auto_resume</ovirt-vm:resumeBehavior>
        </ovirt-vm:vm>
    </metadata>
</domain>
(vm:2570)
2019-07-24 04:04:55,348-0400 ERROR (vm/01de706d) [virt.vm] (vmId='01de706d-ee4c-484f-a17f-6b3355adf047') The vm start process failed (vm:841)
Traceback (most recent call last):
  File "/usr/lib/python3.6/site-packages/vdsm/virt/vm.py", line 775, in _startUnderlyingVm
    self._run()
  File "/usr/lib/python3.6/site-packages/vdsm/virt/vm.py", line 2575, in _run
    dom.createWithFlags(flags)
  File "/usr/lib/python3.6/site-packages/vdsm/common/libvirtconnection.py", line 131, in wrapper
    ret = f(*args, **kwargs)
  File "/usr/lib/python3.6/site-packages/vdsm/common/function.py", line 94, in wrapper
    return func(inst, *args, **kwargs)
  File "/usr/lib64/python3.6/site-packages/libvirt.py", line 1110, in createWithFlags
    if ret == -1: raise libvirtError ('virDomainCreateWithFlags() failed', dom=self)
libvirt.libvirtError: unsupported configuration: Auto allocation of spice TLS port requested but spice TLS is disabled in qemu.conf
2019-07-24 04:04:55,348-0400 INFO (vm/01de706d) [virt.vm] (vmId='01de706d-ee4c-484f-a17f-6b3355adf047') Changed state to Down: unsupported configuration: Auto allocation of spice TLS port requested but spice TLS is disabled in qemu.conf (code=1) (vm:1595)
2019-07-24 04:04:55,525-0400 INFO (vm/01de706d) [virt.vm] (vmId='01de706d-ee4c-484f-a17f-6b3355adf047') Stopping connection (guestagent:455)
2019-07-24 04:04:55,525-0400 DEBUG (vm/01de706d) [jsonrpc.Notification] Sending event {"jsonrpc": "2.0", "method": "|virt|VM_status|01de706d-ee4c-484f-a17f-6b3355adf047", "params": {"01de706d-ee4c-484f-a17f-6b3355adf047": {"status": "Down", "vmId": "01de706d-ee4c-484f-a17f-6b3355adf047", "exitCode": 1, "exitMessage": "unsupported configuration: Auto allocation of spice TLS port requested but spice TLS is disabled in qemu.conf", "exitReason": 1}, "notify_time": 4440824940}} (__init__:181)
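
A possible workaround for the SPICE TLS failure above (untested sketch): the generated domain XML asks for secure SPICE channels with tlsPort="-1" (auto-allocation), while qemu.conf has spice_tls=0, so either stop requesting secure channels on the Engine side, or re-enable SPICE TLS on the host. Re-enabling it would look roughly like this; the cert directory is the one quoted later in this thread and whether usable certs exist there is an assumption:

    # /etc/libvirt/qemu.conf -- untested sketch, assumes certs are present in the directory below
    spice_tls = 1
    spice_tls_x509_cert_dir = "/etc/pki/vdsm/libvirt-spice"

    # then re-run vdsm configuration and restart the daemons
    vdsm-tool configure --force
    systemctl restart libvirtd supervdsmd vdsmd
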
On Tue, Jul 23, 2019 at 6:14 PM Milan Zamazal <mzamazal(a)redhat.com> wrote:
> Amit Bawer <abawer(a)redhat.com> writes:
>
> > Ok, applied the given patch. Now I have following error trace in vdsm.log
> > when trying to "Run" the VM:
> >
> > 2019-07-23 09:57:55,167-0400 ERROR (vm/01de706d) [virt.vm] (vmId='01de706d-ee4c-484f-a17f-6b3355adf047') Failed to setup device spice (vm:2344)
> > Traceback (most recent call last):
> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/vm.py", line 2341, in _setup_devices
> >     dev_object.setup()
> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/vmdevices/graphics.py", line 95, in setup
> >     displaynetwork.create_network(display_network, self.vmid)
> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/displaynetwork.py", line 29, in create_network
> >     libvirtnetwork.create_network(netname, display_device, user_reference)
> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/libvirtnetwork.py", line 91, in create_network
> >     _createNetwork(createNetworkDef(netname, bridged, iface))
> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/libvirtnetwork.py", line 108, in _createNetwork
> >     net = conn.networkDefineXML(netXml)
> >   File "/usr/lib/python3.6/site-packages/vdsm/common/libvirtconnection.py", line 131, in wrapper
> >     ret = f(*args, **kwargs)
> >   File "/usr/lib/python3.6/site-packages/vdsm/common/function.py", line 94, in wrapper
> >     return func(inst, *args, **kwargs)
> >   File "/usr/lib64/python3.6/site-packages/libvirt.py", line 4235, in networkDefineXML
> >     ret = libvirtmod.virNetworkDefineXML(self._o, xml)
> > TypeError: virNetworkDefineXML() argument 2 must be str or None, not bytes
>
> I believe https://gerrit.ovirt.org/102136 (untested) should fix it.
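
For reference, a Gerrit change like the one above can usually be fetched and tried locally along these lines (untested sketch; the patchset number and the "vdsm" project path are assumptions):

    # in a vdsm git checkout on the host
    git fetch https://gerrit.ovirt.org/vdsm refs/changes/36/102136/1
    git checkout FETCH_HEAD
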
>
> > On Tue, Jul 23, 2019 at 3:56 PM Milan Zamazal <mzamazal(a)redhat.com> wrote:
> >
> >> Amit Bawer <abawer(a)redhat.com> writes:
> >>
> >> > Hi Milan,
> >> >
> >> > I am trying to run VM on a RHEL8 host attached NFS domain.
> >> > I get the following error in vdsm.log
> >> > Do you happen to know what it means?
> >>
> >> Probably yes, does https://gerrit.ovirt.org/102127 (untested) fix it?
> >>
> >> > Thanks,
> >> > Amit
> >> >
> >> > Traceback (most recent call last):
> >> >   File "/usr/lib/python3.6/site-packages/vdsm/common/api.py", line 124, in method
> >> >     ret = func(*args, **kwargs)
> >> >   File "/usr/lib/python3.6/site-packages/vdsm/API.py", line 294, in destroy
> >> >     res = self.vm.destroy(gracefulAttempts)
> >> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/vm.py", line 4916, in destroy
> >> >     result = self.doDestroy(gracefulAttempts, reason)
> >> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/vm.py", line 4935, in doDestroy
> >> >     return self.releaseVm(gracefulAttempts)
> >> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/vm.py", line 4828, in releaseVm
> >> >     self.guestAgent.stop()
> >> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/guestagent.py", line 458, in stop
> >> >     self._channelListener.unregister(self._sock.fileno())
> >> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/vmchannels.py", line 257, in unregister
> >> >     self._unregister_fd(fileno)
> >> >   File "/usr/lib/python3.6/site-packages/vdsm/virt/vmchannels.py", line 67, in _unregister_fd
> >> >     self._epoll.unregister(fileno)
> >> > ValueError: file descriptor cannot be a negative integer (-1)
> >> >
> >> > On Thu, Jul 18, 2019 at 8:35 AM Milan Zamazal
<mzamazal(a)redhat.com>
> >> wrote:
> >> >
> >> >> Amit Bawer <abawer(a)redhat.com> writes:
> >> >>
> >> >> > 1. Had couple of small patches pushed to darkthrone py3_poc
branch
> >> >> >
> >> >> > 2. Have a sequel document to Milan's installation guide
abuts
> SSL-less
> >> >> vdsm
> >> >> > and engine:
> >> >> >
> >> >>
> >>
>
https://drive.google.com/open?id=1jSt-LXSY8S6J8eTjt_-JU3Ph4ZNNnBoZBaJBWe8...
> >> >> >
> >> >> > 3. Milan, you might want to update the BREW installation
section -
> >> vdsm
> >> >> > install wants a more up-to-date ioprocess rpms,
> >> >> > I updated it in the guide copy here:
> >> >> >
> >> >>
> >>
>
https://drive.google.com/open?id=1jSt-LXSY8S6J8eTjt_-JU3Ph4ZNNnBoZBaJBWe8...
> >> >> > It now has:
> >> >> >
> >>
> $BREW/ioprocess/1.2.1/1.el8ev/x86_64/ioprocess-1.2.1-1.el8ev.x86_64.rpm \
> >> >> >
> >> >>
> >>
> $BREW/ioprocess/1.2.1/1.el8ev/x86_64/python3-ioprocess-1.2.1-1.el8ev.x86_64.rpm
> >> >>
> >> >> It should be already in the updated document I sent to rhev-devel@
> >> >> yesterday.
> >> >>
> >> >> > 4. Latest error when try to register host at
engine:"Host
> 10.35.0.136
> >> is
> >> >> > installed with VDSM version (4.40) and cannot join cluster
Default
> >> which
> >> >> is
> >> >> > compatible with VDSM versions [4.17, 4.18]." - Can we
fake the
> version
> >> >> > somehow?
> >> >>
> >> >> This has been fixed in Engine, both master and 4.3, some time ago.
> >> >> Please upgrade to current Engine version.
> >> >>
> >> >> > On Wed, Jul 17, 2019 at 4:38 PM Marcin Sobczyk <
> msobczyk(a)redhat.com>
> >> >> wrote:
> >> >> >
> >> >> >>
> >> >> >> On 7/17/19 3:27 PM, Marcin Sobczyk wrote:
> >> >> >> >
> >> >> >> > On 7/17/19 3:04 PM, Milan Zamazal wrote:
> >> >> >> >> Marcin Sobczyk <msobczyk(a)redhat.com>
writes:
> >> >> >> >>
> >> >> >> >>> On 7/16/19 5:11 PM, Marcin Sobczyk wrote:
> >> >> >> >>>> On 7/16/19 4:47 PM, Milan Zamazal wrote:
> >> >> >> >>>>> Amit Bawer <abawer(a)redhat.com>
writes:
> >> >> >> >>>>>
> >> >> >> >>>>>> +Marcin Sobczyk
<msobczyk(a)redhat.com>
> >> >> >> >>>>>>
> >> >> >> >>>>>> On Tue, Jul 16, 2019 at 5:23 PM
Amit Bawer <
> abawer(a)redhat.com
> >> >
> >> >> >> >>>>>> wrote:
> >> >> >> >>>>>>
> >> >> >> >>>>>>> I believe i tried all the
variations after vdsm.id
> creation.
> >> >> >> >>>>>>> engine.log shows following
error for ssh attempts to
> overlord
> >> >> >> >>>>>>> the host
> >> >> >> >>>>>>> from the engine:
> >> >> >> >>>>>>>
> >> >> >> >>>>>>> SS_ERROR(511), An error has
occurred during installation
> of
> >> Host
> >> >> >> >>>>>>> rhel8:
> >> >> >> >>>>>>> Failed to execute stage
'Environment customization': No
> >> module
> >> >> >> >>>>>>> named
> >> >> >> >>>>>>> 'rpmUtils'.
> >> >> >> >>>>>>>
> >> >> >> >>>>>>> Could this be the reason?
> >> >> >> >>>>> I don't think Engine can manage
the host, this is why we
> have
> >> to
> >> >> >> >>>>> bother
> >> >> >> >>>>> with the manual setup. rpmUtils is
no longer available on
> >> RHEL 8,
> >> >> >> >>>>> there
> >> >> >> >>>>> was a patch to ovirt-host-deploy (
> >>
https://gerrit.ovirt.org/101293
> >> >> )
> >> >> >> >>>>> but
> >> >> >> >>>>> there are probably more places to
fix.
> >> >> >> >>>>>
> >> >> >> >>>>> I can't remember what I did
exactly regarding vdsm.id. I
> can
> >> >> >> suggest
> >> >> >> >>>>> only trying switching to maintenance
and activate again
> after
> >> the
> >> >> >> >>>>> failed
> >> >> >> >>>>> installation.
> >> >> >> >>>>>
> >> >> >> >>>>> Marcin, do you have any better
advice?
> >> >> >> >>>> Nope, unfortunately. Since the host
deployment process
> fails in
> >> >> >> >>>> various interesting ways, my methodology
also relies on the
> >> >> >> >>>> "maintenance --> activate"
trick.
> >> >> >> >>>> I also never generated and
'vdsm.id' by myself.
> >> >> >> >>> Oh, regarding 'vdsm.id'. This might
help:
> >> >> >> >>>
> >> >> >>> diff --git a/lib/vdsm/host/__init__.py b/lib/vdsm/host/__init__.py
> >> >> >>> index 334c88d1f..2aee4ea77 100644
> >> >> >>> --- a/lib/vdsm/host/__init__.py
> >> >> >>> +++ b/lib/vdsm/host/__init__.py
> >> >> >>> @@ -47,6 +47,7 @@ def uuid():
> >> >> >>>                             "system-uuid"],
> >> >> >>>                            raw=True,
> >> >> >>>                            sudo=True)
> >> >> >>> +        out = out.decode('utf-8')
> >> >> >>>          out = '\n'.join(line for line in out.splitlines()
> >> >> >>>                          if not line.startswith('#'))
> >> >> >> >>
> >> >> >> >> It indeed seems to help, thank you. Would you
like to post a
> >> patch?
> >> >> >> >
> >> >> >> > Great! Sure, will do so.
> >> >> >>
> >> >> >> 3c08f9cc2fc1eb9390f1a68e1828bfb297307603 on darkthrone.
Will
> post to
> >> >> >> gerrit later.
> >> >> >>
> >> >> >> >
> >> >> >> >
> >> >> >> >>
> >> >> >> >>>> Still, I never managed to get my RHEL8
host to 'Up' status -
> >> it's
> >> >> >> >>>> still 'NonOperational'.
> >> >> >> >>>>
> >> >> >> >>>>
> >> >> >> >>>>>>> On Tue, Jul 16, 2019 at 4:54
PM Milan Zamazal
> >> >> >> >>>>>>> <mzamazal(a)redhat.com>
wrote:
> >> >> >> >>>>>>>
> >> >> >> >>>>>>>> Amit Bawer
<abawer(a)redhat.com> writes:
> >> >> >> >>>>>>>>
> >> >> >> >>>>>>>>> Getting following
error for attempting to Activate the
> >> rhel8
> >> >> >> >>>>>>>>> host from
> >> >> >> >>>>>>>> web
> >> >> >> >>>>>>>>> mgmt:
> >> >> >> >>>>>>>>>
> >> >> >> >>>>>>>>> [image: image.png]
> >> >> >> >>>>>>>> Ah, right, you must do
> >> >> >> >>>>>>>>
> >> >> >> >>>>>>>> uuidgen >
/etc/vdsm/vdsm.id
> >> >> >> >>>>>>>>
> >> >> >> >>>>>>>> to create the host id. I
don't know how to tell Engine
> >> about
> >> >> the
> >> >> >> >>>>>>>> change, perhaps it
detects it itself, perhaps the host
> must
> >> be
> >> >> >> >>>>>>>> switched
> >> >> >> >>>>>>>> to maintenance and then
activated, perhaps it must be
> >> removed
> >> >> >> >>>>>>>> and added
> >> >> >> >>>>>>>> again (the previously
performed actions on the host
> needn't
> >> be
> >> >> >> >>>>>>>> repeated
> >> >> >> >>>>>>>> again in such a case, I
believe).
> >> >> >> >>>>>>>>
> >> >> >> >>>>>>>>> On Tue, Jul 16, 2019
at 4:27 PM Milan Zamazal
> >> >> >> >>>>>>>>>
<mzamazal(a)redhat.com>
> >> >> >> >>>>>>>> wrote:
> >> >> >> >>>>>>>>>> Amit Bawer
<abawer(a)redhat.com> writes:
> >> >> >> >>>>>>>>>>
> >> >> >> >>>>>>>>>>> On Tue, Jul
16, 2019 at 3:16 PM Milan Zamazal
> >> >> >> >>>>>>>>>>>
<mzamazal(a)redhat.com>
> >> >> >> >>>>>>>>>> wrote:
> >> >> >> >>>>>>>>>>>> Amit
Bawer <abawer(a)redhat.com> writes:
> >> >> >> >>>>>>>>>>>>
> >> >> >> >>>>>>>>>>>>> On
Tue, Jul 16, 2019 at 12:37 PM Milan Zamazal <
> >> >> >> >>>>>>>> mzamazal(a)redhat.com>
> >> >> >> >>>>>>>>>>>> wrote:
> >> >> >> >>>>>>>>>>>>>>
Amit Bawer <abawer(a)redhat.com> writes:
> >> >> >> >>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>> On Tue, Jul 16, 2019 at 10:46
AM Milan Zamazal <
> >> >> >> >>>>>>>>>>
mzamazal(a)redhat.com>
> >> >> >> >>>>>>>>>>>>>>
wrote:
> >> >> >>
>>>>>>>>>>>>>>>> Amit Bawer
<abawer(a)redhat.com> writes:
> >> >> >>
>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>> after 60s with not
interruption it is timedout
> >> >> >>
>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>> No subscription with
> >> >> >>
>>>>>>>>>>>>>>>>>
227243a0-c4fc-4fb6-b2ff-6e79ad362452 id
> >> >> >>
>>>>>>>>>>>>>>>>> vdsm-client: Request
Host.setupNetworks with
> args
> >> >> >> >>>>>>>> {'networks':
> >> >> >>
>>>>>>>>>>>>>>>>> {'ovirtmgmt':
{'nic': 'ens3', 'bootproto':
> 'dhcp',
> >> >> >> >>>>>>>>>>
'defaultRoute':
> >> >> >>
>>>>>>>>>>>>>>>> True}},
> >> >> >>
>>>>>>>>>>>>>>>>> 'bondings':
{}, 'options':
> {'connectivityCheck':
> >> >> False}}
> >> >> >> >>>>>>>> timed
> >> >> >> >>>>>>>>>> out
> >> >> >> >>>>>>>>>>>>>>
after
> >> >> >>
>>>>>>>>>>>>>>>> 60
> >> >> >>
>>>>>>>>>>>>>>>>> seconds
> >> >> >>
>>>>>>>>>>>>>>>> You can use the script
below to bring up
> ovirtmgmt
> >> >> >>
>>>>>>>>>>>>>>>> manually
> >> >> >> >>>>>>>>>> (again,
> >> >> >>
>>>>>>>>>>>>>>>> assuming your standard
network interface is
> ens3).
> >> It
> >> >> >>
>>>>>>>>>>>>>>>> should
> >> >> >> >>>>>>>>>>>> suffice
to
> >> >> >>
>>>>>>>>>>>>>>>> be able to connect the
host to Engine, but you
> have
> >> to
> >> >> >>
>>>>>>>>>>>>>>>> run it
> >> >> >> >>>>>>>>>>>> manually
> >> >> >>
>>>>>>>>>>>>>>>> after each host boot.
> >> >> >>
>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>> Thanks, I think for RHEL8
brctl was replaced by
> >> nmcli:
> >> >> >>
>>>>>>>>>>>>>>>
> >> >> >> >>>>>>>>
> >> >> >>
> >> >>
> >>
>
https://computingforgeeks.com/how-to-create-a-linux-network-bridge-on-rhe...
> >> >> >> >>>>>>>>
> >> >> >> >>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>> Does vdsm-client work for
you fine otherwise
> (e.g.
> >> for
> >> >> >>
>>>>>>>>>>>>>>>> calls
> >> >> >> >>>>>>>> such
> >> >> >> >>>>>>>>>> as
> >> >> >>
>>>>>>>>>>>>>>>> `vdsm-client Host
getCapabilities'?). If so,
> I'd
> >> >> suggest
> >> >> >> >>>>>>>> catching
> >> >> >>
>>>>>>>>>>>>>>>> someone from the
networking team (Edward?,
> Bell?) to
> >> >> >>
>>>>>>>>>>>>>>>> help you
> >> >> >> >>>>>>>> with
> >> >> >>
>>>>>>>>>>>>>>>> setupNetworks.
> >> >> >>
>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>> No, it hangs for any
(insecure) command.
> >> >> >> >>>>>>>>>>>>>>
Does vdsm-client work with secure commands?
> >> >> >> >>>>>>>>>>>>>>
> >> >> >> >>>>>>>>>>>>>
[abawer@dhcp-0-191 RPMS]$ vdsm-client Host
> >> >> >> >>>>>>>>>>>>>
getNetworkStatistics
> >> >> >> >>>>>>>>>>>>>
vdsm-client: Connection to localhost:54321 with
> >> >> use_tls=True,
> >> >> >> >>>>>>>>>> timeout=60
> >> >> >> >>>>>>>>>>>>>
failed: [Errno 13] Permission denied
> >> >> >> >>>>>>>>>>>> Two
things to check:
> >> >> >> >>>>>>>>>>>>
> >> >> >> >>>>>>>>>>>> - Is
`vdsm-tool configure' happy and were all the
> >> services
> >> >> >> >>>>>>>> (libvirtd,
> >> >> >> >>>>>>>>>>>>
supervdsmd, vdsmd) restarted after last run of
> >> >> vdsm-tool
> >> >> >> >>>>>>>> configure?
> >> >> >> >>>>>>>>>>> Thanks!
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> Running
insecure vdsm-client worked for following
> config
> >> >> and
> >> >> >> >>>>>>>>>>> course
> >> >> >> >>>>>>>> of
> >> >> >> >>>>>>>>>>> actions:
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>>
[root@dhcp-0-191 abawer]# egrep "ssl|tls|tcp|auth"
> >> >> >> >>>>>>>> /etc/vdsm/vdsm.conf
> >> >> >> >>>>>>>>>>>
/etc/libvirt/libvirtd.conf /etc/libvirt/qemu.conf |
> >> grep -v
> >> >> >> >>>>>>>>>>>
"#"
> >> >> >> >>>>>>>>>>>
/etc/vdsm/vdsm.conf:ssl = false
> >> >> >> >>>>>>>>>>>
/etc/libvirt/libvirtd.conf:auth_unix_ro = "none"
> >> >> >> >>>>>>>>>>>
/etc/libvirt/libvirtd.conf:auth_tcp = "none"
> >> >> >> >>>>>>>>>>>
/etc/libvirt/libvirtd.conf:auth_unix_rw="none"
> >> >> >> >>>>>>>>>>>
/etc/libvirt/libvirtd.conf:listen_tcp = 1
> >> >> >> >>>>>>>>>>>
/etc/libvirt/libvirtd.conf:listen_tls = 0
> >> >> >> >>>>>>>>>>>
/etc/libvirt/qemu.conf:spice_tls=0
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>
> >> >> >>
> >> >>
> >>
>
/etc/libvirt/qemu.conf:spice_tls_x509_cert_dir="/etc/pki/vdsm/libvirt-spice"
> >> >> >>
> >> >> >> >>>>>>>>
> >> >> >> >>>>>>>>
> >> >> >> >>>>>>>>>>>
[root@dhcp-0-191 abawer]# vdsm-tool configure
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> Checking
configuration status...
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> lvm is
configured for vdsm
> >> >> >> >>>>>>>>>>> Current
revision of multipath.conf detected,
> preserving
> >> >> >> >>>>>>>>>>> Managed
volume database is already configured
> >> >> >> >>>>>>>>>>> abrt is
already configured for vdsm
> >> >> >> >>>>>>>>>>> libvirt is
already configured for vdsm
> >> >> >> >>>>>>>>>>> SUCCESS: ssl
configured to false. No conflicts.
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> Running
configure...
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> Done
configuring modules to VDSM.
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> Restarted
vdsmd, supervdsmd, libvirtd...
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>>
[abawer@dhcp-0-191 ~]$ service vdsmd status
> >> >> >> >>>>>>>>>>> Redirecting
to /bin/systemctl status vdsmd.service
> >> >> >> >>>>>>>>>>> ●
vdsmd.service - Virtual Desktop Server Manager
> >> >> >> >>>>>>>>>>> Loaded:
loaded
> >> (/usr/lib/systemd/system/vdsmd.service;
> >> >> >> >>>>>>>>>>> enabled;
> >> >> >> >>>>>>>> vendor
> >> >> >> >>>>>>>>>>> preset:
enabled)
> >> >> >> >>>>>>>>>>> Active:
active (running) since Tue 2019-07-16
> >> 09:11:22
> >> >> >> >>>>>>>>>>> EDT; 8s ago
> >> >> >> >>>>>>>>>>> Process:
6963
> >> >> >> >>>>>>>>>>>
ExecStopPost=/usr/libexec/vdsm/vdsmd_init_common.sh
> >> >> >> >>>>>>>>>>> --post-stop
(code=exited, status=0/SUCCESS)
> >> >> >> >>>>>>>>>>> Process:
6969
> >> >> >> >>>>>>>>>>>
ExecStartPre=/usr/libexec/vdsm/vdsmd_init_common.sh
> >> >> >> >>>>>>>>>>> --pre-start
(code=exited, status=0/SUCCESS)
> >> >> >> >>>>>>>>>>> Main PID:
7014 (vdsmd)
> >> >> >> >>>>>>>>>>> Tasks:
38 (limit: 24006)
> >> >> >> >>>>>>>>>>> Memory:
43.4M
> >> >> >> >>>>>>>>>>> CGroup:
/system.slice/vdsmd.service
> >> >> >> >>>>>>>>>>>
└─7014 /usr/libexec/platform-python
> >> >> >> >>>>>>>>>>>
/usr/share/vdsm/vdsmd
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> Jul 16
09:11:21
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>>
vdsmd_init_common.sh[6969]:
> >> >> >> >>>>>>>>>> vdsm:
> >> >> >> >>>>>>>>>>> Running
prepare_transient_repository
> >> >> >> >>>>>>>>>>> Jul 16
09:11:22
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>>
vdsmd_init_common.sh[6969]:
> >> >> >> >>>>>>>>>> vdsm:
> >> >> >> >>>>>>>>>>> Running
syslog_available
> >> >> >> >>>>>>>>>>> Jul 16
09:11:22
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>>
vdsmd_init_common.sh[6969]:
> >> >> >> >>>>>>>>>> vdsm:
> >> >> >> >>>>>>>>>>> Running
nwfilter
> >> >> >> >>>>>>>>>>> Jul 16
09:11:22
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>>
vdsmd_init_common.sh[6969]:
> >> >> >> >>>>>>>>>> vdsm:
> >> >> >> >>>>>>>>>>> Running
dummybr
> >> >> >> >>>>>>>>>>> Jul 16
09:11:22
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>>
vdsmd_init_common.sh[6969]:
> >> >> >> >>>>>>>>>> vdsm:
> >> >> >> >>>>>>>>>>> Running
tune_system
> >> >> >> >>>>>>>>>>> Jul 16
09:11:22
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>>
vdsmd_init_common.sh[6969]:
> >> >> >> >>>>>>>>>> vdsm:
> >> >> >> >>>>>>>>>>> Running
test_space
> >> >> >> >>>>>>>>>>> Jul 16
09:11:22
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>>
vdsmd_init_common.sh[6969]:
> >> >> >> >>>>>>>>>> vdsm:
> >> >> >> >>>>>>>>>>> Running
test_lo
> >> >> >> >>>>>>>>>>> Jul 16
09:11:22
dhcp-0-191.tlv.redhat.com
> systemd[1]:
> >> >> Started
> >> >> >> >>>>>>>> Virtual
> >> >> >> >>>>>>>>>>> Desktop
Server Manager.
> >> >> >> >>>>>>>>>>> Jul 16
09:11:23
dhcp-0-191.tlv.redhat.com
> vdsm[7014]:
> >> WARN
> >> >> >> >>>>>>>>>>> MOM not
> >> >> >> >>>>>>>>>>> available.
> >> >> >> >>>>>>>>>>> Jul 16
09:11:23
dhcp-0-191.tlv.redhat.com
> vdsm[7014]:
> >> WARN
> >> >> >> >>>>>>>>>>> MOM not
> >> >> >> >>>>>>>>>>> available,
KSM stats will be missing.
> >> >> >> >>>>>>>>>>>
[abawer@dhcp-0-191 ~]$ service supervdsmd status
> >> >> >> >>>>>>>>>>> Redirecting
to /bin/systemctl status
> supervdsmd.service
> >> >> >> >>>>>>>>>>> ●
supervdsmd.service - Auxiliary vdsm service for
> running
> >> >> >> >>>>>>>>>>> helper
> >> >> >> >>>>>>>>>> functions
> >> >> >> >>>>>>>>>>> as root
> >> >> >> >>>>>>>>>>> Loaded:
loaded
> >> >> >> >>>>>>>>>>>
(/usr/lib/systemd/system/supervdsmd.service;
> >> >> >> >>>>>>>> static;
> >> >> >> >>>>>>>>>>> vendor
preset: enabled)
> >> >> >> >>>>>>>>>>> Active:
active (running) since Tue 2019-07-16
> >> 09:11:19
> >> >> >> >>>>>>>>>>> EDT; 17s
> >> >> >> >>>>>>>> ago
> >> >> >> >>>>>>>>>>> Main PID:
6968 (supervdsmd)
> >> >> >> >>>>>>>>>>> Tasks:
5 (limit: 24006)
> >> >> >> >>>>>>>>>>> Memory:
20.9M
> >> >> >> >>>>>>>>>>> CGroup:
/system.slice/supervdsmd.service
> >> >> >> >>>>>>>>>>>
└─6968 /usr/libexec/platform-python
> >> >> >> >>>>>>>>
/usr/share/vdsm/supervdsmd
> >> >> >> >>>>>>>>>>> --sockfile
/var/run/vdsm/svdsm.sock
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> Jul 16
09:11:19
dhcp-0-191.tlv.redhat.com
> systemd[1]:
> >> >> Stopped
> >> >> >> >>>>>>>> Auxiliary
> >> >> >> >>>>>>>>>>> vdsm service
for running helper functions as root.
> >> >> >> >>>>>>>>>>> Jul 16
09:11:19
dhcp-0-191.tlv.redhat.com
> systemd[1]:
> >> >> Started
> >> >> >> >>>>>>>> Auxiliary
> >> >> >> >>>>>>>>>>> vdsm service
for running helper functions as root.
> >> >> >> >>>>>>>>>>>
[abawer@dhcp-0-191 ~]$ service libvirtd status
> >> >> >> >>>>>>>>>>> Redirecting
to /bin/systemctl status libvirtd.service
> >> >> >> >>>>>>>>>>> ●
libvirtd.service - Virtualization daemon
> >> >> >> >>>>>>>>>>> Loaded:
loaded
> >> >> (/usr/lib/systemd/system/libvirtd.service;
> >> >> >> >>>>>>>>>>> enabled;
> >> >> >> >>>>>>>>>>> vendor
preset: enabled)
> >> >> >> >>>>>>>>>>> Drop-In:
/etc/systemd/system/libvirtd.service.d
> >> >> >> >>>>>>>>>>>
└─unlimited-core.conf
> >> >> >> >>>>>>>>>>> Active:
active (running) since Tue 2019-07-16
> >> 09:11:04
> >> >> >> >>>>>>>>>>> EDT; 38s
> >> >> >> >>>>>>>> ago
> >> >> >> >>>>>>>>>>> Docs:
man:libvirtd(8)
> >> >> >> >>>>>>>>>>>
https://libvirt.org
> >> >> >> >>>>>>>>>>> Main PID:
6448 (libvirtd)
> >> >> >> >>>>>>>>>>> Tasks:
17 (limit: 32768)
> >> >> >> >>>>>>>>>>> Memory:
12.5M
> >> >> >> >>>>>>>>>>> CGroup:
/system.slice/libvirtd.service
> >> >> >> >>>>>>>>>>>
└─6448 /usr/sbin/libvirtd --listen
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>> Jul 16
09:11:04
dhcp-0-191.tlv.redhat.com
> systemd[1]:
> >> >> Stopped
> >> >> >> >>>>>>>>>>>
Virtualization daemon.
> >> >> >> >>>>>>>>>>> Jul 16
09:11:04
dhcp-0-191.tlv.redhat.com
> systemd[1]:
> >> >> Starting
> >> >> >> >>>>>>>>>>>
Virtualization daemon...
> >> >> >> >>>>>>>>>>> Jul 16
09:11:04
dhcp-0-191.tlv.redhat.com
> systemd[1]:
> >> >> Started
> >> >> >> >>>>>>>>>>>
Virtualization daemon.
> >> >> >> >>>>>>>>>>> Jul 16
09:11:09
dhcp-0-191.tlv.redhat.com
> >> libvirtd[6448]:
> >> >> >> >>>>>>>>>>> libvirt
> >> >> >> >>>>>>>>>> version:
> >> >> >> >>>>>>>>>>> 5.0.0,
package: 7.module+el8+2887+effa3c42 (Red Hat,
> >> Inc. <
> >> >> >> >>>>>>>>>>>
http://bugzilla.redhat.>
> >> >> >> >>>>>>>>>>> Jul 16
09:11:09
dhcp-0-191.tlv.redhat.com
> >> libvirtd[6448]:
> >> >> >> >>>>>>>>>>> hostname:
> >> >> >> >>>>>>>>>>>
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>>>>> Jul 16
09:11:09
dhcp-0-191.tlv.redhat.com
> >> libvirtd[6448]:
> >> >> >> >>>>>>>>>>> End of
> >> >> >> >>>>>>>> file
> >> >> >> >>>>>>>>>> while
> >> >> >> >>>>>>>>>>> reading data:
Input/output error
> >> >> >> >>>>>>>>>>> Jul 16
09:11:19
dhcp-0-191.tlv.redhat.com
> >> libvirtd[6448]:
> >> >> >> >>>>>>>>>>> End of
> >> >> >> >>>>>>>> file
> >> >> >> >>>>>>>>>> while
> >> >> >> >>>>>>>>>>> reading data:
Input/output error
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>>
[abawer@dhcp-0-191 ~]$ echo '{"networks":
> {"ovirtmgmt":
> >> >> >> {"nic":
> >> >> >> >>>>>>>>>>
"enp0s3",
> >> >> >> >>>>>>>>>>>
"bootproto": "dhcp", "defaultRoute": true}},
> "bondings":
> >> {},
> >> >> >> >>>>>>>> "options":
> >> >> >> >>>>>>>>>>>
{"connectivityCheck": false}}' | vdsm-client
> --insecure
> >> -f -
> >> >> >> >>>>>>>>>>> Host
> >> >> >> >>>>>>>>>>>
setupNetworks
> >> >> >> >>>>>>>>>>> {
> >> >> >> >>>>>>>>>>>
"code": 0,
> >> >> >> >>>>>>>>>>>
"message": "Done"
> >> >> >> >>>>>>>>>>> }
> >> >> >> >>>>>>>>>>>
> >> >> >> >>>>>>>>>>>
[abawer@dhcp-0-191 ~]$
> >> >> >> >>>>>>>>>> Looks fine,
great. If networking works fine after
> this
> >> >> change
> >> >> >> >>>>>>>>>> don't
> >> >> >> >>>>>>>>>> forget to run
> >> >> >> >>>>>>>>>>
> >> >> >> >>>>>>>>>> vdsm-client
Host setSafeNetworkConfig
> >> >> >> >>>>>>>>>>
> >> >> >> >>>>>>>>>> before reboot to
save the configuration.
> >> >> >> >>>>>>>>>>
> >> >> >> >>>>>>>>>> Can you activate
the host in Engine now?
> >> >> >> >>>>>>>>>>
> >> >> >> >>>>>>>>>>>> - Are
there any details reported about the error in
> >> >> vdsm.log?
> >> >> >> >>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>> seems like the vdsm in non
responsive for rpc
> >> although
> >> >> >> >>>>>>>> according to
> >> >> >>
>>>>>>>>>>>>>>> vdsm.log it seems the
connections are accepted
> during
> >> >> >> >>>>>>>> vdsm-client
> >> >> >>
>>>>>>>>>>>>>>> execution.
> >> >> >> >>>>>>>>>>>>>>
Just to be sure: Do you have current master?
> >> >> >> >>>>>>>>>>>>>>
> >> >> >> >>>>>>>>>>>>>
commit 8d8b74dfbab8514437c1ce3edd05b9bdda834129
> >> >> >> >>>>>>>>>>>>>
(github/master)
> >> >> >> >>>>>>>>>>>>>
Author: Nir Soffer <nsoffer(a)redhat.com>
> >> >> >> >>>>>>>>>>>>> Date:
Mon Jul 15 03:15:52 2019 +0300
> >> >> >> >>>>>>>>>>>>>
> >> >> >> >>>>>>>>>>>>> On
top of it i have some local py3 fixes which are
> >> only on
> >> >> >> >>>>>>>>>>>>>
storage
> >> >> >> >>>>>>>>>>>>
component
> >> >> >> >>>>>>>>>>>>
> >> >> >> >>>>>>>>>>>> OK,
should be fine.
> >> >> >> >>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>> Will ask, Thanks.
> >> >> >>
>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>> On Mon, Jul 15, 2019
at 6:28 PM Amit Bawer <
> >> >> >> >>>>>>>> abawer(a)redhat.com>
> >> >> >> >>>>>>>>>>>> wrote:
> >> >> >>
>>>>>>>>>>>>>>>>>> Thanks, but now
it hangs for --insecure
> execution
> >> >> >>
>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>> echo
'{"networks": {"ovirtmgmt": {"nic":
> "ens3",
> >> >> >> >>>>>>>> "bootproto":
> >> >> >> >>>>>>>>>>>>
"dhcp",
> >> >> >>
>>>>>>>>>>>>>>>>>>
"defaultRoute": true}}, "bondings": {},
> "options":
> >> >> >> >>>>>>>>>>>>>>
{"connectivityCheck":
> >> >> >>
>>>>>>>>>>>>>>>>>> false}}' |
vdsm-client --insecure -f - Host
> >> >> >>
>>>>>>>>>>>>>>>>>> setupNetworks
> >> >> >>
>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>> Ctrl+C dumps
> >> >> >>
>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>> No subscription
with
> >> >> >>
>>>>>>>>>>>>>>>>>>
caf3d1fa-3331-44af-8d54-a65bd5d7d042
> >> >> >> >>>>>>>> id
> >> >> >>
>>>>>>>>>>>>>>>>>> Traceback (most
recent call last):
> >> >> >>
>>>>>>>>>>>>>>>>>> File
"/usr/bin/vdsm-client", line 25, in
> >> <module>
> >> >> >>
>>>>>>>>>>>>>>>>>>
client.main()
> >> >> >>
>>>>>>>>>>>>>>>>>> File
> >> >> >> >>>>>>>>
"/usr/lib/python3.6/site-packages/vdsmclient/client.py",
> >> >> >> >>>>>>>>>>>> line
> >> >> >>
>>>>>>>>>>>>>>>> 185,
> >> >> >>
>>>>>>>>>>>>>>>>>> in main
> >> >> >>
>>>>>>>>>>>>>>>>>> result =
command(**request_params)
> >> >> >>
>>>>>>>>>>>>>>>>>> File
> >> >> >>
>>>>>>>>>>>>>>>>>>
> "/usr/lib/python3.6/site-packages/vdsm/client.py",
> >> >> >> >>>>>>>> line
> >> >> >> >>>>>>>>>>>> 289,
> >> >> >> >>>>>>>>>>>>>>
in
> >> >> >>
>>>>>>>>>>>>>>>>>> _call
> >> >> >>
>>>>>>>>>>>>>>>>>> req,
timeout=timeout,
> flow_id=self._flow_id)
> >> >> >>
>>>>>>>>>>>>>>>>>> File
> >> >> >> >>>>>>>>>>>>
> >> >>
"/usr/lib/python3.6/site-packages/yajsonrpc/jsonrpcclient.py",
> >> >> >>
>>>>>>>>>>>>>>>> line
> >> >> >>
>>>>>>>>>>>>>>>>>> 91, in call
> >> >> >>
>>>>>>>>>>>>>>>>>>
call.wait(kwargs.get('timeout', CALL_TIMEOUT))
> >> >> >>
>>>>>>>>>>>>>>>>>> File
> >> >> >> >>>>>>>>>>>>
> >> >>
"/usr/lib/python3.6/site-packages/yajsonrpc/jsonrpcclient.py",
> >> >> >>
>>>>>>>>>>>>>>>> line
> >> >> >>
>>>>>>>>>>>>>>>>>> 290, in wait
> >> >> >>
>>>>>>>>>>>>>>>>>>
self._ev.wait(timeout)
> >> >> >>
>>>>>>>>>>>>>>>>>> File
"/usr/lib64/python3.6/threading.py",
> line
> >> >> >>
>>>>>>>>>>>>>>>>>> 551, in
> >> >> >> >>>>>>>> wait
> >> >> >>
>>>>>>>>>>>>>>>>>> signaled =
self._cond.wait(timeout)
> >> >> >>
>>>>>>>>>>>>>>>>>> File
"/usr/lib64/python3.6/threading.py",
> line
> >> >> >>
>>>>>>>>>>>>>>>>>> 299, in
> >> >> >> >>>>>>>> wait
> >> >> >>
>>>>>>>>>>>>>>>>>> gotit =
waiter.acquire(True, timeout)
> >> >> >>
>>>>>>>>>>>>>>>>>>
KeyboardInterrupt
> >> >> >>
>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>> On Mon, Jul 15,
2019 at 6:15 PM Milan Zamazal
> <
> >> >> >> >>>>>>>>>>>>
mzamazal(a)redhat.com>
> >> >> >>
>>>>>>>>>>>>>>>> wrote:
> >> >> >>
>>>>>>>>>>>>>>>>>>> Amit Bawer
<abawer(a)redhat.com> writes:
> >> >> >>
>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>> On Mon,
Jul 15, 2019 at 5:50 PM Milan
> Zamazal <
> >> >> >> >>>>>>>>>>>>>>
mzamazal(a)redhat.com>
> >> >> >>
>>>>>>>>>>>>>>>>>>> wrote:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> Amit
Bawer <abawer(a)redhat.com> writes:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>> I
followed your RHEL8 vdsm guide, was very
> >> useful
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
for
> >> >> >> >>>>>>>>>>>>>>
installing
> >> >> >>
>>>>>>>>>>>>>>>>>>> vdsm on
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
RHEL8.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Though I had to build MOM locally and it
> >> doesn't
> >> >> >> >>>>>>>> seem to
> >> >> >> >>>>>>>>>>>> fully
> >> >> >>
>>>>>>>>>>>>>>>> work
> >> >> >>
>>>>>>>>>>>>>>>>>>> on
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
RHEL8 yet.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> I
don't think MoM is required for
> anything, you
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
should
> >> >> >> >>>>>>>> be
> >> >> >> >>>>>>>>>>>> able to
> >> >> >>
>>>>>>>>>>>>>>>> run
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> Vdsm
happily without it and `vdsm' package
> >> >> currently
> >> >> >> >>>>>>>>>> doesn't
> >> >> >> >>>>>>>>>>>>>>
depend
> >> >> >>
>>>>>>>>>>>>>>>> on
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> it on
RHEL 8. A fix was posted for one
> issue
> >> >> today:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
https://gerrit.ovirt.org/101804. If you
> have
> >> any
> >> >> >> >>>>>>>> other
> >> >> >> >>>>>>>>>>>> troubles
> >> >> >>
>>>>>>>>>>>>>>>> with
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> MoM,
please report them to Andrej Krejčíř <
> >> >> >> >>>>>>>>>>>>
akrejcir(a)redhat.com>.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Next step is to test attach NFS domain,
> have
> >> to
> >> >> set
> >> >> >> >>>>>>>> up an
> >> >> >> >>>>>>>>>>>>>>
engine
> >> >> >>
>>>>>>>>>>>>>>>>>>> first
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
since it seems that vdsm-client is not the
> >> >> >> >>>>>>>>>> straightforward
> >> >> >> >>>>>>>>>>>>>>
choice
> >> >> >>
>>>>>>>>>>>>>>>> for
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> this
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
operation.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
Please note that since host deploy etc. is
> not
> >> >> fully
> >> >> >> >>>>>>>>>> working
> >> >> >> >>>>>>>>>>>> yet
> >> >> >>
>>>>>>>>>>>>>>>> it's a
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> bit
tricky to attach a RHEL 8 host to
> Engine.
> >> But
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
it's
> >> >> >> >>>>>>>>>>>> doable.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> - A
host is added to Engine. Its
> installation
> >> will
> >> >> >> >>>>>>>> fail,
> >> >> >> >>>>>>>>>> but
> >> >> >> >>>>>>>>>>>>>>
Engine
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> will
know about the host.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> -
Vdsm can be installed and run manually
> on the
> >> >> host,
> >> >> >> >>>>>>>>>> you've
> >> >> >> >>>>>>>>>>>>>>
already
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> done
that.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> - You
need to set up `ovirtmgmt' network in
> >> order
> >> >> to
> >> >> >> >>>>>>>> talk
> >> >> >> >>>>>>>>>> with
> >> >> >>
>>>>>>>>>>>>>>>> Engine.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> Here
is what I've used on the host, based
> on
> >> >> advice
> >> >> >> >>>>>>>> from
> >> >> >> >>>>>>>>>>>>>>
network
> >> >> >>
>>>>>>>>>>>>>>>>>>> guys:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> echo
'{"networks": {"ovirtmgmt": {"nic":
> >> "ens3",
> >> >> >> >>>>>>>>>>>>
"bootproto":
> >> >> >>
>>>>>>>>>>>>>>>>>>>
"dhcp",
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
"defaultRoute": true}}, "bondings": {},
> >> "options":
> >> >> >>
>>>>>>>>>>>>>>>>>>>
{"connectivityCheck":
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
false}}' | vdsm-client -f - Host
> setupNetworks
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>> gives me
> >> >> >>
>>>>>>>>>>>>>>>>>>>> ��
vdsm-client: Connection to
> localhost:54321
> >> with
> >> >> >> >>>>>>>>>> use_tls=True,
> >> >> >>
>>>>>>>>>>>>>>>>>>> timeout=60
> >> >> >>
>>>>>>>>>>>>>>>>>>>> failed:
[Errno 13] Permission denied
> >> >> >>
>>>>>>>>>>>>>>>>>>>> for
either local user or root user
> >> >> >>
>>>>>>>>>>>>>>>>>>> I think I
experienced something similar when
> I
> >> >> >>
>>>>>>>>>>>>>>>>>>> installed
> >> >> >> >>>>>>>> the
> >> >> >>
>>>>>>>>>>>>>>>>>>> certificates
to /etc/pki/vdsm from another
> host
> >> and
> >> >> >> >>>>>>>> forgot to
> >> >> >> >>>>>>>>>>>> change
> >> >> >>
>>>>>>>>>>>>>>>>>>> their owners
to vdsm:kvm. You may also try
> to
> >> >> disable
> >> >> >> >>>>>>>> ssl in
> >> >> >> >>>>>>>>>>>>>>
vdsm.conf
> >> >> >>
>>>>>>>>>>>>>>>>>>> (don't
forget to run `vdsm-tool configure'
> >> >> afterwards)
> >> >> >> >>>>>>>> and run
> >> >> >>
>>>>>>>>>>>>>>>>>>> vdsm-client
with --insecure option.
> >> >> >>
>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
vdsm-client Host setSafeNetworkConfig
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
reboot
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
("ens3" above is my default network
> >> interface
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> on
the
> >> >> >> >>>>>>>>>> host.)
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
Beware the first command (or other
> commands)
> >> may
> >> >> >> >>>>>>>>>> disconnect
> >> >> >> >>>>>>>>>>>> you
> >> >> >>
>>>>>>>>>>>>>>>> from
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> the
host and leave the host disconnected,
> so be
> >> >> sure
> >> >> >> >>>>>>>> to
> >> >> >> >>>>>>>>>>>> perform
> >> >> >>
>>>>>>>>>>>>>>>> the
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
commands from a console in case you decide
> to
> >> >> >> >>>>>>>> configure
> >> >> >> >>>>>>>>>>>>>>
networking
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> this
way.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> - In
case you use TLS communication with
> >> Engine,
> >> >> it
> >> >> >> >>>>>>>> may be
> >> >> >>
>>>>>>>>>>>>>>>> necessary to
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> set
up the certificates manually (while
> Engine
> >> >> enroll
> >> >> >> >>>>>>>>>>>>>>
certificate
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
operation seemed to report success in the
> UI,
> >> the
> >> >> >> >>>>>>>>>>>>
certificates
> >> >> >>
>>>>>>>>>>>>>>>>>>> weren't
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
installed for me). That means (courtesy to
> >> Marcin
> >> >> >> >>>>>>>>>> Sobczyk
> >> >> >> >>>>>>>>>>>> for
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
providing these tips):
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> -
Enable legacy crypto policies:
> >> >> >> >>>>>>>> update-crypto-policies
> >> >> >> >>>>>>>>>>>> --set
> >> >> >>
>>>>>>>>>>>>>>>> LEGACY
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> - On
the host side, copy /etc/pki/vdsm from
> >> some
> >> >> host
> >> >> >> >>>>>>>>>>>> already
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
successfully connected to the same Engine.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> -
On the Engine side, copy
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> /etc/pki/ovirt-engine/requests/HOSTNAME.com.req
> >> >> for
> >> >> >> >>>>>>>>>> your
> >> >> >> >>>>>>>>>>>> host
> >> >> >>
>>>>>>>>>>>>>>>> named
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
HOSTNAME from an already connected host.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
When not using TLS, no further special
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
arrangements
> >> >> >> >>>>>>>>>> should
> >> >> >> >>>>>>>>>>>> be
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
necessary.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> Thank
you for info, please tell me when you
> >> have
> >> >> >> >>>>>>>> anything
> >> >> >> >>>>>>>>>> new
> >> >> >> >>>>>>>>>>>> or
> >> >> >> >>>>>>>>>>>>>>
you
> >> >> >>
>>>>>>>>>>>>>>>>>>>>> need
some help.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
[root@dhcp-0-191 ~]# uname -rn
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
dhcp-0-191.tlv.redhat.com
> >> 4.18.0-80.el8.x86_64
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
[root@dhcp-0-191 ~]# systemctl status
> vdsmd
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>> ●
vdsmd.service - Virtual Desktop Server
> >> Manager
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Loaded: loaded
> >> >> >> >>>>>>>>
(/usr/lib/systemd/system/vdsmd.service;
> >> >> >> >>>>>>>>>>>>>>
enabled;
> >> >> >>
>>>>>>>>>>>>>>>>>>> vendor
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
preset: enabled)
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Active: active (running) since Mon
> >> >> 2019-07-15
> >> >> >> >>>>>>>> 09:02:06
> >> >> >> >>>>>>>>>>>> EDT;
> >> >> >>
>>>>>>>>>>>>>>>> 36min
> >> >> >>
>>>>>>>>>>>>>>>>>>> ago
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Process: 1863
> >> >> >>
>>>>>>>>>>>>>>>>
> ExecStartPre=/usr/libexec/vdsm/vdsmd_init_common.sh
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
--pre-start (code=exited,
> status=0/SUCCESS)
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Main PID: 1978 (vdsmd)
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Tasks: 38 (limit: 11516)
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Memory: 63.6M
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
CGroup: /system.slice/vdsmd.service
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
└─1978
> >> /usr/libexec/platform-python
> >> >> >>
>>>>>>>>>>>>>>>> /usr/share/vdsm/vdsmd
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:05
dhcp-0-191.tlv.redhat.com
> >> >> >>
>>>>>>>>>>>>>>>>>>>
vdsmd_init_common.sh[1863]:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
vdsm:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Running prepare_transient_repository
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:05
dhcp-0-191.tlv.redhat.com
> >> >> >>
>>>>>>>>>>>>>>>>>>>
vdsmd_init_common.sh[1863]:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
vdsm:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Running syslog_available
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:05
dhcp-0-191.tlv.redhat.com
> >> >> >>
>>>>>>>>>>>>>>>>>>>
vdsmd_init_common.sh[1863]:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
vdsm:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Running nwfilter
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:06
dhcp-0-191.tlv.redhat.com
> >> >> >>
>>>>>>>>>>>>>>>>>>>
vdsmd_init_common.sh[1863]:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
vdsm:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Running dummybr
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:06
dhcp-0-191.tlv.redhat.com
> >> >> >>
>>>>>>>>>>>>>>>>>>>
vdsmd_init_common.sh[1863]:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
vdsm:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Running tune_system
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:06
dhcp-0-191.tlv.redhat.com
> >> >> >>
>>>>>>>>>>>>>>>>>>>
vdsmd_init_common.sh[1863]:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
vdsm:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Running test_space
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:06
dhcp-0-191.tlv.redhat.com
> >> >> >>
>>>>>>>>>>>>>>>>>>>
vdsmd_init_common.sh[1863]:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
vdsm:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Running test_lo
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:06
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>> systemd[1]:
> >> >> >> >>>>>>>>>>>> Started
> >> >> >>
>>>>>>>>>>>>>>>>>>> Virtual
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Desktop Server Manager.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:08
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>> vdsm[1978]:
> >> >> >> >>>>>>>>>> WARN
> >> >> >> >>>>>>>>>>>> MOM
> >> >> >>
>>>>>>>>>>>>>>>> not
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
available.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
Jul 15 09:02:08
dhcp-0-191.tlv.redhat.com
> >> >> >> >>>>>>>> vdsm[1978]:
> >> >> >> >>>>>>>>>> WARN
> >> >> >> >>>>>>>>>>>> MOM
> >> >> >>
>>>>>>>>>>>>>>>> not
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
available, KSM stats will be missing.
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>
On Mon, Jul 15, 2019 at 4:11 PM Milan
> Zamazal
> >> <
> >> >> >>
>>>>>>>>>>>>>>>> mzamazal(a)redhat.com>
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>
wrote:
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>>
Hi Amit, how is it going with your work
> on
> >> PoC
> >> >> >> >>>>>>>> NFS? Do
> >> >> >> >>>>>>>>>> you
> >> >> >> >>>>>>>>>>>>>>
need
> >> >> >>
>>>>>>>>>>>>>>>>>>> help
> >> >> >>
>>>>>>>>>>>>>>>>>>>>>>>
with anything? Is there anything to
> >> experiment
> >> >> >> >>>>>>>> with?
> >> >> >>
> >> >>
> >>
>