On Thu, May 28, 2020 at 11:56 PM Gianluca Cecchi <gianluca.cecchi@gmail.com> wrote:
On Thu, May 28, 2020 at 3:09 PM Gianluca Cecchi <gianluca.cecchi@gmail.com> wrote:

[snip]


In the meantime I was able to change the cluster CPU type to "Intel Cascadelake Server Family" from the web admin GUI, and now I have to try these steps and see if the engine starts automatically without manual operations:

1) set global maintenance
2) shutdown engine
3) exit maintenance
4) see if the engine VM starts without the CPU flag (the matching host-side commands are sketched below)
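For reference, a minimal sketch of those steps as commands run on the host (using the standard hosted-engine CLI; the engine VM can also be shut down from inside the guest with "shutdown -h now"):

# 1) prevent the HA agents from restarting the engine VM
hosted-engine --set-maintenance --mode=global

# 2) shut down the engine VM
hosted-engine --vm-shutdown

# 3) leave global maintenance so the HA agents take over again
hosted-engine --set-maintenance --mode=none

# 4) watch the HA state and wait for the engine VM to autostart
hosted-engine --vm-status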


I confirm that step 4) was successful: after changing the cluster type, the engine VM was able to autostart.

As expected. In my opinion, the point now is just understanding why the engine detected your host with the wrong CPU feature set.
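To see what the host actually reports to the engine, you can query VDSM directly on the host (a sketch; vdsm-client ships with the standard VDSM installation, and the grep pattern just filters the relevant JSON keys):

# dump the host capabilities as seen by VDSM and keep only the CPU fields
vdsm-client Host getCapabilities | grep -E '"cpuModel"|"cpuFlags"'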

To be fully honest, as you can see in https://github.com/oVirt/ovirt-ansible-hosted-engine-setup/blob/master/README.md#L46 , we already have a variable (he_cluster_cpu_type) to force a cluster CPU type from the ansible role, but I don't think it is exposed in the interactive installer.
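For example, when driving the role directly with ansible-playbook instead of the interactive installer, the variable can be passed as an extra var (a sketch; the playbook name is just a placeholder):

# force the cluster CPU type at deployment time
ansible-playbook my_he_deploy.yml -e 'he_cluster_cpu_type="Intel Cascadelake Server Family"'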
 
I'm also able to connect to its console from the web admin GUI.

The command line generated now is:

qemu     29450     1 43 23:38 ?        00:03:09 /usr/libexec/qemu-kvm -name guest=HostedEngine,debug-threads=on -S -object secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-10-HostedEngine/master-key.aes -machine pc-q35-rhel8.1.0,accel=kvm,usb=off,dump-guest-core=off -cpu Cascadelake-Server,hle=off,rtm=off,arch-capabilities=on -m size=16777216k,slots=16,maxmem=67108864k -overcommit mem-lock=off -smp 2,maxcpus=32,sockets=16,cores=2,threads=1 -object iothread,id=iothread1 -numa node,nodeid=0,cpus=0-31,mem=16384 -uuid b572d924-b278-41c7-a9da-52c4f590aac1 -smbios type=1,manufacturer=oVirt,product=RHEL,version=8-1.1911.0.9.el8,serial=d584e962-5461-4fa5-affa-db413e17590c,uuid=b572d924-b278-41c7-a9da-52c4f590aac1,family=oVirt -no-user-config -nodefaults -device sga -chardev socket,id=charmonitor,fd=40,server,nowait -mon chardev=charmonitor,id=monitor,mode=control -rtc base=2020-05-28T21:38:21,driftfix=slew -global kvm-pit.lost_tick_policy=delay -no-hpet -no-reboot -global ICH9-LPC.disable_s3=1 -global ICH9-LPC.disable_s4=1 -boot strict=on -device pcie-root-port,port=0x10,chassis=1,id=pci.1,bus=pcie.0,multifunction=on,addr=0x2 -device pcie-root-port,port=0x11,chassis=2,id=pci.2,bus=pcie.0,addr=0x2.0x1 -device pcie-root-port,port=0x12,chassis=3,id=pci.3,bus=pcie.0,addr=0x2.0x2 -device pcie-root-port,port=0x13,chassis=4,id=pci.4,bus=pcie.0,addr=0x2.0x3 -device pcie-root-port,port=0x14,chassis=5,id=pci.5,bus=pcie.0,addr=0x2.0x4 -device pcie-root-port,port=0x15,chassis=6,id=pci.6,bus=pcie.0,addr=0x2.0x5 -device pcie-root-port,port=0x16,chassis=7,id=pci.7,bus=pcie.0,addr=0x2.0x6 -device pcie-root-port,port=0x17,chassis=8,id=pci.8,bus=pcie.0,addr=0x2.0x7 -device pcie-root-port,port=0x18,chassis=9,id=pci.9,bus=pcie.0,multifunction=on,addr=0x3 -device pcie-root-port,port=0x19,chassis=10,id=pci.10,bus=pcie.0,addr=0x3.0x1 -device pcie-root-port,port=0x1a,chassis=11,id=pci.11,bus=pcie.0,addr=0x3.0x2 -device pcie-root-port,port=0x1b,chassis=12,id=pci.12,bus=pcie.0,addr=0x3.0x3 -device pcie-root-port,port=0x1c,chassis=13,id=pci.13,bus=pcie.0,addr=0x3.0x4 -device pcie-root-port,port=0x1d,chassis=14,id=pci.14,bus=pcie.0,addr=0x3.0x5 -device pcie-root-port,port=0x1e,chassis=15,id=pci.15,bus=pcie.0,addr=0x3.0x6 -device pcie-root-port,port=0x1f,chassis=16,id=pci.16,bus=pcie.0,addr=0x3.0x7 -device pcie-root-port,port=0x20,chassis=17,id=pci.17,bus=pcie.0,addr=0x4 -device pcie-pci-bridge,id=pci.18,bus=pci.1,addr=0x0 -device qemu-xhci,p2=8,p3=8,id=ua-b630a65c-8156-4542-b8e8-98b4d2c48f67,bus=pci.4,addr=0x0 -device virtio-scsi-pci,iothread=iothread1,id=ua-b7696ce2-fd8c-4856-8c38-197fc520271b,bus=pci.5,addr=0x0 -device virtio-serial-pci,id=ua-608f9599-30b2-4ee6-a0d3-d5fb588583ad,max_ports=16,bus=pci.3,addr=0x0 -drive if=none,id=drive-ua-fa671f6c-dc42-4c59-a66d-ccfa3d5d422b,readonly=on -device ide-cd,bus=ide.2,drive=drive-ua-fa671f6c-dc42-4c59-a66d-ccfa3d5d422b,id=ua-fa671f6c-dc42-4c59-a66d-ccfa3d5d422b,werror=report,rerror=report -drive file=/var/run/vdsm/storage/3df8f6d4-d572-4d2b-9ab2-8abc456a396f/df02bff9-2c4b-4e14-a0a3-591a84ccaed9/bf435645-2999-4fb2-8d0e-5becab5cf389,format=raw,if=none,id=drive-ua-df02bff9-2c4b-4e14-a0a3-591a84ccaed9,cache=none,aio=threads -device virtio-blk-pci,iothread=iothread1,scsi=off,bus=pci.6,addr=0x0,drive=drive-ua-df02bff9-2c4b-4e14-a0a3-591a84ccaed9,id=ua-df02bff9-2c4b-4e14-a0a3-591a84ccaed9,bootindex=1,write-cache=on,serial=df02bff9-2c4b-4e14-a0a3-591a84ccaed9,werror=stop,rerror=stop -netdev 
tap,fds=43:44,id=hostua-b29ca99f-a53e-4de7-8655-b65ef4ba5dc4,vhost=on,vhostfds=45:46 -device virtio-net-pci,mq=on,vectors=6,host_mtu=1500,netdev=hostua-b29ca99f-a53e-4de7-8655-b65ef4ba5dc4,id=ua-b29ca99f-a53e-4de7-8655-b65ef4ba5dc4,mac=00:16:3e:0a:96:80,bus=pci.2,addr=0x0 -chardev socket,id=charserial0,fd=47,server,nowait -device isa-serial,chardev=charserial0,id=serial0 -chardev socket,id=charchannel0,fd=48,server,nowait -device virtserialport,bus=ua-608f9599-30b2-4ee6-a0d3-d5fb588583ad.0,nr=1,chardev=charchannel0,id=channel0,name=ovirt-guest-agent.0 -chardev socket,id=charchannel1,fd=49,server,nowait -device virtserialport,bus=ua-608f9599-30b2-4ee6-a0d3-d5fb588583ad.0,nr=2,chardev=charchannel1,id=channel1,name=org.qemu.guest_agent.0 -chardev spicevmc,id=charchannel2,name=vdagent -device virtserialport,bus=ua-608f9599-30b2-4ee6-a0d3-d5fb588583ad.0,nr=3,chardev=charchannel2,id=channel2,name=com.redhat.spice.0 -chardev socket,id=charchannel3,fd=50,server,nowait -device virtserialport,bus=ua-608f9599-30b2-4ee6-a0d3-d5fb588583ad.0,nr=4,chardev=charchannel3,id=channel3,name=org.ovirt.hosted-engine-setup.0 -device usb-tablet,id=input0,bus=ua-b630a65c-8156-4542-b8e8-98b4d2c48f67.0,port=1 -vnc 172.19.0.224:3,password -k en-us -spice port=5904,tls-port=5905,addr=172.19.0.224,x509-dir=/etc/pki/vdsm/libvirt-spice,tls-channel=main,tls-channel=display,tls-channel=inputs,tls-channel=cursor,tls-channel=playback,tls-channel=record,tls-channel=smartcard,tls-channel=usbredir,seamless-migration=on -device qxl-vga,id=ua-ac7aed4d-d824-40f9-aaa8-1c0be702e38c,ram_size=67108864,vram_size=33554432,vram64_size_mb=0,vgamem_mb=16,max_outputs=1,bus=pcie.0,addr=0x1 -device intel-hda,id=ua-c7078e28-5585-4866-bdbd-528ebddd8854,bus=pci.18,addr=0x1 -device hda-duplex,id=ua-c7078e28-5585-4866-bdbd-528ebddd8854-codec0,bus=ua-c7078e28-5585-4866-bdbd-528ebddd8854.0,cad=0 -device virtio-balloon-pci,id=ua-fc4f6b20-0b17-4198-b059-b5753893584d,bus=pci.7,addr=0x0 -object rng-random,id=objua-c4c3e5e7-1c19-4582-a87c-4f3fee4a0ee5,filename=/dev/urandom -device virtio-rng-pci,rng=objua-c4c3e5e7-1c19-4582-a87c-4f3fee4a0ee5,id=ua-c4c3e5e7-1c19-4582-a87c-4f3fee4a0ee5,bus=pci.8,addr=0x0 -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny -msg timestamp=on
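If you only need to verify the CPU model rather than reading the whole command line, querying libvirt read-only on the host is quicker (a sketch; -r uses the read-only connection, so no SASL credentials are needed):

# show only the <cpu> section of the running domain definition
virsh -r dumpxml HostedEngine | grep -A2 '<cpu'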

The only aftermath is that in the web admin GUI I still see the temporary engine VM marked as "external-HostedEngineLocal", and I'm quite afraid that deleting it could have some effect on the real engine one...

That one is just a leftover from the install process.
It's normally cleaned up automatically as one of the last actions of the ansible role used for the deployment.
I suspect that, due to the wrongly detected CPU type, something in your case failed really close to the end of the deployment, leaving that leftover behind: you can safely delete it manually.
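If you prefer to remove it from the command line rather than the web admin GUI, the engine REST API works too (a sketch; the engine FQDN and credentials are placeholders, and <vm_id> stays whatever id the first call returns for external-HostedEngineLocal):

# look up the stale VM to get its id
curl -k -u 'admin@internal:password' 'https://engine.example.com/ovirt-engine/api/vms?search=name%3Dexternal-HostedEngineLocal'

# then delete it by id
curl -k -u 'admin@internal:password' -X DELETE 'https://engine.example.com/ovirt-engine/api/vms/<vm_id>'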
 
