Thanks for the help... details follow below.
1) Authenticate to libvirtd and list the VMs: "HostedEngine" shows up, and also "ns01" now that I manually registered it per the above
[root@medusa ~]# vdsm-client Host getVMList
[
    {
        "status": "Down",
        "statusTime": "2218288798",
        "vmId": "69ab4f82-1a53-42c8-afca-210a3a2715f1"
    }
]
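(Side note: on a hosted-engine host the quickest sanity check is usually the HA agent's own view rather than vdsm-client; a minimal sketch, assuming the ovirt-hosted-engine-ha services are installed and supposed to be running:

hosted-engine --vm-status
systemctl status ovirt-ha-agent ovirt-ha-broker

If the agent/broker are not running, nothing will start the engine VM automatically regardless of what libvirt shows.)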
[root@medusa ~]# virsh -c qemu:///system?authfile=/etc/ovirt-hosted-engine/virsh_auth.conf
Welcome to virsh, the virtualization interactive terminal.
Type: 'help' for help with commands
'quit' to quit
virsh # list --all
 Id   Name                State
------------------------------------
 -    HostedEngine        shut off
 -    HostedEngineLocal   shut off
 -    ns01                shut off
2) Start the VM... but it seems the network is needed first
virsh # start HostedEngine
error: Failed to start domain HostedEngine
error: Network not found: no network with matching name 'vdsm-ovirtmgmt'
virsh # start HostedEngineLocal
error: Failed to start domain HostedEngineLocal
error: Requested operation is not valid: network 'default' is not active
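(The 'vdsm-ovirtmgmt' libvirt network is normally created by VDSM once the ovirtmgmt bridge is configured, so rather than starting the engine by hand in virsh, the usual route is to let VDSM and the HA stack do it; roughly, assuming vdsmd and the HA services are in place:

systemctl restart vdsmd
hosted-engine --vm-start

Restarting vdsmd should re-sync its libvirt network definitions, though the exact behaviour may vary by version.)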
3) Start the networks: this is a "next, next" HCI + Gluster build, so it named the management network "ovirtmgmt"
virsh # net-list
 Name          State    Autostart   Persistent
------------------------------------------------
 ;vdsmdummy;   active   no          no
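(Note that net-list without --all hides networks that are defined but inactive, which is why 'default' does not appear here; for example:

virsh # net-list --all
virsh # net-dumpxml default
)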
virsh # net-autostart --network default
Network default marked as autostarted
virsh # net-start default
Network default started
virsh # start HostedEngineLocal
error: Failed to start domain HostedEngineLocal
error: Cannot access storage file '/var/tmp/localvmn4khg_ak/seed.iso': No such file or directory
<<<< Hmmm... no idea where that is from or what that is about... >>>>
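(As far as I can tell, seed.iso is the cloud-init "NoCloud" seed for the temporary bootstrap VM, generated by the deployment tooling under /var/tmp and cleaned up afterwards. Purely as an illustration of what that file is, and not the exact command the installer runs, such a seed is usually built like:

genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data
)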
virsh # dumpxml HostedEngineLocal
<domain type='kvm'>
<name>HostedEngineLocal</name>
<uuid>bb2006ce-838b-47a3-a049-7e3e5c7bb049</uuid>
<metadata>
<libosinfo:libosinfo xmlns:libosinfo="http://libosinfo.org/xmlns/libvirt/domain/1.0">
<libosinfo:os id="http://redhat.com/rhel/8.0"/>
</libosinfo:libosinfo>
</metadata>
<memory unit='KiB'>16777216</memory>
<currentMemory unit='KiB'>16777216</currentMemory>
<vcpu placement='static'>4</vcpu>
<os>
<type arch='x86_64' machine='pc-q35-rhel8.2.0'>hvm</type>
<boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
<acpi/>
<apic/>
</features>
<cpu mode='host-model' check='partial'/>
<clock offset='utc'>
<timer name='kvmclock' present='yes'/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<pm>
<suspend-to-mem enabled='no'/>
<suspend-to-disk enabled='no'/>
</pm>
<devices>
<emulator>/usr/libexec/qemu-kvm</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/var/tmp/localvmn4khg_ak/images/e2e4d97c-3430-4880-888e-84c283a80052/0f78b6f7-7755-4fe5-90e3-d41df791a645'/>
<target dev='vda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/var/tmp/localvmn4khg_ak/seed.iso'/>
<target dev='sda' bus='sata'/>
<readonly/>
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
</disk>
<controller type='usb' index='0' model='none'/>
<controller type='sata' index='0'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/>
</controller>
<controller type='pci' index='0' model='pcie-root'/>
<controller type='pci' index='1' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='1' port='0x10'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0' multifunction='on'/>
</controller>
<controller type='pci' index='2' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='2' port='0x11'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x1'/>
</controller>
<controller type='pci' index='3' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='3' port='0x12'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x2'/>
</controller>
<controller type='pci' index='4' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='4' port='0x13'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x3'/>
</controller>
<controller type='pci' index='5' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='5' port='0x14'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x4'/>
</controller>
<controller type='virtio-serial' index='0'>
<address type='pci' domain='0x0000' bus='0x02' slot='0x00' function='0x0'/>
</controller>
<interface type='network'>
<mac address='00:16:3e:46:a6:60'/>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<channel type='unix'>
<target type='virtio' name='org.qemu.guest_agent.0'/>
<address type='virtio-serial' controller='0' bus='0' port='1'/>
</channel>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes'>
<listen type='address'/>
</graphics>
<video>
<model type='vga' vram='16384' heads='1' primary='yes'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0'/>
</video>
<memballoon model='none'/>
<rng model='virtio'>
<backend model='random'>/dev/random</backend>
<address type='pci' domain='0x0000' bus='0x04' slot='0x00' function='0x0'/>
</rng>
</devices>
</domain>
virsh #
## So, not sure why the hosted engine needs an ISO image. Can I remove this?
virsh # change-media HostedEngineLocal /var/tmp/localvmn4khg_ak/seed.iso --eject
Successfully ejected media.
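(If the ISO ever needs to go back in, the media can be re-attached by disk target rather than by path, e.g. with whatever replacement ISO path applies:

virsh # change-media HostedEngineLocal sda /path/to/seed.iso --insert
)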
virsh # start HostedEngineLocal
error: Failed to start domain HostedEngineLocal
error: Cannot access storage file '/var/tmp/localvmn4khg_ak/images/e2e4d97c-3430-4880-888e-84c283a80052/0f78b6f7-7755-4fe5-90e3-d41df791a645' (as uid:107, gid:107): No such file or directory
[root@medusa 3afc47ba-afb9-413f-8de5-8d9a2f45ecde]# tree |grep e2e4d97c-3430-4880-888e-84c283a80052/0f78b6f7-7755-4fe5-90e3-d41df791a645
[root@medusa 3afc47ba-afb9-413f-8de5-8d9a2f45ecde]# pwd
/gluster_bricks/engine/engine/3afc47ba-afb9-413f-8de5-8d9a2f45ecde
[root@medusa 3afc47ba-afb9-413f-8de5-8d9a2f45ecde]# tree
.
├── dom_md
│ ├── ids
│ ├── inbox
│ ├── leases
│ ├── metadata
│ ├── outbox
│ └── xleases
├── ha_agent
│ ├── hosted-engine.lockspace -> /run/vdsm/storage/3afc47ba-afb9-413f-8de5-8d9a2f45ecde/6023f2b1-ea6e-485b-9ac2-8decd5f7820d/b38a5e37-fac4-4c23-a0c4-7359adff619c
│ └── hosted-engine.metadata -> /run/vdsm/storage/3afc47ba-afb9-413f-8de5-8d9a2f45ecde/77082dd8-7cb5-41cc-a69f-0f4c0380db23/38d552c5-689d-47b7-9eea-adb308da8027
├── images
│ ├── 1dc69552-dcc6-484d-8149-86c93ff4b8cc
│ │ ├── e4e26573-09a5-43fa-91ec-37d12de46480
│ │ ├── e4e26573-09a5-43fa-91ec-37d12de46480.lease
│ │ └── e4e26573-09a5-43fa-91ec-37d12de46480.meta
│ ├── 375d2483-ee83-4cad-b421-a5a70ec06ba6
│ │ ├── f936d4be-15e3-4983-8bf0-9ba5b97e638a
│ │ ├── f936d4be-15e3-4983-8bf0-9ba5b97e638a.lease
│ │ └── f936d4be-15e3-4983-8bf0-9ba5b97e638a.meta
│ ├── 6023f2b1-ea6e-485b-9ac2-8decd5f7820d
│ │ ├── b38a5e37-fac4-4c23-a0c4-7359adff619c
│ │ ├── b38a5e37-fac4-4c23-a0c4-7359adff619c.lease
│ │ └── b38a5e37-fac4-4c23-a0c4-7359adff619c.meta
│ ├── 685309b1-1ae9-45f3-90c3-d719a594482d
│ │ ├── 9eddcf51-fd15-4de5-a4b6-a83a9082dee0
│ │ ├── 9eddcf51-fd15-4de5-a4b6-a83a9082dee0.lease
│ │ └── 9eddcf51-fd15-4de5-a4b6-a83a9082dee0.meta
│ ├── 74f1b2e7-2483-4e4d-8301-819bcd99129e
│ │ ├── c1888b6a-c48e-46ce-9677-02e172ef07af
│ │ ├── c1888b6a-c48e-46ce-9677-02e172ef07af.lease
│ │ └── c1888b6a-c48e-46ce-9677-02e172ef07af.meta
│ └── 77082dd8-7cb5-41cc-a69f-0f4c0380db23
│ ├── 38d552c5-689d-47b7-9eea-adb308da8027
│ ├── 38d552c5-689d-47b7-9eea-adb308da8027.lease
│ └── 38d552c5-689d-47b7-9eea-adb308da8027.meta
└── master
├── tasks
│ ├── 150927c5-bae6-45e4-842c-a7ba229fc3ba
│ │ └── 150927c5-bae6-45e4-842c-a7ba229fc3ba.job.0
│ ├── 21bba697-26e6-4fd8-ac7c-76f86b458368.temp
│ ├── 26c580b8-cdb2-4d21-9bea-96e0788025e6.temp
│ ├── 2e0e347c-fd01-404f-9459-ef175c82c354.backup
│ │ └── 2e0e347c-fd01-404f-9459-ef175c82c354.task
│ ├── 43f17022-e003-4e9f-81ec-4a01582223bd.backup
│ │ └── 43f17022-e003-4e9f-81ec-4a01582223bd.task
│ ├── 5055f61a-4cc8-459f-8fe5-19427b74a4f2.temp
│ ├── 6826c8f5-b9df-498e-a576-af0c4e7fe69c
│ │ └── 6826c8f5-b9df-498e-a576-af0c4e7fe69c.task
│ ├── 78ed90b0-2a87-4c48-8204-03d4b0bd7694
│ │ └── 78ed90b0-2a87-4c48-8204-03d4b0bd7694.job.0
│ ├── 7c7799a5-d28e-4b42-86ee-84bb8822e82f.temp
│ ├── 95d29b8c-23d9-4d1a-b995-2ba364970893
│ ├── 95d29b8c-23d9-4d1a-b995-2ba364970893.temp
│ ├── a1fa934a-5ea7-4160-ab8c-7e3476dc2676.backup
│ │ └── a1fa934a-5ea7-4160-ab8c-7e3476dc2676.task
│ ├── bcee8725-efde-4848-a108-01c262625aaa
│ │ └── bcee8725-efde-4848-a108-01c262625aaa.job.0
│ ├── c0b5a032-c4a9-4648-b348-c2a5cf4d6cad.temp
│ ├── ce7e2ebf-2c28-435d-b359-14d0da2e9011
│ └── ce7e2ebf-2c28-435d-b359-14d0da2e9011.temp
└── vms
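(One way to dig into the disk-path mismatch: ask libvirt which image the HostedEngine definition actually points at, and check whether it resolves on the engine storage domain; roughly, reusing the authfile and storage-domain UUID from above:

virsh -c qemu:///system?authfile=/etc/ovirt-hosted-engine/virsh_auth.conf domblklist HostedEngine
ls -l /run/vdsm/storage/3afc47ba-afb9-413f-8de5-8d9a2f45ecde/

Keep in mind the /run/vdsm/storage links only exist while VDSM has the storage domain prepared.)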
#############
Ok.. I think I am just making things worse.
Questions:
1) Why is this engine showing up on only one of the three servers?
2) Why are there both "HostedEngine" and "HostedEngineLocal"?
HostedEngineLocal should not be listed on any of the hosts. It is a temporary VM used during the initial deployment.
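(If HostedEngineLocal is only a leftover definition from the interrupted deployment, it can normally just be dropped, since its working files under /var/tmp are temporary anyway; for example:

virsh # undefine HostedEngineLocal
)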
3) These disk paths do not seem to align with the engine file paths on the volume.
4) If I redeploy the HCI engine via Cockpit, will it be able to complete the build? I don't care if it wipes the "engine" so long as my data / VMs can be re-ingested.
The normal restoration path for your scenario is to redeploy the engine, wiping out the previous one and importing the engine configuration backup (hope you have one).
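Roughly, assuming a backup file named engine-backup.tar.gz (name is just an example):

engine-backup --mode=backup --scope=all --file=engine-backup.tar.gz --log=engine-backup.log
hosted-engine --deploy --restore-from-file=engine-backup.tar.gz

The first command is run on the old engine VM if it is still reachable; the second is run on the host to redeploy and restore. The data storage domains holding your VMs can then be imported into the restored engine.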
What happened with the ns01 VM? Did you start it?