I tried to do what was suggested on the list.
This is what I did:
[root@ovirt-node3 ~]# hosted-engine --set-maintenance --mode=global
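(As a sanity check, not part of my captured transcript, the maintenance state can be confirmed on the node before going on, e.g.:

  hosted-engine --vm-status | grep -i maintenance
)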
[root@ovirt-engine ~]# systemctl list-units | grep ovirt
ovirt-engine-dwhd.service            loaded active running oVirt Engine Data Warehouse
ovirt-engine.service                 loaded active running oVirt Engine
ovirt-fence-kdump-listener.service   loaded active running oVirt Engine fence_kdump listener
ovirt-imageio.service                loaded active running oVirt ImageIO Daemon
ovirt-provider-ovn.service           loaded active running oVirt OVN provider
ovirt-vmconsole-proxy-sshd.service   loaded active running oVirt VM Console SSH server daemon
ovirt-websocket-proxy.service        loaded active running oVirt Engine websockets proxy
dev-mapper-ovirt\x2dswap.swap        loaded active active  /dev/mapper/ovirt-swap
[root@ovirt-engine ~]# systemctl stop ovirt-*
[root@ovirt-engine ~]# systemctl list-units | grep ovirt
dev-mapper-ovirt\x2dswap.swap        loaded active active  /dev/mapper/ovirt-swap
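Before touching the database directly it is probably wise to take a backup; I did not include one above, but something along these lines should do (file names are just examples):

  engine-backup --mode=backup --scope=all \
      --file=/root/engine-backup-before-cleanup.tar.gz \
      --log=/root/engine-backup-before-cleanup.log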
[root@ovirt-engine ~]# PGPASSWORD="Gu8xDOW8a0Fvfqg26bsyqf" psql --user=engine -h 127.0.0.1 engine
psql (12.11)
Type "help" for help.
engine=# select id, storage_name from storage_domain_static;
id | storage_name
--------------------------------------+------------------------
072fbaa1-08f3-4a40-9f34-a5ca22dd1d74 | ovirt-image-repository
493cc639-8207-4478-a07e-a79d9765c8d1 | ISO_at_node2
d79605bc-f02a-47d1-b35a-b7a3ff2907e3 | gv0
45b4f14c-8323-482f-90ab-99d8fd610018 | gv1
3577c21e-f757-4405-97d1-0f827c9b4e22 | hosted_storage
fdbed4cd-400b-4db1-8f99-e400787445d9 | domainATnetworkstorage
aac79175-ab2b-4b5b-a6e4-9feef9ce17ab | 7000a28
(7 rows)
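The domain I wanted to remove is '7000a28'; its id can also be looked up directly by name, e.g.:

  select id from storage_domain_static where storage_name = '7000a28';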
engine=# select storage_domain_id, ovf_disk_id from storage_domains_ovf_info where storage_domain_id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
storage_domain_id | ovf_disk_id
--------------------------------------+--------------------------------------
aac79175-ab2b-4b5b-a6e4-9feef9ce17ab | e150cef7-91d6-4538-b6a7-d185a1b02509
aac79175-ab2b-4b5b-a6e4-9feef9ce17ab | 491920f8-a06e-4702-aa17-8af4589374b4
(2 rows)
engine=# delete from storage_domain_dynamic where id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
DELETE 1
engine=# delete from storage_domain_static where id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
DELETE 1
engine=# delete from base_disks where disk_id in ('e150cef7-91d6-4538-b6a7-d185a1b02509','491920f8-a06e-4702-aa17-8af4589374b4');
DELETE 2
engine=# delete from storage_domains_ovf_info where storage_domain_id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
DELETE 0
engine=# delete from storage_pool_iso_map where storage_id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
DELETE 0
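The two DELETE 0 results are probably because those rows were already removed by foreign-key cascades when the storage_domain_static row was deleted. If I had to redo it, I would wrap the deletes in a transaction so a wrong delete can be rolled back; a rough sketch with the same ids:

  begin;
  delete from storage_domain_dynamic where id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
  delete from storage_domain_static  where id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
  -- ... the remaining deletes from above ...
  commit;   -- or rollback; if anything looks wrong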
[root@ovirt-node2 ~]# lvmdevices
Device /dev/mapper/Samsung_SSD_870_EVO_4TB_S6BCNG0R300064E IDTYPE=mpath_uuid IDNAME=mpath-Samsung_SSD_870_EVO_4TB_S6BCNG0R300064E DEVNAME=/dev/mapper/Samsung_SSD_870_EVO_4TB_S6BCNG0R300064E PVID=1ScYNcZ0GcuR7j9xYezuh562K2GX4f7h
Device /dev/mapper/ST4000NM000A-2HZ100_WJG1ZC85 IDTYPE=mpath_uuid IDNAME=mpath-ST4000NM000A-2HZ100_WJG1ZC85 DEVNAME=/dev/mapper/ST4000NM000A-2HZ100_WJG1ZC85 PVID=PALZBWIFpZSGNlZo1uamMhDdauYrsXb8
Device /dev/nvme0n1p3 IDTYPE=sys_wwid IDNAME=eui.6479a74c40200874 DEVNAME=/dev/nvme0n1p3 PVID=S22fRYq8H37g23RVdBshQLotkqsHwYz2 PART=3
Device /dev/nvme0n1p4 IDTYPE=sys_wwid IDNAME=eui.6479a74c40200874 DEVNAME=/dev/nvme0n1p4 PVID=9vfdUZXH9puCBiBf7BNLK0bI3Q485xdx PART=4
--> Device /dev/mapper/3600507640082010b380000000000002a IDTYPE=mpath_uuid IDNAME=mpath-3600507640082010b380000000000002a DEVNAME=/dev/mapper/3600507640082010b380000000000002a PVID=uVTp8Un0vqElbC4SdayZ0L8o5jIK0Hdi
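The last device (marked with -->) is the multipath device that belonged to the removed domain. The hosts probably need some cleanup too; presumably the stale device can be dropped with something like this (device names as above, untested on my side):

  lvmdevices --deldev /dev/mapper/3600507640082010b380000000000002a
  multipath -f 3600507640082010b380000000000002a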
engine=# select * from luns;
(1 row, shown one column per line)
 physical_volume_id | uVTp8U-n0vq-ElbC-4Sda-yZ0L-8o5j-IK0Hdi
 lun_id             | 3600507640082010b380000000000002a
 volume_group_id    | lIX0VF-h1xs-u4m5-BYiO-8iwB-Gjxh-xWkQWr
 serial             | SIBM_2145_0100208042ceXX00
 lun_mapping        | 0
 vendor_id          | IBM
 product_id         | 2145
 device_size        | 1024
 discard_max_size   | 2097152
engine=# delete from luns where physical_volume_id = 'uVTp8U-n0vq-ElbC-4Sda-yZ0L-8o5j-IK0Hdi';
DELETE 1
engine=# select * from storage_server_connections;
(6 rows; only non-empty columns shown, "->" marks the row of the removed domain)

 id           | 4ecbbb11-7608-4244-8aad-273110dbca91
 connection   | localhost:/glen
 portal       | 1
 storage_type | 7
 vfs_type     | glusterfs

 id           | db369b1e-6065-4a2b-848c-0e16ea5dfc6d
 connection   | ovirt-node2.ovirt:/dati/vd0/nfs_at_node2
 portal       | 1
 storage_type | 1
 nfs_version  | auto

 id                | dcd8001a-5ac8-4bd3-aa7c-3a300c6e6971
 connection        | ovirt-node2.ovirt:/gv1
 portal            | 1
 storage_type      | 7
 mount_options     | backup-volfile-servers=ovirt-node3.ovirt:ovirt-node4.ovirt
 vfs_type          | glusterfs
 gluster_volume_id | 863221f4-e11c-4589-95e9-aa3948e177f5

 id                | 402e4f09-3c47-4b2b-a5fd-67957a5a698e
 connection        | ovirt-node2.ovirt:/gv0
 portal            | 1
 storage_type      | 7
 mount_options     | backup-volfile-servers=ovirt-node3.ovirt:ovirt-node4.ovirt
 vfs_type          | glusterfs
 gluster_volume_id | 187320c6-2d12-409f-91b0-d22179903eaf

-> id           | 09e9aa89-7e92-4505-bff8-75298862de89
   connection   | 169.254.30.26
   iqn          | iqn.1986-03.com.ibm:2145.gav7000a28.node1
   port         | 3260
   portal       | 4
   storage_type | 3

 id           | aedbec57-fcfc-419e-b7f5-253fc7bc4083
 connection   | networkstorage.ovirt:/dati/networkstorage/ovirt_domain
 portal       | 1
 storage_type | 1
 nfs_version  | auto

engine=# delete from storage_server_connections where id='09e9aa89-7e92-4505-bff8-75298862de89';
DELETE 1
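If I remember correctly there is also a lun_storage_server_connection_map table linking LUNs to connections; it may be worth checking it for leftovers as well (it may already be empty after the deletes above):

  select * from lun_storage_server_connection_map where lun_id = '3600507640082010b380000000000002a';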
I don't have any disks mapped in the storage domain, so I did not run these two:
engine=# -- not done: delete from base_disks where disk_id='03d651eb-14a9-4dca-8c87-605f101a5e0c';
engine=# -- not done: delete from permissions where object_id='03d651eb-14a9-4dca-8c87-605f101a5e0c';
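To double-check that no regular disks were really left on the domain, a query like this could be run beforehand (image_storage_domain_map is the table mapping disk images to storage domains, if I am not mistaken):

  select * from image_storage_domain_map where storage_domain_id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';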
***********************
Searching for objects:
*** Storage uuid: aac79175-ab2b-4b5b-a6e4-9feef9ce17ab
engine=# select * from job_subject_entity where entity_id='aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
job_id | entity_id | entity_type
--------------------------------------+--------------------------------------+-------------
34a3912a-970f-4262-94a8-9ef25bf2a0c6 | aac79175-ab2b-4b5b-a6e4-9feef9ce17ab | Storage
c6053af1-21d8-4917-85e5-b63176a15708 | aac79175-ab2b-4b5b-a6e4-9feef9ce17ab | Storage
(2 rows)
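I did not delete these two job rows yet; presumably they (and the parent rows in the job table) could be removed with something like:

  delete from job_subject_entity where entity_id = 'aac79175-ab2b-4b5b-a6e4-9feef9ce17ab';
  delete from job where job_id in ('34a3912a-970f-4262-94a8-9ef25bf2a0c6','c6053af1-21d8-4917-85e5-b63176a15708');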
*** OVF disk ids: e150cef7-91d6-4538-b6a7-d185a1b02509, 491920f8-a06e-4702-aa17-8af4589374b4
engine=# select * from images where image_group_id in ('e150cef7-91d6-4538-b6a7-d185a1b02509','491920f8-a06e-4702-aa17-8af4589374b4');
(columns shown as rows, the two records side by side)

 image_guid             | b9bf1dc5-358d-4aa0-b8ee-d00781f9a40d | a36e1483-bad5-4737-8efd-b088d8488a3a
 creation_date          | 2022-09-21 15:06:00+00               | 2022-09-21 15:06:00+00
 size                   | 134217728                            | 134217728
 it_guid                | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000000000
 parentid               | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000000000
 imagestatus            | 1                                    | 1
 lastmodified           | 2022-09-21 16:10:51.461+00           | 2022-09-21 16:10:51.461+00
 vm_snapshot_id         |                                      |
 volume_type            | 1                                    | 1
 volume_format          | 5                                    | 5
 image_group_id         | e150cef7-91d6-4538-b6a7-d185a1b02509 | 491920f8-a06e-4702-aa17-8af4589374b4
 _create_date           | 2022-09-21 18:13:11.824767+00        | 2022-09-21 18:13:09.806755+00
 _update_date           | 2022-09-21 16:10:52.430083+00        | 2022-09-21 16:10:53.265084+00
 active                 | t                                    | t
 volume_classification  | 0                                    | 0
 qcow_compat            | 0                                    | 0
 sequence_number        | 1                                    | 1
(2 rows)
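These two rows are the OVF disks of the removed domain; I left them in place for now. Presumably they could be cleaned up along the same lines as the other deletes, roughly (assuming image_storage_domain_map still references them):

  delete from image_storage_domain_map where image_id in ('b9bf1dc5-358d-4aa0-b8ee-d00781f9a40d','a36e1483-bad5-4737-8efd-b088d8488a3a');
  delete from images where image_group_id in ('e150cef7-91d6-4538-b6a7-d185a1b02509','491920f8-a06e-4702-aa17-8af4589374b4');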
[root@ovirt-engine ~]# halt -p
[root@ovirt-node3 ~]# hosted-engine --set-maintenance --mode=none
I waited a bit, and then the domain disappeared and the engine seems to work.
I think some further housekeeping is still needed, as I have at least some garbage left in the job_subject_entity and images tables.
Hope this helps someone.