# Good luck!
ssh root@engine
# Switch to the postgres OS user (the engine DB authenticates the local postgres user).
su - postgres
# Load the Software Collections environment for PostgreSQL 9.5; without it
# psql is not on PATH and the client will not start at all.
source /opt/rh/rh-postgresql95/enable
# Open the 'engine' database.
psql engine
-- Commands run inside the DB. NOTE: psql/SQL comments use "--"; "#" is a
-- syntax error inside psql. Original lines were also wrapped mid-identifier
-- and mid-UUID by the paste; reconstructed here.
-- Map storage domain IDs to names so the right domain can be identified.
select id, storage_name from storage_domain_static;
-- Find the OVF disks attached to the domain being removed; their disk_id
-- values feed the base_disks deletes below.
select storage_domain_id, ovf_disk_id from storage_domains_ovf_info where storage_domain_id = 'fbe7bf1a-2f03-4311-89fa-5031eab638bf';
-- Remove the stale storage domain rows. Consider wrapping the deletes in
-- BEGIN; ... COMMIT; so a mistake can be rolled back before it is permanent.
delete from storage_domain_dynamic where id = 'fbe7bf1a-2f03-4311-89fa-5031eab638bf';
delete from storage_domain_static where id = 'fbe7bf1a-2f03-4311-89fa-5031eab638bf';
-- These disk_id values came from the storage_domains_ovf_info query above.
delete from base_disks where disk_id = '7a155ede-5317-4860-aa93-de1dc283213e';
delete from base_disks where disk_id = '7dedd0e1-8ce8-444e-8a3d-117c46845bb0';
delete from storage_domains_ovf_info where storage_domain_id = 'fbe7bf1a-2f03-4311-89fa-5031eab638bf';
delete from storage_pool_iso_map where storage_id = 'fbe7bf1a-2f03-4311-89fa-5031eab638bf';
-- List every table in the database (useful for locating related rows):
select table_schema, table_name from information_schema.tables order by table_schema, table_name;
-- Maybe you don't need this one; you may need to find the NFS volume instead.
select * from gluster_volumes;
delete from gluster_volumes where id = '9b06a1e9-8102-4cd7-bc56-84960a1efaa2';
-- Re-list the tables to look for whatever still references the volume
-- (same query as above, repeated as part of the original investigation).
select table_schema, table_name from information_schema.tables order by table_schema, table_name;
-- The previous delete failed because a row in storage_server_connections
-- still referenced the volume. The blocking row can differ in your setup,
-- so inspect the table first and pick the matching id.
select * from storage_server_connections;
delete from storage_server_connections where id = '490ee1c7-ae29-45c0-bddd-6170822c8490';
-- Retry the volume delete now that the referencing row is gone.
delete from gluster_volumes where id = '9b06a1e9-8102-4cd7-bc56-84960a1efaa2';