I completely disabled AppArmor and SELinux on the Ubuntu NFS server (the non-ZFS server).


root@dongle:/exports/00/storage# grep 'apparmor=0' /etc/default/grub

GRUB_CMDLINE_LINUX_DEFAULT="quiet splash apparmor=0"

root@msnowubntlt:/exports/00/storage# aa-status 

apparmor module is loaded.

apparmor filesystem is not mounted.

root@msnowubntlt:/exports/00/storage# sestatus 

SELinux status:                 disabled

root@dongle:/exports/00/storage# 
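
For reference, getting AppArmor into that state on Ubuntu amounts to roughly the following (a sketch; the grub edit itself isn't shown above, and SELinux is simply not enabled on Ubuntu by default, hence "disabled" from sestatus):

# add apparmor=0 to GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub, then:
update-grub
systemctl disable --now apparmor.service   # optional; apparmor=0 already disables it at boot
reboot
aa-status   # should now report that the apparmor filesystem is not mounted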


I went ahead and stopped UFW and firewalld for good measure.
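
The commands for that are simply something like:

systemctl stop ufw.service firewalld.service            # units stay enabled but go inactive, as shown below
# or, to also keep them off across reboots:
systemctl disable --now ufw.service firewalld.service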

root@dongle:/exports/00# systemctl status ufw.service

● ufw.service - Uncomplicated firewall

     Loaded: loaded (/lib/systemd/system/ufw.service; enabled; vendor preset: enabled)

     Active: inactive (dead) since Sun 2021-01-24 14:32:54 MST; 15min ago

       Docs: man:ufw(8)

    Process: 353 ExecStart=/lib/ufw/ufw-init start quiet (code=exited, status=0/SUCCESS)

    Process: 157355 ExecStop=/lib/ufw/ufw-init stop (code=exited, status=0/SUCCESS)

   Main PID: 353 (code=exited, status=0/SUCCESS)


Jan 24 14:32:54 dongle.co.slakin.net systemd[1]: Stopping Uncomplicated firewall...

Jan 24 14:32:54 dongle.co.slakin.net ufw-init[157355]: Skip stopping firewall: ufw (not enabled)

Jan 24 14:32:54 dongle.co.slakin.net systemd[1]: ufw.service: Succeeded.

Jan 24 14:32:54 dongle.co.slakin.net systemd[1]: Stopped Uncomplicated firewall.

Warning: journal has been rotated since unit was started, output may be incomplete.

root@dongle:/exports/00# systemctl status firewalld.service

● firewalld.service - firewalld - dynamic firewall daemon

     Loaded: loaded (/lib/systemd/system/firewalld.service; enabled; vendor preset: enabled)

     Active: inactive (dead) since Sun 2021-01-24 14:33:00 MST; 15min ago

       Docs: man:firewalld(1)

    Process: 817 ExecStart=/usr/sbin/firewalld --nofork --nopid (code=exited, status=0/SUCCESS)

   Main PID: 817 (code=exited, status=0/SUCCESS)


Jan 24 14:17:06 dongle.co.slakin.net systemd[1]: Starting firewalld - dynamic firewall daemon...

Jan 24 14:17:06 dongle.co.slakin.net systemd[1]: Started firewalld - dynamic firewall daemon.

Jan 24 14:32:58 dongle.co.slakin.net systemd[1]: Stopping firewalld - dynamic firewall daemon...

Jan 24 14:33:00 dongle.co.slakin.net systemd[1]: firewalld.service: Succeeded.

Jan 24 14:33:00 dongle.co.slakin.net systemd[1]: Stopped firewalld - dynamic firewall daemon.

root@dongle:/exports/00# 



I started with a fresh mount that is empty and owned by 36:36.
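
Preparing that directory on the server amounts to something like this (a sketch):

mkdir -p /exports/00/storage
chown 36:36 /exports/00 /exports/00/storage
chmod 0755 /exports/00/storage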

root@dongle:/exports/00/storage# ls -lan

total 8

drwxr-xr-x 2 36 36 4096 Jan 23 10:09 .

drwxrwxrwx 3 36 36 4096 Jan 23 09:41 ..

root@msnowubntlt:/exports/00/storage# showmount -e $HOSTNAME

Export list for msnowubntlt:

/exports/00           *

/exports/host_storage *

root@dongle:/exports/00/storage# 
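
For reference, the corresponding /etc/exports entries follow the pattern discussed in the quoted thread below; the exact options used for /exports/00 are not shown here, so treat this as a sketch:

/exports/00            *(rw,sync,anonuid=36,anongid=36,no_subtree_check)
/exports/host_storage  *(rw,sync,anonuid=36,anongid=36,no_subtree_check)

followed by "exportfs -ra" to re-export.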


I tried to create an NFS storage domain from the oVirt GUI, but it still fails. brick (the oVirt node host) successfully mounts the NFS share and creates files:

root@dongle:/exports/00# find . -exec ls -ln '{}' \;

total 4

drwxrwxr-x 3 36 36 4096 Jan 24 14:43 storage

total 4

drwxr-xr-x 4 36 36 4096 Jan 24 14:43 a9ab8829-14bd-45cc-a57b-03b01e96f407

total 8

drwxr-xr-x 2 36 36 4096 Jan 24 14:43 dom_md

drwxr-xr-x 2 36 36 4096 Jan 24 14:43 images

total 0

total 252

-rw-rw---- 1 36 36        0 Jan 24 14:43 ids

-rw-rw---- 1 36 36 16777216 Jan 24 14:43 inbox

-rw-rw---- 1 36 36        0 Jan 24 14:43 leases

-rw-r--r-- 1 36 36      345 Jan 24 14:43 metadata

-rw-rw---- 1 36 36 16777216 Jan 24 14:43 outbox

-rw-rw---- 1 36 36  1302528 Jan 24 14:43 xleases

-rw-rw---- 1 36 36 16777216 Jan 24 14:43 ./storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md/outbox

-rw-r--r-- 1 36 36 345 Jan 24 14:43 ./storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md/metadata

-rw-rw---- 1 36 36 0 Jan 24 14:43 ./storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md/leases

-rw-rw---- 1 36 36 1302528 Jan 24 14:43 ./storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md/xleases

-rw-rw---- 1 36 36 16777216 Jan 24 14:43 ./storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md/inbox

-rw-rw---- 1 36 36 0 Jan 24 14:43 ./storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md/ids

root@dongle:/exports/00# 
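
As an extra sanity check (not shown above), the mount can also be exercised by hand as the vdsm user; the test file name here is just an example:

sudo -u vdsm dd if=/dev/zero of='/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/__test__' bs=1M count=1 oflag=direct
sudo -u vdsm rm '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/__test__'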


vdsm.log still shows a failure...

[root@brick vdsm]# ./reset.sh 

-rw-r--r--. 1 vdsm kvm  6.9K Jan 24 14:42 mom.log

-rw-r--r--. 1 root root  32K Jan 24 14:43 supervdsm.log

-rw-r--r--. 1 vdsm kvm   64K Jan 24 14:43 vdsm.log

-rw-r--r--. 1 vdsm kvm     2 Jan 24 14:35 vdsm-schema-error.log

-rw-r--r--. 1 vdsm kvm  2 Jan 24 14:43 mom.log

-rw-r--r--. 1 root root 2 Jan 24 14:43 supervdsm.log

-rw-r--r--. 1 vdsm kvm  2 Jan 24 14:43 vdsm.log

-rw-r--r--. 1 vdsm kvm  2 Jan 24 14:43 vdsm-schema-error.log

 

2021-01-24 14:43:09,764-0700 INFO  (jsonrpc/4) [api.host] START getAllVmStats() from=::ffff:172.16.1.29,45646 (api:48)

2021-01-24 14:43:09,765-0700 INFO  (jsonrpc/4) [api.host] FINISH getAllVmStats return={'status': {'code': 0, 'message': 'Done'}, 'statsList': (suppressed)} from=::ffff:172.16.1.29,45646 (api:54)

2021-01-24 14:43:12,680-0700 INFO  (periodic/1) [vdsm.api] START repoStats(domains=()) from=internal, task_id=1f14dd7b-e4e5-4b11-b889-fec1cfa82a53 (api:48)

2021-01-24 14:43:12,682-0700 INFO  (periodic/1) [vdsm.api] FINISH repoStats return={} from=internal, task_id=1f14dd7b-e4e5-4b11-b889-fec1cfa82a53 (api:54)

2021-01-24 14:43:13,439-0700 INFO  (jsonrpc/7) [api.host] START getStats() from=::ffff:172.16.1.29,45646 (api:48)

2021-01-24 14:43:13,472-0700 INFO  (jsonrpc/7) [vdsm.api] START repoStats(domains=()) from=::ffff:172.16.1.29,45646, task_id=daf4ce0b-f8e1-4067-af3a-47620f5b9491 (api:48)

2021-01-24 14:43:13,474-0700 INFO  (jsonrpc/7) [vdsm.api] FINISH repoStats return={} from=::ffff:172.16.1.29,45646, task_id=daf4ce0b-f8e1-4067-af3a-47620f5b9491 (api:54)

2021-01-24 14:43:13,475-0700 INFO  (jsonrpc/7) [vdsm.api] START multipath_health() from=::ffff:172.16.1.29,45646, task_id=14c53e2e-e304-46e6-b2bc-df8642fd31fd (api:48)

2021-01-24 14:43:13,476-0700 INFO  (jsonrpc/7) [vdsm.api] FINISH multipath_health return={} from=::ffff:172.16.1.29,45646, task_id=14c53e2e-e304-46e6-b2bc-df8642fd31fd (api:54)

2021-01-24 14:43:13,488-0700 INFO  (jsonrpc/7) [api.host] FINISH getStats return={'status': {'code': 0, 'message': 'Done'}, 'info': (suppressed)} from=::ffff:172.16.1.29,45646 (api:54)

2021-01-24 14:43:13,898-0700 INFO  (jsonrpc/3) [vdsm.api] START connectStorageServer(domType=1, spUUID='00000000-0000-0000-0000-000000000000', conList=[{'password': '********', 'protocol_version': 'auto', 'port': '', 'iqn': '', 'connection': 'dongle.co.slakin.net:/exports/00/storage', 'ipv6_enabled': 'false', 'id': '00000000-0000-0000-0000-000000000000', 'user': '', 'tpgt': '1'}], options=None) from=::ffff:172.16.1.29,45646, flow_id=30b1a760-9117-4f1f-aea3-64b849f5e38c, task_id=e3c977b4-9994-438c-9fa8-f0af5ae49909 (api:48)

2021-01-24 14:43:13,902-0700 INFO  (jsonrpc/3) [storage.StorageServer.MountConnection] Creating directory '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage' (storageServer:167)

2021-01-24 14:43:13,902-0700 INFO  (jsonrpc/3) [storage.fileUtils] Creating directory: /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage mode: None (fileUtils:201)

2021-01-24 14:43:13,903-0700 INFO  (jsonrpc/3) [storage.Mount] mounting dongle.co.slakin.net:/exports/00/storage at /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage (mount:207)

2021-01-24 14:43:14,082-0700 INFO  (jsonrpc/3) [IOProcessClient] (dongle.co.slakin.net:_exports_00_storage) Closing client (__init__:641)

2021-01-24 14:43:14,083-0700 INFO  (jsonrpc/3) [IOProcessClient] (23673598-92e5-4f9e-91de-1a38a03d7511) Closing client (__init__:641)

2021-01-24 14:43:14,086-0700 INFO  (ioprocess/109656) [IOProcessClient] (dongle.co.slakin.net:_exports_00_storage) Killing ioprocess (__init__:191)

2021-01-24 14:43:14,086-0700 INFO  (ioprocess/109665) [IOProcessClient] (23673598-92e5-4f9e-91de-1a38a03d7511) Killing ioprocess (__init__:191)

2021-01-24 14:43:14,089-0700 INFO  (ioprocess/109665) [IOProcessClient] (23673598-92e5-4f9e-91de-1a38a03d7511) ioprocess was terminated by signal 9 (__init__:200)

2021-01-24 14:43:14,090-0700 INFO  (ioprocess/109656) [IOProcessClient] (dongle.co.slakin.net:_exports_00_storage) ioprocess was terminated by signal 9 (__init__:200)

2021-01-24 14:43:14,147-0700 INFO  (jsonrpc/3) [storage.StorageDomainCache] Invalidating storage domain cache (sdc:74)

2021-01-24 14:43:14,147-0700 INFO  (jsonrpc/3) [vdsm.api] FINISH connectStorageServer return={'statuslist': [{'id': '00000000-0000-0000-0000-000000000000', 'status': 0}]} from=::ffff:172.16.1.29,45646, flow_id=30b1a760-9117-4f1f-aea3-64b849f5e38c, task_id=e3c977b4-9994-438c-9fa8-f0af5ae49909 (api:54)

2021-01-24 14:43:14,313-0700 INFO  (jsonrpc/6) [vdsm.api] START connectStorageServer(domType=1, spUUID='00000000-0000-0000-0000-000000000000', conList=[{'password': '********', 'protocol_version': 'auto', 'port': '', 'iqn': '', 'connection': 'dongle.co.slakin.net:/exports/00/storage', 'ipv6_enabled': 'false', 'id': 'ca248090-f338-49b2-9a4d-9b3987a2f75e', 'user': '', 'tpgt': '1'}], options=None) from=::ffff:172.16.1.29,45646, flow_id=1c30523d, task_id=31bd7156-d08a-47aa-8e90-a56175d188ba (api:48)

2021-01-24 14:43:14,319-0700 INFO  (jsonrpc/6) [storage.StorageDomainCache] Invalidating storage domain cache (sdc:74)

2021-01-24 14:43:14,319-0700 INFO  (jsonrpc/6) [vdsm.api] FINISH connectStorageServer return={'statuslist': [{'id': 'ca248090-f338-49b2-9a4d-9b3987a2f75e', 'status': 0}]} from=::ffff:172.16.1.29,45646, flow_id=1c30523d, task_id=31bd7156-d08a-47aa-8e90-a56175d188ba (api:54)

2021-01-24 14:43:14,377-0700 INFO  (jsonrpc/5) [vdsm.api] START createStorageDomain(storageType=1, sdUUID='a9ab8829-14bd-45cc-a57b-03b01e96f407', domainName='dongle_nfs', typeSpecificArg='dongle.co.slakin.net:/exports/00/storage', domClass=1, domVersion='5', block_size=512, max_hosts=250, options=None) from=::ffff:172.16.1.29,45646, flow_id=1c30523d, task_id=b9870145-e55b-4fa9-a07b-5167f763ec0d (api:48)

2021-01-24 14:43:14,378-0700 INFO  (jsonrpc/5) [storage.StorageDomainCache] Refreshing storage domain cache (resize=True) (sdc:80)

2021-01-24 14:43:14,378-0700 INFO  (jsonrpc/5) [storage.ISCSI] Scanning iSCSI devices (iscsi:442)

2021-01-24 14:43:14,496-0700 INFO  (jsonrpc/5) [storage.ISCSI] Scanning iSCSI devices: 0.12 seconds (utils:390)

2021-01-24 14:43:14,497-0700 INFO  (jsonrpc/5) [storage.HBA] Scanning FC devices (hba:60)

2021-01-24 14:43:14,767-0700 INFO  (jsonrpc/5) [storage.HBA] Scanning FC devices: 0.27 seconds (utils:390)

2021-01-24 14:43:14,825-0700 INFO  (jsonrpc/5) [storage.Multipath] Resizing multipath devices (multipath:104)

2021-01-24 14:43:14,831-0700 INFO  (jsonrpc/5) [storage.Multipath] Resizing multipath devices: 0.01 seconds (utils:390)

2021-01-24 14:43:14,831-0700 INFO  (jsonrpc/5) [storage.StorageDomainCache] Refreshing storage domain cache: 0.45 seconds (utils:390)

2021-01-24 14:43:14,832-0700 INFO  (jsonrpc/5) [storage.StorageDomainCache] Looking up domain a9ab8829-14bd-45cc-a57b-03b01e96f407 (sdc:171)

2021-01-24 14:43:15,026-0700 WARN  (jsonrpc/5) [storage.LVM] All 1 tries have failed: cmd=['/sbin/lvm', 'vgs', '--config', 'devices {  preferred_names=["^/dev/mapper/"]  ignore_suspended_devices=1  write_cache_state=0  disable_after_error_count=3  filter=["r|.*|"]  hints="none"  obtain_device_list_from_udev=0 } global {  locking_type=1  prioritise_write_locks=1  wait_for_locks=1  use_lvmetad=0 } backup {  retain_min=50  retain_days=0 }', '--noheadings', '--units', 'b', '--nosuffix', '--separator', '|', '--ignoreskippedcluster', '-o', 'uuid,name,attr,size,free,extent_size,extent_count,free_count,tags,vg_mda_size,vg_mda_free,lv_count,pv_count,pv_name', 'a9ab8829-14bd-45cc-a57b-03b01e96f407'] rc=5 err=['  Volume group "a9ab8829-14bd-45cc-a57b-03b01e96f407" not found', '  Cannot process volume group a9ab8829-14bd-45cc-a57b-03b01e96f407'] (lvm:538)

2021-01-24 14:43:15,032-0700 INFO  (tmap-56/0) [IOProcessClient] (dongle.co.slakin.net:_exports_00_storage) Starting client (__init__:340)

2021-01-24 14:43:15,068-0700 INFO  (ioprocess/109792) [IOProcess] (dongle.co.slakin.net:_exports_00_storage) Starting ioprocess (__init__:465)

2021-01-24 14:43:15,073-0700 INFO  (jsonrpc/5) [storage.StorageDomainCache] Looking up domain a9ab8829-14bd-45cc-a57b-03b01e96f407: 0.24 seconds (utils:390)

2021-01-24 14:43:15,124-0700 INFO  (jsonrpc/5) [IOProcessClient] (a9ab8829-14bd-45cc-a57b-03b01e96f407) Starting client (__init__:340)

2021-01-24 14:43:15,164-0700 INFO  (ioprocess/109802) [IOProcess] (a9ab8829-14bd-45cc-a57b-03b01e96f407) Starting ioprocess (__init__:465)

2021-01-24 14:43:15,193-0700 INFO  (jsonrpc/5) [storage.StorageDomain] Creating domain metadata directory '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md' (fileSD:441)

2021-01-24 14:43:15,321-0700 INFO  (jsonrpc/5) [storage.xlease] Formatting index for lockspace 'a9ab8829-14bd-45cc-a57b-03b01e96f407' (version=1) (xlease:701)

2021-01-24 14:43:15,460-0700 INFO  (jsonrpc/5) [storage.StorageDomain] Creating domain images directory '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/images' (nfsSD:110)

2021-01-24 14:43:15,529-0700 INFO  (jsonrpc/5) [storage.StorageDomain] Removing remnants of deleted images [] (fileSD:735)

2021-01-24 14:43:15,529-0700 INFO  (jsonrpc/5) [storage.SANLock] Initializing sanlock for domain a9ab8829-14bd-45cc-a57b-03b01e96f407 path=/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md/ids alignment=1048576 block_size=512 io_timeout=10 (clusterlock:286)

2021-01-24 14:43:15,535-0700 ERROR (jsonrpc/5) [storage.SANLock] Cannot initialize lock for domain a9ab8829-14bd-45cc-a57b-03b01e96f407 (clusterlock:305)

Traceback (most recent call last):

  File "/usr/lib/python3.6/site-packages/vdsm/storage/clusterlock.py", line 295, in initLock

    sector=self._block_size)

sanlock.SanlockException: (19, 'Sanlock lockspace write failure', 'No such device')

2021-01-24 14:43:15,535-0700 INFO  (jsonrpc/5) [vdsm.api] FINISH createStorageDomain error=Could not initialize cluster lock: () from=::ffff:172.16.1.29,45646, flow_id=1c30523d, task_id=b9870145-e55b-4fa9-a07b-5167f763ec0d (api:52)

2021-01-24 14:43:15,536-0700 ERROR (jsonrpc/5) [storage.TaskManager.Task] (Task='b9870145-e55b-4fa9-a07b-5167f763ec0d') Unexpected error (task:880)

Traceback (most recent call last):

  File "/usr/lib/python3.6/site-packages/vdsm/storage/clusterlock.py", line 295, in initLock

    sector=self._block_size)

sanlock.SanlockException: (19, 'Sanlock lockspace write failure', 'No such device')


During handling of the above exception, another exception occurred:


Traceback (most recent call last):

  File "/usr/lib/python3.6/site-packages/vdsm/storage/task.py", line 887, in _run

    return fn(*args, **kargs)

  File "<decorator-gen-123>", line 2, in createStorageDomain

  File "/usr/lib/python3.6/site-packages/vdsm/common/api.py", line 50, in method

    ret = func(*args, **kwargs)

  File "/usr/lib/python3.6/site-packages/vdsm/storage/hsm.py", line 2669, in createStorageDomain

    max_hosts=max_hosts)

  File "/usr/lib/python3.6/site-packages/vdsm/storage/nfsSD.py", line 120, in create

    fsd.initSPMlease()

  File "/usr/lib/python3.6/site-packages/vdsm/storage/sd.py", line 1019, in initSPMlease

    return self._manifest.initDomainLock()

  File "/usr/lib/python3.6/site-packages/vdsm/storage/sd.py", line 627, in initDomainLock

    self._domainLock.initLock(self.getDomainLease())

  File "/usr/lib/python3.6/site-packages/vdsm/storage/clusterlock.py", line 306, in initLock

    raise se.ClusterLockInitError()

vdsm.storage.exception.ClusterLockInitError: Could not initialize cluster lock: ()

2021-01-24 14:43:15,536-0700 INFO  (jsonrpc/5) [storage.TaskManager.Task] (Task='b9870145-e55b-4fa9-a07b-5167f763ec0d') aborting: Task is aborted: 'value=Could not initialize cluster lock: () abortedcode=701' (task:1190)

2021-01-24 14:43:15,537-0700 ERROR (jsonrpc/5) [storage.Dispatcher] FINISH createStorageDomain error=Could not initialize cluster lock: () (dispatcher:83)

2021-01-24 14:43:15,537-0700 INFO  (jsonrpc/5) [jsonrpc.JsonRpcServer] RPC call StorageDomain.create failed (error 701) in 1.16 seconds (__init__:312)

2021-01-24 14:43:15,708-0700 INFO  (jsonrpc/0) [vdsm.api] START disconnectStorageServer(domType=1, spUUID='00000000-0000-0000-0000-000000000000', conList=[{'password': '********', 'protocol_version': 'auto', 'port': '', 'iqn': '', 'connection': 'dongle.co.slakin.net:/exports/00/storage', 'ipv6_enabled': 'false', 'id': 'ca248090-f338-49b2-9a4d-9b3987a2f75e', 'user': '', 'tpgt': '1'}], options=None) from=::ffff:172.16.1.29,45646, flow_id=169aaae0-c187-4089-9b90-a8ddeec48dc2, task_id=4363b864-5954-4bac-99d3-e197e6d12400 (api:48)

2021-01-24 14:43:15,709-0700 INFO  (jsonrpc/0) [storage.Mount] unmounting /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage (mount:215)

2021-01-24 14:43:15,835-0700 INFO  (jsonrpc/0) [storage.StorageDomainCache] Refreshing storage domain cache (resize=False) (sdc:80)

2021-01-24 14:43:15,836-0700 INFO  (jsonrpc/0) [storage.ISCSI] Scanning iSCSI devices (iscsi:442)

2021-01-24 14:43:15,952-0700 INFO  (jsonrpc/0) [storage.ISCSI] Scanning iSCSI devices: 0.12 seconds (utils:390)

2021-01-24 14:43:15,952-0700 INFO  (jsonrpc/0) [storage.HBA] Scanning FC devices (hba:60)

2021-01-24 14:43:16,223-0700 INFO  (jsonrpc/0) [storage.HBA] Scanning FC devices: 0.27 seconds (utils:390)

2021-01-24 14:43:16,283-0700 INFO  (jsonrpc/0) [storage.StorageDomainCache] Refreshing storage domain cache: 0.45 seconds (utils:390)

2021-01-24 14:43:16,284-0700 INFO  (jsonrpc/0) [vdsm.api] FINISH disconnectStorageServer return={'statuslist': [{'id': 'ca248090-f338-49b2-9a4d-9b3987a2f75e', 'status': 0}]} from=::ffff:172.16.1.29,45646, flow_id=169aaae0-c187-4089-9b90-a8ddeec48dc2, task_id=4363b864-5954-4bac-99d3-e197e6d12400 (api:54)

^C




supervdsm.log shows a bit more info:

[root@brick vdsm]# cat  supervdsm.log 

 

MainProcess|mpathhealth::DEBUG::2021-01-24 14:43:10,237::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call dmsetup_run_status with ('multipath',) {}

MainProcess|mpathhealth::DEBUG::2021-01-24 14:43:10,237::commands::153::common.commands::(start) /usr/bin/taskset --cpu-list 0-3 /usr/sbin/dmsetup status --target multipath (cwd None)

MainProcess|mpathhealth::DEBUG::2021-01-24 14:43:10,269::commands::98::common.commands::(run) SUCCESS: <err> = b''; <rc> = 0

MainProcess|mpathhealth::DEBUG::2021-01-24 14:43:10,270::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return dmsetup_run_status with b''

MainProcess|jsonrpc/3::DEBUG::2021-01-24 14:43:13,904::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call mount with (<vdsm.supervdsm_server._SuperVdsm object at 0x7ff15f423588>, 'dongle.co.slakin.net:/exports/00/storage', '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage') {'mntOpts': 'soft,nosharecache,timeo=100,retrans=3,nolock', 'vfstype': 'nfs', 'cgroup': None}

MainProcess|jsonrpc/3::DEBUG::2021-01-24 14:43:13,904::commands::211::root::(execCmd) /usr/bin/taskset --cpu-list 0-3 /usr/bin/mount -t nfs -o soft,nosharecache,timeo=100,retrans=3,nolock dongle.co.slakin.net:/exports/00/storage /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage (cwd None)

MainProcess|jsonrpc/3::DEBUG::2021-01-24 14:43:14,020::commands::224::root::(execCmd) SUCCESS: <err> = b''; <rc> = 0

MainProcess|jsonrpc/3::DEBUG::2021-01-24 14:43:14,021::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return mount with None

MainProcess|jsonrpc/3::DEBUG::2021-01-24 14:43:14,093::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call validateAccess with (<vdsm.supervdsm_server._SuperVdsm object at 0x7ff15f423588>, 'vdsm', ('kvm',), '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage', 7) {}

MainProcess|jsonrpc/3::DEBUG::2021-01-24 14:43:14,114::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return validateAccess with None

MainProcess|jsonrpc/3::DEBUG::2021-01-24 14:43:14,116::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call validateAccess with (<vdsm.supervdsm_server._SuperVdsm object at 0x7ff15f423588>, 'qemu', ('qemu', 'kvm'), '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage', 5) {}

MainProcess|jsonrpc/3::DEBUG::2021-01-24 14:43:14,142::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return validateAccess with None

MainProcess|jsonrpc/5::DEBUG::2021-01-24 14:43:14,498::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call hbaRescan with (<vdsm.supervdsm_server._SuperVdsm object at 0x7ff15f423588>,) {}

MainProcess|jsonrpc/5::DEBUG::2021-01-24 14:43:14,498::commands::153::common.commands::(start) /usr/bin/taskset --cpu-list 0-3 /usr/libexec/vdsm/fc-scan (cwd None)

MainProcess|jsonrpc/5::DEBUG::2021-01-24 14:43:14,766::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return hbaRescan with None

MainProcess|jsonrpc/5::DEBUG::2021-01-24 14:43:15,080::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call validateAccess with (<vdsm.supervdsm_server._SuperVdsm object at 0x7ff15f423588>, 'vdsm', ('kvm',), '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage', 7) {}

MainProcess|jsonrpc/5::DEBUG::2021-01-24 14:43:15,100::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return validateAccess with None

MainProcess|jsonrpc/5::DEBUG::2021-01-24 14:43:15,102::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call validateAccess with (<vdsm.supervdsm_server._SuperVdsm object at 0x7ff15f423588>, 'qemu', ('qemu', 'kvm'), '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage', 5) {}

MainProcess|jsonrpc/5::DEBUG::2021-01-24 14:43:15,123::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return validateAccess with None

MainProcess|jsonrpc/0::DEBUG::2021-01-24 14:43:15,710::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call umount with (<vdsm.supervdsm_server._SuperVdsm object at 0x7ff15f423588>, '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage') {'force': True, 'lazy': True, 'freeloop': False}

MainProcess|jsonrpc/0::DEBUG::2021-01-24 14:43:15,711::commands::211::root::(execCmd) /usr/bin/taskset --cpu-list 0-3 /usr/bin/umount -f -l /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage (cwd None)

MainProcess|jsonrpc/0::DEBUG::2021-01-24 14:43:15,781::commands::224::root::(execCmd) SUCCESS: <err> = b''; <rc> = 0

MainProcess|jsonrpc/0::DEBUG::2021-01-24 14:43:15,781::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return umount with None

MainProcess|jsonrpc/0::DEBUG::2021-01-24 14:43:15,953::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call hbaRescan with (<vdsm.supervdsm_server._SuperVdsm object at 0x7ff15f423588>,) {}

MainProcess|jsonrpc/0::DEBUG::2021-01-24 14:43:15,954::commands::153::common.commands::(start) /usr/bin/taskset --cpu-list 0-3 /usr/libexec/vdsm/fc-scan (cwd None)

MainProcess|jsonrpc/0::DEBUG::2021-01-24 14:43:16,222::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return hbaRescan with None

MainProcess|mpathhealth::DEBUG::2021-01-24 14:43:20,272::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call dmsetup_run_status with ('multipath',) {}

MainProcess|mpathhealth::DEBUG::2021-01-24 14:43:20,272::commands::153::common.commands::(start) /usr/bin/taskset --cpu-list 0-3 /usr/sbin/dmsetup status --target multipath (cwd None)

MainProcess|mpathhealth::DEBUG::2021-01-24 14:43:20,304::commands::98::common.commands::(run) SUCCESS: <err> = b''; <rc> = 0

MainProcess|mpathhealth::DEBUG::2021-01-24 14:43:20,305::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return dmsetup_run_status with b''

**snip - the remainder of the log is just the mpathhealth thread repeating the same "dmsetup status --target multipath" poll every 10 seconds through 14:49:31**


And last but not least, sanlock.log shows the smoking gun:

[root@brick log]# tail sanlock.log 

2021-01-24 14:34:40 226677 [1366]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/9aa78da8-c391-46eb-9fcd-426734b6f431/dom_md/ids

2021-01-24 14:34:40 226677 [1366]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.

2021-01-24 14:43:15 227192 [1365]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/a9ab8829-14bd-45cc-a57b-03b01e96f407/dom_md/ids

2021-01-24 14:43:15 227192 [1365]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.

2021-01-24 14:55:43 227940 [1366]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/adecf2ee-53d0-4ccd-a02c-ac1f4edf3ffa/dom_md/ids

2021-01-24 14:55:43 227940 [1366]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.

2021-01-24 15:05:47 228543 [1328]: helper pid 1333 term signal 15

2021-01-24 15:05:47 228544 [120124]: sanlock daemon started 3.8.2 host 3b903780-4f79-1018-816e-aeb2724778a7 (brick.co.slakin.net)

2021-01-24 15:07:52 228669 [120129]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids

2021-01-24 15:07:52 228669 [120129]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.

[root@brick log]# 


I re-mounted the NFS share and switched to the sanlock user. I can see that sanlock is in the kvm group, yet it cannot open the file.

[root@brick log]# mkdir /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage

[root@brick log]# mount -t nfs  -o rw,relatime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,soft,nosharecache,proto=tcp,timeo=100,retrans=3,sec=sys,clientaddr=172.16.1.49,local_lock=none,addr=172.16.1.58 dongle.co.slakin.net:/exports/00/storage /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage

[root@brick log]# su - sanlock -s /bin/bash

Last login: Sun Jan 24 14:58:42 MST 2021 on pts/0

nodectl must be run as root!

nodectl must be run as root!

[sanlock@brick ~]$ cd /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage

[sanlock@brick dongle.co.slakin.net:_exports_00_storage]$ ls -l /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids

-rw-rw----. 1 vdsm kvm 0 Jan 24 15:07 /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids

[sanlock@brick dongle.co.slakin.net:_exports_00_storage]$ id

uid=179(sanlock) gid=179(sanlock) groups=179(sanlock),6(disk),36(kvm),107(qemu) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023

[sanlock@brick dongle.co.slakin.net:_exports_00_storage]$ cat /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids

cat: '/rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids': Permission denied

[sanlock@brick dongle.co.slakin.net:_exports_00_storage]$ 


Switching over to the NFS server (dongle), I changed the other/world permissions:

root@dongle:/exports/00/storage# chmod o+r 01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids

root@dongle:/exports/00/storage# ls -l !$

ls -l 01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids

-rw-rw-r-- 1 36 36 0 Jan 24 15:07 01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids

root@dongle:/exports/00/storage# 


After changing permissions on the file, sanlock on brick (the oVirt node) can now cat it:

[sanlock@brick dongle.co.slakin.net:_exports_00_storage]$ cat /rhev/data-center/mnt/dongle.co.slakin.net:_exports_00_storage/01bfd183-b46e-432d-a5e4-f0cf51e5df6e/dom_md/ids

[sanlock@brick dongle.co.slakin.net:_exports_00_storage]$ echo $?

0

[sanlock@brick dongle.co.slakin.net:_exports_00_storage]$ 



So in short, even though sanlock is a member of gid 36 (kvm) and the file ownership and permissions appear to be set correctly, sanlock cannot read files that it should be able to.

..Matt


On Thu, Jan 21, 2021 at 6:27 AM Nir Soffer <nsoffer@redhat.com> wrote:
On Wed, Jan 20, 2021 at 7:36 PM Matt Snow <mattsnow@gmail.com> wrote:
>
>
>
> On Wed, Jan 20, 2021 at 2:46 AM Nir Soffer <nsoffer@redhat.com> wrote:
>>
>> On Mon, Jan 18, 2021 at 8:58 AM Matt Snow <mattsnow@gmail.com> wrote:
>> >
>> > I installed ovirt node 4.4.4 as well as 4.4.5-pre and experience the same problem with both versions. The issue occurs in both cockpit UI and tmux'd CLI of ovirt-hosted-engine-setup. I get passed the point where the VM is created and running.
>> > I tried to do some debugging on my own before reaching out to this list. Any help is much appreciated!
>> >
>> > ovirt node hardware: NUC format Jetway w/ Intel N3160 (Braswell 4 cores/4threads), 8GB RAM, 64GB SSD. I understand this is underspec'd, but I believe it meets the minimum requirements.
>> >
>> > NFS server:
>> > * Ubuntu 19.10 w/ ZFS share w/ 17TB available space.
>>
>> We don't test ZFS, but since  you tried also non-ZFS server with same issue, the
>> issue is probably not ZFS.
>>
>> > * NFS share settings are just 'rw=@172.16.1.0/24' but have also tried 'rw,sec=sys,anon=0' and '@172.16.1.0/24,insecure'
>>
>> You are missing anonuid=36,anongid=36. This should not affect sanlock, but
>> you will have issues with libvirt without this.
>
>
> I found this setting in another email thread and have applied it
>
> root@stumpy:/tanker/ovirt/host_storage# zfs get all tanker/ovirt| grep nfs
>
> tanker/ovirt  sharenfs              rw,anonuid=36,anongid=36  local
>
> root@stumpy:/tanker/ovirt/host_storage#
>>
>>
>> Here is working export from my test system:
>>
>> /export/00 *(rw,sync,anonuid=36,anongid=36,no_subtree_check)
>>
> I have updated both ZFS server and the Ubuntu 20.04 NFS server to use the above settings.
>  # ZFS Server
>
> root@stumpy:/tanker/ovirt/host_storage# zfs set sharenfs="rw,sync,anonuid=36,anongid=36,no_subtree_check" tanker/ovirt
>
> root@stumpy:/tanker/ovirt/host_storage# zfs get all tanker/ovirt| grep nfs
>
> tanker/ovirt  sharenfs              rw,sync,anonuid=36,anongid=36,no_subtree_check  local
>
> root@stumpy:/tanker/ovirt/host_storage#
>
> # Ubuntu laptop
>
> ls -ld /exports/host_storage
>
> drwxr-xr-x 2 36 36 4096 Jan 16 19:42 /exports/host_storage
>
> root@msnowubntlt:/exports/host_storage# showmount -e dongle
>
> Export list for dongle:
>
> /exports/host_storage *
>
> root@msnowubntlt:/exports/host_storage# grep host_storage /etc/exports
>
> /exports/host_storage *(rw,sync,anonuid=36,anongid=36,no_subtree_check)
>
> root@msnowubntlt:/exports/host_storage#
>
>
>
> Interesting point - upon being re-prompted to configure storage after initial failure I provided *different* NFS server information (the ubuntu laptop)
> **snip**
>
> [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Activate storage domain]
>
> [ ERROR ] ovirtsdk4.Error: Fault reason is "Operation Failed". Fault detail is "[]". HTTP response code is 400.
>
> [ ERROR ] fatal: [localhost]: FAILED! => {"changed": false, "msg": "Fault reason is \"Operation Failed\". Fault detail is \"[]\". HTTP response code is 400."}
>
>           Please specify the storage you would like to use (glusterfs, iscsi, fc, nfs)[nfs]: nfs
>
>           Please specify the nfs version you would like to use (auto, v3, v4, v4_0, v4_1, v4_2)[auto]:
>
>           Please specify the full shared storage connection path to use (example: host:/path): dongle:/exports/host_storage
>
>           If needed, specify additional mount options for the connection to the hosted-engine storagedomain (example: rsize=32768,wsize=32768) []:
>
> [ INFO  ] Creating Storage Domain
>
> [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Execute just a specific set of steps]
>
> **snip**
>
> [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Activate storage domain]
>
> [ ERROR ] ovirtsdk4.Error: Fault reason is "Operation Failed". Fault detail is "[]". HTTP response code is 400.
>
> [ ERROR ] fatal: [localhost]: FAILED! => {"changed": false, "msg": "Fault reason is \"Operation Failed\". Fault detail is \"[]\". HTTP response code is 400."}
>
>           Please specify the storage you would like to use (glusterfs, iscsi, fc, nfs)[nfs]:
>
> **snip**
> Upon checking mounts I see that the original server is still being used.
>
> [root@brick ~]# mount | grep nfs
>
> sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
>
> stumpy:/tanker/ovirt/host_storage on /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage type nfs4 (rw,relatime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,soft,nosharecache,proto=tcp,timeo=100,retrans=3,sec=sys,clientaddr=172.16.1.49,local_lock=none,addr=172.16.1.50)

Using oVirt Node and the hosted engine makes everything more complicated
and harder to debug.

I would create an engine VM for testing on some machine and add
this host as a regular host. It will be easier to debug this way, since
you don't have to reinstall everything from scratch on every failure.

Once we sort out the issue with the NFS/ZFS storage, using oVirt Node and
the hosted engine makes sense for better availability and to save hardware
resources.

Also, can you explain how to build and configure an identical Ubuntu
storage server? I can try to test it in a VM if you give a detailed
description.
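
Something at this level of detail would be enough; below is just my guess
at the setup, assuming a stock nfs-kernel-server install on Ubuntu 20.04,
so please correct whatever differs:

  apt install nfs-kernel-server
  mkdir -p /exports/host_storage
  chown 36:36 /exports/host_storage
  echo '/exports/host_storage *(rw,sync,anonuid=36,anongid=36,no_subtree_check)' >> /etc/exports
  exportfs -ra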

Maybe this is an issue with AppArmor?
https://linuxconfig.org/how-to-disable-enable-selinux-on-ubuntu-20-04-focal-fossa-linux

Did you try to disable it on the storage server, or to enable SELinux?

>
> [root@brick ~]#
>
>
>>
>> > * The target directory is always empty and chown'd 36:36 with 0755 permissions.
>>
>> Correct.
>>
>> > * I have tried using both IP addresses and DNS names. Forward and reverse DNS work from the oVirt host and other systems on the network.
>> > * The NFS share always gets mounted successfully on the oVirt node system.
>> > * I have tried the auto and v3 NFS versions in various other combinations.
>> > * I have also tried setting up an NFS server on a non-ZFS-backed storage system that is open to any host, and I get the same errors as shown below.
>> > * I ran nfs-check.py script without issue against both NFS servers and followed other verification steps listed on https://www.ovirt.org/develop/troubleshooting-nfs-storage-issues.html
>> >
>> > ***Snip from ovirt-hosted-engine-setup***
>> >           Please specify the storage you would like to use (glusterfs, iscsi, fc, nfs)[nfs]: nfs
>> >           Please specify the nfs version you would like to use (auto, v3, v4, v4_0, v4_1, v4_2)[auto]:
>> >           Please specify the full shared storage connection path to use (example: host:/path): stumpy.mydomain.com:/tanker/ovirt/host_storage
>> >           If needed, specify additional mount options for the connection to the hosted-engine storagedomain (example: rsize=32768,wsize=32768) []: rw
>> > [ INFO  ] Creating Storage Domain
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Execute just a specific set of steps]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Force facts gathering]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Wait for the storage interface to be up]
>> > [ INFO  ] skipping: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Check local VM dir stat]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Enforce local VM dir existence]
>> > [ INFO  ] skipping: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : include_tasks]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Obtain SSO token using username/password credentials]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Fetch host facts]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Fetch cluster ID]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Fetch cluster facts]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Fetch Datacenter facts]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Fetch Datacenter ID]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Fetch Datacenter name]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Add NFS storage domain]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Add glusterfs storage domain]
>> > [ INFO  ] skipping: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Add iSCSI storage domain]
>> > [ INFO  ] skipping: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Add Fibre Channel storage domain]
>> > [ INFO  ] skipping: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Get storage domain details]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Find the appliance OVF]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Parse OVF]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Get required size]
>> > [ INFO  ] ok: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Remove unsuitable storage domain]
>> > [ INFO  ] skipping: [localhost]
>> > [ INFO  ] skipping: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Check storage domain free space]
>> > [ INFO  ] skipping: [localhost]
>> > [ INFO  ] TASK [ovirt.ovirt.hosted_engine_setup : Activate storage domain]
>> > [ ERROR ] ovirtsdk4.Error: Fault reason is "Operation Failed". Fault detail is "[]". HTTP response code is 400.
>> > [ ERROR ] fatal: [localhost]: FAILED! => {"changed": false, "msg": "Fault reason is \"Operation Failed\". Fault detail is \"[]\". HTTP response code is 400."}
>> >           Please specify the storage you would like to use (glusterfs, iscsi, fc, nfs)[nfs]:
>> >
>> > ***End snippet***
>> >
>> > The relevant section from /var/log/ovirt-hosted-engine-setup/ovirt-hosted-engine-setup-ansible-create_storage_domain-20210116195220-bnfg1w.log:
>> > **Snip**
>> > 2021-01-16 19:53:56,010-0700 DEBUG ansible on_any args <ansible.executor.task_result.TaskResult object at 0x7f11a04c8320>  kwargs
>> > 2021-01-16 19:53:57,219-0700 INFO ansible task start {'status': 'OK', 'ansible_type': 'task', 'ansible_playbook': '/usr/share/ovirt-hosted-engine-setup/ansible/trigger_role.yml', 'ansible_task': 'ovirt.ovirt.hosted_engine_setup : Activate storage domain'}
>> > 2021-01-16 19:53:57,220-0700 DEBUG ansible on_any args TASK: ovirt.ovirt.hosted_engine_setup : Activate storage domain  kwargs is_conditional:False
>> > 2021-01-16 19:53:57,221-0700 DEBUG ansible on_any args localhost TASK: ovirt.ovirt.hosted_engine_setup : Activate storage domain  kwargs
>> > 2021-01-16 19:54:00,346-0700 DEBUG var changed: host "localhost" var "ansible_play_hosts" type "<class 'list'>" value: "[]"
>> > 2021-01-16 19:54:00,347-0700 DEBUG var changed: host "localhost" var "ansible_play_batch" type "<class 'list'>" value: "[]"
>> > 2021-01-16 19:54:00,348-0700 DEBUG var changed: host "localhost" var "play_hosts" type "<class 'list'>" value: "[]"
>> > 2021-01-16 19:54:00,349-0700 ERROR ansible failed {
>> >     "ansible_host": "localhost",
>> >     "ansible_playbook": "/usr/share/ovirt-hosted-engine-setup/ansible/trigger_role.yml",
>> >     "ansible_result": {
>> >         "_ansible_no_log": false,
>> >         "changed": false,
>> >         "exception": "Traceback (most recent call last):\n  File \"/tmp/ansible_ovirt_storage_domain_payload_f38n25ab/ansible_ovirt_storage_domain_payload.zip/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py\", line 783, in main\n  File \"/tmp/ansible_ovirt_storage_domain_payload_f38n25ab/ansible_ovirt_storage_domain_payload.zip/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py\", line 638, in post_create_check\n  File \"/usr/lib64/python3.6/site-packages/ovirtsdk4/services.py\", line 3647, in add\n    return self._internal_add(storage_domain, headers, query, wait)\n  File \"/usr/lib64/python3.6/site-packages/ovirtsdk4/service.py\", line 232, in _internal_add\n    return future.wait() if wait else future\n  File \"/usr/lib64/python3.6/site-packages/ovirtsdk4/service.py\", line 55, in wait\n    return self._code(response)\n  File \"/usr/lib64/python3.6/site-packages/ovirtsdk4/service.py\", line 229, in callback\n    self._check_fault(respon
>> >  se)\n  File \"/usr/lib64/python3.6/site-packages/ovirtsdk4/service.py\", line 132, in _check_fault\n    self._raise_error(response, body)\n  File \"/usr/lib64/python3.6/site-packages/ovirtsdk4/service.py\", line 118, in _raise_error\n    raise error\novirtsdk4.Error: Fault reason is \"Operation Failed\". Fault detail is \"[]\". HTTP response code is 400.\n",
>> >         "invocation": {
>> >             "module_args": {
>> >                 "backup": null,
>> >                 "comment": null,
>> >                 "critical_space_action_blocker": null,
>> >                 "data_center": "Default",
>> >                 "description": null,
>> >                 "destroy": null,
>> >                 "discard_after_delete": null,
>> >                 "domain_function": "data",
>> >                 "fcp": null,
>> >                 "fetch_nested": false,
>> >                 "format": null,
>> >                 "glusterfs": null,
>> >                 "host": "brick.mydomain.com",
>> >                 "id": null,
>> >                 "iscsi": null,
>> >                 "localfs": null,
>> >                 "managed_block_storage": null,
>> >                 "name": "hosted_storage",
>> >                 "nested_attributes": [],
>> >                 "nfs": null,
>> >                 "poll_interval": 3,
>> >                 "posixfs": null,
>> >                 "state": "present",
>> >                 "timeout": 180,
>> >                 "wait": true,
>> >                 "warning_low_space": null,
>> >                 "wipe_after_delete": null
>> >             }
>> >         },
>> >         "msg": "Fault reason is \"Operation Failed\". Fault detail is \"[]\". HTTP response code is 400."
>> >     },
>> >     "ansible_task": "Activate storage domain",
>> >     "ansible_type": "task",
>> >     "status": "FAILED",
>> >     "task_duration": 4
>> > }
>> > 2021-01-16 19:54:00,350-0700 DEBUG ansible on_any args <ansible.executor.task_result.TaskResult object at 0x7f11a05f2d30>  kwargs ignore_errors:None
>> > 2021-01-16 19:54:00,358-0700 INFO ansible stats {
>> >     "ansible_playbook": "/usr/share/ovirt-hosted-engine-setup/ansible/trigger_role.yml",
>> >     "ansible_playbook_duration": "01:35 Minutes",
>> >     "ansible_result": "type: <class 'dict'>\nstr: {'localhost': {'ok': 23, 'failures': 1, 'unreachable': 0, 'changed': 0, 'skipped': 7, 'rescued': 0, 'ignored': 0}}",
>> >     "ansible_type": "finish",
>> >     "status": "FAILED"
>> > }
>> > 2021-01-16 19:54:00,359-0700 INFO SUMMARY:
>> > Duration        Task Name
>> > --------        --------
>> > [ < 1 sec ]     Execute just a specific set of steps
>> > [  00:05  ]     Force facts gathering
>> > [  00:03  ]     Check local VM dir stat
>> > [  00:04  ]     Obtain SSO token using username/password credentials
>> > [  00:04  ]     Fetch host facts
>> > [  00:02  ]     Fetch cluster ID
>> > [  00:04  ]     Fetch cluster facts
>> > [  00:04  ]     Fetch Datacenter facts
>> > [  00:02  ]     Fetch Datacenter ID
>> > [  00:02  ]     Fetch Datacenter name
>> > [  00:04  ]     Add NFS storage domain
>> > [  00:04  ]     Get storage domain details
>> > [  00:03  ]     Find the appliance OVF
>> > [  00:03  ]     Parse OVF
>> > [  00:02  ]     Get required size
>> > [ FAILED  ]     Activate storage domain
>> > 2021-01-16 19:54:00,359-0700 DEBUG ansible on_any args <ansible.executor.stats.AggregateStats object at 0x7f11a2e3b8d0>  kwargs
>> > **End snip**
>> >
>> > The relevant section from /var/log/vdsm/vdsm.log:
>> > ***begin snip***
>> > 2021-01-16 19:53:58,439-0700 INFO  (vmrecovery) [vdsm.api] START getConnectedStoragePoolsList(options=None) from=internal, task_id=b8b21668-189e-4b68-a7f0-c2d2ebf14546 (api:48)
>> > 2021-01-16 19:53:58,439-0700 INFO  (vmrecovery) [vdsm.api] FINISH getConnectedStoragePoolsList return={'poollist': []} from=internal, task_id=b8b21668-189e-4b68-a7f0-c2d2ebf14546 (api:54)
>> > 2021-01-16 19:53:58,440-0700 INFO  (vmrecovery) [vds] recovery: waiting for storage pool to go up (clientIF:726)
>> > 2021-01-16 19:53:58,885-0700 INFO  (jsonrpc/3) [vdsm.api] START connectStorageServer(domType=1, spUUID='00000000-0000-0000-0000-000000000000', conList=[{'password': '********', 'protocol_version': 'auto', 'port': '', 'iqn': '', 'connection': 'stumpy:/tanker/ovirt/host_storage', 'ipv6_enabled': 'false', 'id': '3ffd1e3b-168e-4248-a2af-b28fbdf49eef', 'user': '', 'tpgt': '1'}], options=None) from=::ffff:192.168.222.53,41192, flow_id=592e278f, task_id=5bd52fa3-f790-4ed3-826d-c1f51e5f2291 (api:48)
>> > 2021-01-16 19:53:58,892-0700 INFO  (jsonrpc/3) [storage.StorageDomainCache] Invalidating storage domain cache (sdc:74)
>> > 2021-01-16 19:53:58,892-0700 INFO  (jsonrpc/3) [vdsm.api] FINISH connectStorageServer return={'statuslist': [{'id': '3ffd1e3b-168e-4248-a2af-b28fbdf49eef', 'status': 0}]} from=::ffff:192.168.222.53,41192, flow_id=592e278f, task_id=5bd52fa3-f790-4ed3-826d-c1f51e5f2291 (api:54)
>> > 2021-01-16 19:53:58,914-0700 INFO  (jsonrpc/6) [vdsm.api] START getStorageDomainInfo(sdUUID='54532dd4-3e5b-4885-b88e-599c81efb146', options=None) from=::ffff:192.168.222.53,41192, flow_id=592e278f, task_id=4d5a352d-6096-45b7-a4ee-b6e08ac02f7b (api:48)
>> > 2021-01-16 19:53:58,914-0700 INFO  (jsonrpc/6) [storage.StorageDomain] sdUUID=54532dd4-3e5b-4885-b88e-599c81efb146 (fileSD:535)
>> > 2021-01-16 19:53:58,918-0700 INFO  (jsonrpc/6) [vdsm.api] FINISH getStorageDomainInfo error=Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',) from=::ffff:192.168.222.53,41192, flow_id=592e278f, task_id=4d5a352d-6096-45b7-a4ee-b6e08ac02f7b (api:52)
>> > 2021-01-16 19:53:58,918-0700 ERROR (jsonrpc/6) [storage.TaskManager.Task] (Task='4d5a352d-6096-45b7-a4ee-b6e08ac02f7b') Unexpected error (task:880)
>> > Traceback (most recent call last):
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/task.py", line 887, in _run
>> >     return fn(*args, **kargs)
>> >   File "<decorator-gen-131>", line 2, in getStorageDomainInfo
>> >   File "/usr/lib/python3.6/site-packages/vdsm/common/api.py", line 50, in method
>> >     ret = func(*args, **kwargs)
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/hsm.py", line 2796, in getStorageDomainInfo
>> >     dom = self.validateSdUUID(sdUUID)
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/hsm.py", line 312, in validateSdUUID
>> >     sdDom.validate()
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/fileSD.py", line 538, in validate
>> >     raise se.StorageDomainAccessError(self.sdUUID)
>> > vdsm.storage.exception.StorageDomainAccessError: Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',)
>> > 2021-01-16 19:53:58,918-0700 INFO  (jsonrpc/6) [storage.TaskManager.Task] (Task='4d5a352d-6096-45b7-a4ee-b6e08ac02f7b') aborting: Task is aborted: "value=Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',) abortedcode=379" (task:1190)
>> > 2021-01-16 19:53:58,918-0700 ERROR (jsonrpc/6) [storage.Dispatcher] FINISH getStorageDomainInfo error=Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',) (dispatcher:83)
>> > 2021-01-16 19:53:58,919-0700 INFO  (jsonrpc/6) [jsonrpc.JsonRpcServer] RPC call StorageDomain.getInfo failed (error 379) in 0.00 seconds (__init__:312)
>> > 2021-01-16 19:53:58,956-0700 INFO  (jsonrpc/1) [vdsm.api] START createStoragePool(poolType=None, spUUID='91fb3f22-5795-11eb-ad9f-00163e3f4683', poolName='Default', masterDom='54532dd4-3e5b-4885-b88e-599c81efb146', domList=['54532dd4-3e5b-4885-b88e-599c81efb146'], masterVersion=7, lockPolicy=None, lockRenewalIntervalSec=5, leaseTimeSec=60, ioOpTimeoutSec=10, leaseRetries=3, options=None) from=::ffff:192.168.222.53,41192, flow_id=592e278f, task_id=a946357f-537a-4c9c-9040-e70a98ee5643 (api:48)
>> > 2021-01-16 19:53:58,958-0700 INFO  (jsonrpc/1) [storage.StoragePool] updating pool 91fb3f22-5795-11eb-ad9f-00163e3f4683 backend from type NoneType instance 0x7f86e45f89d0 to type StoragePoolDiskBackend instance 0x7f8680738408 (sp:157)
>> > 2021-01-16 19:53:58,958-0700 INFO  (jsonrpc/1) [storage.StoragePool] spUUID=91fb3f22-5795-11eb-ad9f-00163e3f4683 poolName=Default master_sd=54532dd4-3e5b-4885-b88e-599c81efb146 domList=['54532dd4-3e5b-4885-b88e-599c81efb146'] masterVersion=7 {'LEASERETRIES': 3, 'LEASETIMESEC': 60, 'LOCKRENEWALINTERVALSEC': 5, 'IOOPTIMEOUTSEC': 10} (sp:602)
>> > 2021-01-16 19:53:58,958-0700 INFO  (jsonrpc/1) [storage.StorageDomain] sdUUID=54532dd4-3e5b-4885-b88e-599c81efb146 (fileSD:535)
>> > 2021-01-16 19:53:58,963-0700 ERROR (jsonrpc/1) [storage.StoragePool] Unexpected error (sp:618)
>> > Traceback (most recent call last):
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/sp.py", line 613, in create
>> >     domain.validate()
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/fileSD.py", line 538, in validate
>> >     raise se.StorageDomainAccessError(self.sdUUID)
>> > vdsm.storage.exception.StorageDomainAccessError: Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',)
>> > 2021-01-16 19:53:58,963-0700 INFO  (jsonrpc/1) [vdsm.api] FINISH createStoragePool error=Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',) from=::ffff:192.168.222.53,41192, flow_id=592e278f, task_id=a946357f-537a-4c9c-9040-e70a98ee5643 (api:52)
>> > 2021-01-16 19:53:58,963-0700 ERROR (jsonrpc/1) [storage.TaskManager.Task] (Task='a946357f-537a-4c9c-9040-e70a98ee5643') Unexpected error (task:880)
>> > Traceback (most recent call last):
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/sp.py", line 613, in create
>> >     domain.validate()
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/fileSD.py", line 538, in validate
>> >     raise se.StorageDomainAccessError(self.sdUUID)
>> > vdsm.storage.exception.StorageDomainAccessError: Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',)
>> >
>> > During handling of the above exception, another exception occurred:
>> >
>> > Traceback (most recent call last):
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/task.py", line 887, in _run
>> >     return fn(*args, **kargs)
>> >   File "<decorator-gen-31>", line 2, in createStoragePool
>> >   File "/usr/lib/python3.6/site-packages/vdsm/common/api.py", line 50, in method
>> >     ret = func(*args, **kwargs)
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/hsm.py", line 1027, in createStoragePool
>> >     leaseParams)
>> >   File "/usr/lib/python3.6/site-packages/vdsm/storage/sp.py", line 619, in create
>> >     raise se.StorageDomainAccessError(sdUUID)
>> > vdsm.storage.exception.StorageDomainAccessError: Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',)
>> > 2021-01-16 19:53:58,964-0700 INFO  (jsonrpc/1) [storage.TaskManager.Task] (Task='a946357f-537a-4c9c-9040-e70a98ee5643') aborting: Task is aborted: "value=Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',) abortedcode=379" (task:1190)
>> > 2021-01-16 19:53:58,964-0700 ERROR (jsonrpc/1) [storage.Dispatcher] FINISH createStoragePool error=Domain is either partially accessible or entirely inaccessible: ('54532dd4-3e5b-4885-b88e-599c81efb146',) (dispatcher:83)
>> > 2021-01-16 19:53:58,965-0700 INFO  (jsonrpc/1) [jsonrpc.JsonRpcServer] RPC call StoragePool.create failed (error 379) in 0.01 seconds (__init__:312)
>> > 2021-01-16 19:54:01,923-0700 INFO  (jsonrpc/5) [api.host] START getStats() from=::ffff:192.168.222.53,41192 (api:48)
>> > 2021-01-16 19:54:01,964-0700 INFO  (jsonrpc/7) [api.host] START getAllVmStats() from=::ffff:192.168.222.53,41192 (api:48)
>> > 2021-01-16 19:54:01,966-0700 INFO  (jsonrpc/7) [api.host] FINISH getAllVmStats return={'status': {'code': 0, 'message': 'Done'}, 'statsList': (suppressed)} from=::ffff:192.168.222.53,41192 (api:54)
>> > 2021-01-16 19:54:01,977-0700 INFO  (jsonrpc/5) [vdsm.api] START repoStats(domains=()) from=::ffff:192.168.222.53,41192, task_id=7a6c8536-3f1e-41b3-9ab7-bb58a5e75fac (api:48)
>> > 2021-01-16 19:54:01,978-0700 INFO  (jsonrpc/5) [vdsm.api] FINISH repoStats return={} from=::ffff:192.168.222.53,41192, task_id=7a6c8536-3f1e-41b3-9ab7-bb58a5e75fac (api:54)
>> > 2021-01-16 19:54:01,980-0700 INFO  (jsonrpc/5) [vdsm.api] START multipath_health() from=::ffff:192.168.222.53,41192, task_id=b8af45b5-6488-4940-b7d0-1d11b5f76db4 (api:48)
>> > 2021-01-16 19:54:01,981-0700 INFO  (jsonrpc/5) [vdsm.api] FINISH multipath_health return={} from=::ffff:192.168.222.53,41192, task_id=b8af45b5-6488-4940-b7d0-1d11b5f76db4 (api:54)
>> > 2021-01-16 19:54:01,996-0700 INFO  (jsonrpc/5) [api.host] FINISH getStats return={'status': {'code': 0, 'message': 'Done'}, 'info': (suppressed)} from=::ffff:192.168.222.53,41192 (api:54)
>> > ***end snip***
>>