[root@brick log]# cat sanlock.log
2021-01-15 18:18:48 3974 [36280]: sanlock daemon started 3.8.2 host 3b903780-4f79-1018-816e-aeb2724778a7 ()
2021-01-15 19:17:31 7497 [36293]: s1 lockspace 54532dd4-3e5b-4885-b88e-599c81efb146:250:/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/54532dd4-3e5b-4885-b88e-599c81efb146/dom_md/ids:0
2021-01-15 19:17:31 7497 [50873]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/54532dd4-3e5b-4885-b88e-599c81efb146/dom_md/ids
2021-01-15 19:17:31 7497 [50873]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.
2021-01-15 19:17:31 7497 [50873]: s1 open_disk /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/54532dd4-3e5b-4885-b88e-599c81efb146/dom_md/ids error -13
2021-01-15 19:17:32 7498 [36293]: s1 add_lockspace fail result -19
2021-01-18 07:23:58 18 [1318]: sanlock daemon started 3.8.2 host 3b903780-4f79-1018-816e-aeb2724778a7 ()
2021-01-18 08:43:25 4786 [1359]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/ids
2021-01-18 08:43:25 4786 [1359]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.
2021-01-18 09:13:17 6578 [1358]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/d3aec1fd-57cb-4d48-86b9-0a89ae3741a7/dom_md/ids
2021-01-18 09:13:17 6578 [1358]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.
2021-01-18 09:19:45 6966 [1359]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/fa5434cf-3e05-45d5-b32e-4948903ee2b4/dom_md/ids
2021-01-18 09:19:45 6966 [1359]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.
2021-01-18 09:21:16 7057 [1358]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/b0f7b773-7e37-4b6b-a467-64230d5f7391/dom_md/ids
2021-01-18 09:21:16 7057 [1358]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.
2021-01-18 09:49:42 8763 [1359]: s1 lockspace b0f7b773-7e37-4b6b-a467-64230d5f7391:250:/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/b0f7b773-7e37-4b6b-a467-64230d5f7391/dom_md/ids:0
2021-01-18 09:49:42 8763 [54250]: open error -13 EACCES: no permission to open /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/b0f7b773-7e37-4b6b-a467-64230d5f7391/dom_md/ids
2021-01-18 09:49:42 8763 [54250]: check that daemon user sanlock 179 group sanlock 179 has access to disk or file.
2021-01-18 09:49:42 8763 [54250]: s1 open_disk /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/b0f7b773-7e37-4b6b-a467-64230d5f7391/dom_md/ids error -13
2021-01-18 09:49:43 8764 [1359]: s1 add_lockspace fail result -19
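
Worth noting: sanlock normally opens the ids file with O_DIRECT, so a plain read or touch as the sanlock user (as in the session below) does not exercise quite the same path. A closer manual reproduction would be a direct-I/O read as the daemon user — a sketch, with the path taken from the last log entry above:

    sudo -u sanlock dd if=/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/b0f7b773-7e37-4b6b-a467-64230d5f7391/dom_md/ids of=/dev/null bs=4096 count=1 iflag=direct

If that fails with "Permission denied" while the same command as vdsm succeeds, the server is rejecting the sanlock user's credentials specifically.
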
[root@brick log]# su - sanlock -s /bin/bash
Last login: Tue Jan 19 09:05:25 MST 2021 on pts/2
nodectl must be run as root!
nodectl must be run as root!
[sanlock@brick ~]$ grep sanlock /etc/group
disk:x:6:sanlock
kvm:x:36:qemu,ovirtimg,sanlock
sanlock:x:179:vdsm
qemu:x:107:vdsm,ovirtimg,sanlock
[sanlock@brick ~]$ cd /rhev/data-center/mnt/stumpy\:_tanker_ovirt_host__storage/ && touch file.txt && ls -l file.txt
-rw-rw-rw-. 1 sanlock sanlock 0 Jan 19 09:07 file.txt
[sanlock@brick stumpy:_tanker_ovirt_host__storage]$ ls -ltra
total 2
drwxr-xr-x. 3 vdsm kvm 48 Jan 18 09:48 ..
drwxrwxrwx. 2 vdsm kvm 3 Jan 19 09:07 .
-rw-rw-rw-. 1 sanlock sanlock 0 Jan 19 09:07 file.txt
[sanlock@brick stumpy:_tanker_ovirt_host__storage]$
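
One caveat about the su test above (a sketch of an extra check): a fresh login shell picks up the sanlock user's current supplementary groups, but the running daemon keeps whatever groups it started with. Comparing

    id sanlock
    grep ^Groups /proc/$(pidof sanlock)/status

shows whether the daemon process actually holds gid 36 (kvm); if the group was added after sanlock started, the daemon needs a restart to pick it up.
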
On Tue, Jan 19, 2021 at 6:44 AM Yedidyah Bar David <didi(a)redhat.com> wrote:
On Mon, Jan 18, 2021 at 6:01 PM Matt Snow <mattsnow(a)gmail.com> wrote:
>
> Hi Didi,
> I did log cleanup and am re-running ovirt-hosted-engine-cleanup && ovirt-hosted-engine-setup to get you cleaner log files.
>
> searching for host_storage in vdsm.log...
> **snip**
> 2021-01-18 08:43:18,842-0700 INFO (jsonrpc/3) [api.host] FINISH getStats return={'status': {'code': 0, 'message': 'Done'}, 'info': (suppressed)} from=::ffff:192.168.222.53,39612 (api:54)
> 2021-01-18 08:43:19,963-0700 INFO (vmrecovery) [vdsm.api] START getConnectedStoragePoolsList(options=None) from=internal, task_id=fb80c883-2447-4ed2-b344-aa0c0fb65809 (api:48)
> 2021-01-18 08:43:19,963-0700 INFO (vmrecovery) [vdsm.api] FINISH getConnectedStoragePoolsList return={'poollist': []} from=internal, task_id=fb80c883-2447-4ed2-b344-aa0c0fb65809 (api:54)
> 2021-01-18 08:43:19,964-0700 INFO (vmrecovery) [vds] recovery: waiting for storage pool to go up (clientIF:726)
> 2021-01-18 08:43:20,441-0700 INFO (jsonrpc/4) [vdsm.api] START connectStorageServer(domType=1, spUUID='00000000-0000-0000-0000-000000000000', conList=[{'password': '********', 'protocol_version': 'auto', 'port': '', 'iqn': '', 'connection': 'stumpy:/tanker/ovirt/host_storage', 'ipv6_enabled': 'false', 'id': '00000000-0000-0000-0000-000000000000', 'user': '', 'tpgt': '1'}], options=None) from=::ffff:192.168.222.53,39612, flow_id=2227465c-5040-4199-b1f9-f5305b10b5e5, task_id=032afa50-381a-44af-a067-d25bcc224355 (api:48)
> 2021-01-18 08:43:20,446-0700 INFO (jsonrpc/4) [storage.StorageServer.MountConnection] Creating directory '/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage' (storageServer:167)
> 2021-01-18 08:43:20,446-0700 INFO (jsonrpc/4) [storage.fileUtils] Creating directory: /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage mode: None (fileUtils:201)
> 2021-01-18 08:43:20,447-0700 INFO (jsonrpc/4) [storage.Mount] mounting stumpy:/tanker/ovirt/host_storage at /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage (mount:207)
> 2021-01-18 08:43:21,271-0700 INFO (jsonrpc/4) [IOProcessClient] (Global) Starting client (__init__:340)
> 2021-01-18 08:43:21,313-0700 INFO (ioprocess/51124) [IOProcess] (Global) Starting ioprocess (__init__:465)
> 2021-01-18 08:43:21,373-0700 INFO (jsonrpc/4) [storage.StorageDomainCache] Invalidating storage domain cache (sdc:74)
> 2021-01-18 08:43:21,373-0700 INFO (jsonrpc/4) [vdsm.api] FINISH connectStorageServer return={'statuslist': [{'id': '00000000-0000-0000-0000-000000000000', 'status': 0}]} from=::ffff:192.168.222.53,39612, flow_id=2227465c-5040-4199-b1f9-f5305b10b5e5, task_id=032afa50-381a-44af-a067-d25bcc224355 (api:54)
> 2021-01-18 08:43:21,497-0700 INFO (jsonrpc/5) [vdsm.api] START getStorageDomainsList(spUUID='00000000-0000-0000-0000-000000000000', domainClass=1, storageType='', remotePath='stumpy:/tanker/ovirt/host_storage', options=None) from=::ffff:192.168.222.53,39612, flow_id=2227465c-5040-4199-b1f9-f5305b10b5e5, task_id=e37eb000-13da-440f-9197-07495e53ce52 (api:48)
> 2021-01-18 08:43:21,497-0700 INFO (jsonrpc/5) [storage.StorageDomainCache] Refreshing storage domain cache (resize=True) (sdc:80)
> 2021-01-18 08:43:21,498-0700 INFO (jsonrpc/5) [storage.ISCSI] Scanning iSCSI devices (iscsi:442)
> 2021-01-18 08:43:21,628-0700 INFO (jsonrpc/5) [storage.ISCSI] Scanning iSCSI devices: 0.13 seconds (utils:390)
> 2021-01-18 08:43:21,629-0700 INFO (jsonrpc/5) [storage.HBA] Scanning FC devices (hba:60)
> 2021-01-18 08:43:21,908-0700 INFO (jsonrpc/5) [storage.HBA] Scanning FC devices: 0.28 seconds (utils:390)
> 2021-01-18 08:43:21,969-0700 INFO (jsonrpc/5) [storage.Multipath] Resizing multipath devices (multipath:104)
> 2021-01-18 08:43:21,975-0700 INFO (jsonrpc/5) [storage.Multipath] Resizing multipath devices: 0.01 seconds (utils:390)
> 2021-01-18 08:43:21,975-0700 INFO (jsonrpc/5) [storage.StorageDomainCache] Refreshing storage domain cache: 0.48 seconds (utils:390)
> 2021-01-18 08:43:22,167-0700 INFO (tmap-0/0) [IOProcessClient] (stumpy:_tanker_ovirt_host__storage) Starting client (__init__:340)
> 2021-01-18 08:43:22,204-0700 INFO (ioprocess/51144) [IOProcess] (stumpy:_tanker_ovirt_host__storage) Starting ioprocess (__init__:465)
> 2021-01-18 08:43:22,208-0700 INFO (jsonrpc/5) [vdsm.api] FINISH getStorageDomainsList return={'domlist': []} from=::ffff:192.168.222.53,39612, flow_id=2227465c-5040-4199-b1f9-f5305b10b5e5, task_id=e37eb000-13da-440f-9197-07495e53ce52 (api:54)
> 2021-01-18 08:43:22,999-0700 INFO (jsonrpc/7) [vdsm.api] START connectStorageServer(domType=1, spUUID='00000000-0000-0000-0000-000000000000', conList=[{'password': '********', 'protocol_version': 'auto', 'port': '', 'iqn': '', 'connection': 'stumpy:/tanker/ovirt/host_storage', 'ipv6_enabled': 'false', 'id': 'bc87e1a4-004e-41b4-b569-9e9413e9c027', 'user': '', 'tpgt': '1'}], options=None) from=::ffff:192.168.222.53,39612, flow_id=5618fb28, task_id=51daa36a-e1cf-479d-a93c-1c87f21ce934 (api:48)
> 2021-01-18 08:43:23,007-0700 INFO (jsonrpc/7) [storage.StorageDomainCache] Invalidating storage domain cache (sdc:74)
> 2021-01-18 08:43:23,007-0700 INFO (jsonrpc/7) [vdsm.api] FINISH connectStorageServer return={'statuslist': [{'id': 'bc87e1a4-004e-41b4-b569-9e9413e9c027', 'status': 0}]} from=::ffff:192.168.222.53,39612, flow_id=5618fb28, task_id=51daa36a-e1cf-479d-a93c-1c87f21ce934 (api:54)
> 2021-01-18 08:43:23,130-0700 INFO (jsonrpc/0) [vdsm.api] START createStorageDomain(storageType=1, sdUUID='4b3fb9a9-6975-4b80-a2c1-af4e30865088', domainName='hosted_storage', typeSpecificArg='stumpy:/tanker/ovirt/host_storage', domClass=1, domVersion='5', block_size=512, max_hosts=250, options=None) from=::ffff:192.168.222.53,39612, flow_id=5618fb28, task_id=49a1bc04-91d0-4d8f-b847-b6461d980495 (api:48)
> 2021-01-18 08:43:23,130-0700 INFO (jsonrpc/0) [storage.StorageDomainCache] Refreshing storage domain cache (resize=True) (sdc:80)
> 2021-01-18 08:43:23,131-0700 INFO (jsonrpc/0) [storage.ISCSI] Scanning iSCSI devices (iscsi:442)
> 2021-01-18 08:43:23,238-0700 INFO (jsonrpc/0) [storage.ISCSI] Scanning iSCSI devices: 0.11 seconds (utils:390)
> 2021-01-18 08:43:23,239-0700 INFO (jsonrpc/0) [storage.HBA] Scanning FC devices (hba:60)
> 2021-01-18 08:43:23,522-0700 INFO (jsonrpc/0) [storage.HBA] Scanning FC devices: 0.28 seconds (utils:390)
> 2021-01-18 08:43:23,576-0700 INFO (jsonrpc/0) [storage.Multipath] Resizing multipath devices (multipath:104)
> 2021-01-18 08:43:23,582-0700 INFO (jsonrpc/0) [storage.Multipath] Resizing multipath devices: 0.01 seconds (utils:390)
> 2021-01-18 08:43:23,583-0700 INFO (jsonrpc/0) [storage.StorageDomainCache] Refreshing storage domain cache: 0.45 seconds (utils:390)
> 2021-01-18 08:43:23,583-0700 INFO (jsonrpc/0) [storage.StorageDomainCache] Looking up domain 4b3fb9a9-6975-4b80-a2c1-af4e30865088 (sdc:171)
> 2021-01-18 08:43:23,760-0700 WARN (jsonrpc/0) [storage.LVM] All 1 tries have failed: cmd=['/sbin/lvm', 'vgs', '--config', 'devices { preferred_names=["^/dev/mapper/"] ignore_suspended_devices=1 write_cache_state=0 disable_after_error_count=3 filter=["r|.*|"] hints="none" obtain_device_list_from_udev=0 } global { locking_type=1 prioritise_write_locks=1 wait_for_locks=1 use_lvmetad=0 } backup { retain_min=50 retain_days=0 }', '--noheadings', '--units', 'b', '--nosuffix', '--separator', '|', '--ignoreskippedcluster', '-o', 'uuid,name,attr,size,free,extent_size,extent_count,free_count,tags,vg_mda_size,vg_mda_free,lv_count,pv_count,pv_name', '4b3fb9a9-6975-4b80-a2c1-af4e30865088'] rc=5 err=[' Volume group "4b3fb9a9-6975-4b80-a2c1-af4e30865088" not found', ' Cannot process volume group 4b3fb9a9-6975-4b80-a2c1-af4e30865088'] (lvm:538)
> 2021-01-18 08:43:23,771-0700 INFO (jsonrpc/0) [storage.StorageDomainCache] Looking up domain 4b3fb9a9-6975-4b80-a2c1-af4e30865088: 0.19 seconds (utils:390)
> 2021-01-18 08:43:23,818-0700 INFO (jsonrpc/0) [IOProcessClient] (4b3fb9a9-6975-4b80-a2c1-af4e30865088) Starting client (__init__:340)
> 2021-01-18 08:43:23,856-0700 INFO (ioprocess/51165) [IOProcess] (4b3fb9a9-6975-4b80-a2c1-af4e30865088) Starting ioprocess (__init__:465)
> 2021-01-18 08:43:24,166-0700 INFO (jsonrpc/0) [storage.StorageDomain] Creating domain metadata directory '/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md' (fileSD:441)
> 2021-01-18 08:43:24,970-0700 INFO (vmrecovery) [vdsm.api] START getConnectedStoragePoolsList(options=None) from=internal, task_id=5b920619-cd95-40d0-a253-c39d41d71245 (api:48)
> 2021-01-18 08:43:24,971-0700 INFO (vmrecovery) [vdsm.api] FINISH getConnectedStoragePoolsList return={'poollist': []} from=internal, task_id=5b920619-cd95-40d0-a253-c39d41d71245 (api:54)
> 2021-01-18 08:43:24,971-0700 INFO (vmrecovery) [vds] recovery: waiting for storage pool to go up (clientIF:726)
> 2021-01-18 08:43:25,285-0700 INFO (jsonrpc/0) [storage.xlease] Formatting index for lockspace '4b3fb9a9-6975-4b80-a2c1-af4e30865088' (version=1) (xlease:701)
> 2021-01-18 08:43:25,441-0700 INFO (jsonrpc/0) [storage.StorageDomain] Creating domain images directory '/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/4b3fb9a9-6975-4b80-a2c1-af4e30865088/images' (nfsSD:110)
> 2021-01-18 08:43:25,522-0700 INFO (jsonrpc/0) [storage.StorageDomain] Removing remnants of deleted images [] (fileSD:735)
> 2021-01-18 08:43:25,524-0700 INFO (jsonrpc/0) [storage.SANLock] Initializing sanlock for domain 4b3fb9a9-6975-4b80-a2c1-af4e30865088 path=/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage/4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/ids alignment=1048576 block_size=512 io_timeout=10 (clusterlock:286)
> 2021-01-18 08:43:25,533-0700 ERROR (jsonrpc/0) [storage.SANLock] Cannot initialize lock for domain 4b3fb9a9-6975-4b80-a2c1-af4e30865088 (clusterlock:305)
> Traceback (most recent call last):
>   File "/usr/lib/python3.6/site-packages/vdsm/storage/clusterlock.py", line 295, in initLock
>     sector=self._block_size)
> sanlock.SanlockException: (19, 'Sanlock lockspace write failure', 'No such device')
I think this ^^ is the issue. Can you please check /var/log/sanlock.log?
Adding Nir.
Best regards,
>
> During handling of the above exception, another exception occurred:
>
> Traceback (most recent call last):
>   File "/usr/lib/python3.6/site-packages/vdsm/storage/task.py", line 887, in _run
>     return fn(*args, **kargs)
>   File "<decorator-gen-123>", line 2, in createStorageDomain
>   File "/usr/lib/python3.6/site-packages/vdsm/common/api.py", line 50, in method
>     ret = func(*args, **kwargs)
>   File "/usr/lib/python3.6/site-packages/vdsm/storage/hsm.py", line 2669, in createStorageDomain
>     max_hosts=max_hosts)
>   File "/usr/lib/python3.6/site-packages/vdsm/storage/nfsSD.py", line 120, in create
>     fsd.initSPMlease()
>   File "/usr/lib/python3.6/site-packages/vdsm/storage/sd.py", line 1019, in initSPMlease
>     return self._manifest.initDomainLock()
>   File "/usr/lib/python3.6/site-packages/vdsm/storage/sd.py", line 627, in initDomainLock
>     self._domainLock.initLock(self.getDomainLease())
>   File "/usr/lib/python3.6/site-packages/vdsm/storage/clusterlock.py", line 306, in initLock
>     raise se.ClusterLockInitError()
> vdsm.storage.exception.ClusterLockInitError: Could not initialize cluster lock: ()
> 2021-01-18 08:43:25,534-0700 INFO (jsonrpc/0) [storage.TaskManager.Task] (Task='49a1bc04-91d0-4d8f-b847-b6461d980495') aborting: Task is aborted: 'value=Could not initialize cluster lock: () abortedcode=701' (task:1190)
> 2021-01-18 08:43:25,535-0700 ERROR (jsonrpc/0) [storage.Dispatcher] FINISH createStorageDomain error=Could not initialize cluster lock: () (dispatcher:83)
> 2021-01-18 08:43:25,536-0700 INFO (jsonrpc/0) [jsonrpc.JsonRpcServer] RPC call StorageDomain.create failed (error 701) in 2.41 seconds (__init__:312)
> 2021-01-18 08:43:26,086-0700 INFO (jsonrpc/6) [vdsm.api] START disconnectStorageServer(domType=1, spUUID='00000000-0000-0000-0000-000000000000', conList=[{'password': '********', 'protocol_version': 'auto', 'port': '', 'iqn': '', 'connection': 'stumpy:/tanker/ovirt/host_storage', 'ipv6_enabled': 'false', 'id': 'bc87e1a4-004e-41b4-b569-9e9413e9c027', 'user': '', 'tpgt': '1'}], options=None) from=::ffff:192.168.222.53,39612, flow_id=2227465c-5040-4199-b1f9-f5305b10b5e5, task_id=5e35a85a-4010-425e-81b5-5db3ad6d63f3 (api:48)
> 2021-01-18 08:43:26,087-0700 INFO (jsonrpc/6) [storage.Mount] unmounting /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage (mount:215)
> 2021-01-18 08:43:26,209-0700 INFO (jsonrpc/6) [storage.StorageDomainCache] Refreshing storage domain cache (resize=False) (sdc:80)
> 2021-01-18 08:43:26,210-0700 INFO (jsonrpc/6) [storage.ISCSI] Scanning iSCSI devices (iscsi:442)
> 2021-01-18 08:43:26,346-0700 INFO (jsonrpc/6) [storage.ISCSI] Scanning iSCSI devices: 0.14 seconds (utils:390)
> 2021-01-18 08:43:26,347-0700 INFO (jsonrpc/6) [storage.HBA] Scanning FC devices (hba:60)
> 2021-01-18 08:43:26,635-0700 INFO (jsonrpc/6) [storage.HBA] Scanning FC devices: 0.29 seconds (utils:390)
> 2021-01-18 08:43:26,693-0700 INFO (jsonrpc/6) [storage.StorageDomainCache] Refreshing storage domain cache: 0.48 seconds (utils:390)
> 2021-01-18 08:43:26,693-0700 INFO (jsonrpc/6) [vdsm.api] FINISH disconnectStorageServer return={'statuslist': [{'id': 'bc87e1a4-004e-41b4-b569-9e9413e9c027', 'status': 0}]} from=::ffff:192.168.222.53,39612, flow_id=2227465c-5040-4199-b1f9-f5305b10b5e5, task_id=5e35a85a-4010-425e-81b5-5db3ad6d63f3 (api:54)
> 2021-01-18 08:43:27,353-0700 INFO (periodic/0) [vdsm.api] START repoStats(domains=()) from=internal, task_id=236f3956-28f6-407e-b893-e018f90fcd08 (api:48)
> 2021-01-18 08:43:27,355-0700 INFO (periodic/0) [vdsm.api] FINISH repoStats return={} from=internal, task_id=236f3956-28f6-407e-b893-e018f90fcd08 (api:54)
> 2021-01-18 08:43:27,694-0700 INFO (jsonrpc/1) [api.host] START getAllVmStats() from=::ffff:192.168.222.53,39612 (api:48)
> 2021-01-18 08:43:27,697-0700 INFO (jsonrpc/1) [api.host] FINISH getAllVmStats return={'status': {'code': 0, 'message': 'Done'}, 'statsList': (suppressed)} from=::ffff:192.168.222.53,39612 (api:54)
> 2021-01-18 08:43:27,800-0700 INFO (jsonrpc/2) [api.host] START getAllVmStats() from=::1,55776 (api:48)
> 2021-01-18 08:43:27,802-0700 INFO (jsonrpc/2) [api.host] FINISH getAllVmStats return={'status': {'code': 0, 'message': 'Done'}, 'statsList': (suppressed)} from=::1,55776 (api:54)
> 2021-01-18 08:43:27,826-0700 INFO (jsonrpc/3) [api.host] START getAllVmIoTunePolicies() from=::1,55776 (api:48)
> 2021-01-18 08:43:27,826-0700 INFO (jsonrpc/3) [api.host] FINISH getAllVmIoTunePolicies return={'status': {'code': 0, 'message': 'Done'}, 'io_tune_policies_dict': {'ddceefdd-57d3-432f-816e-5dc013249bff': {'policy': [], 'current_values': []}}} from=::1,55776 (api:54)
> 2021-01-18 08:43:29,975-0700 INFO (vmrecovery) [vdsm.api] START getConnectedStoragePoolsList(options=None) from=internal, task_id=821ce12a-ab6e-4db5-ba22-bd2630f2730c (api:48)
> 2021-01-18 08:43:29,976-0700 INFO (vmrecovery) [vdsm.api] FINISH getConnectedStoragePoolsList return={'poollist': []} from=internal, task_id=821ce12a-ab6e-4db5-ba22-bd2630f2730c (api:54)
> 2021-01-18 08:43:29,976-0700 INFO (vmrecovery) [vds] recovery: waiting for storage pool to go up (clientIF:726)
> 2021-01-18 08:43:33,826-0700 INFO (jsonrpc/4) [api.host] START getStats() from=::ffff:192.168.222.53,39612 (api:48)
>
> **end snip**
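
For what it's worth, the (19, 'Sanlock lockspace write failure', 'No such device') that vdsm reports appears to be what sanlock returns after the underlying open of the ids file has already failed with EACCES, which is why sanlock.log is the more telling trace. Roughly the same initialization can be attempted by hand with the sanlock CLI — a sketch, with the UUID and path taken from the traceback above (colons inside the mount path are backslash-escaped, which sanlock expects for paths containing colons):

    sudo -u sanlock sanlock direct init -s '4b3fb9a9-6975-4b80-a2c1-af4e30865088:0:/rhev/data-center/mnt/stumpy\:_tanker_ovirt_host__storage/4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/ids:0'

If this fails the same way, vdsm is out of the picture and the problem is purely between the sanlock daemon's credentials and the NFS server.
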
>
>
>
>
> from supervdsm.log. This is the only section where 'host_storage' shows up.
>
> **snip**
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:42:55,447::commands::98::common.commands::(run) SUCCESS: <err> = b''; <rc> = 0
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:42:55,448::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return dmsetup_run_status with b''
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:05,450::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call dmsetup_run_status with ('multipath',) {}
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:05,450::commands::153::common.commands::(start) /usr/bin/taskset --cpu-list 0-3 /usr/sbin/dmsetup status --target multipath (cwd None)
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:05,483::commands::98::common.commands::(run) SUCCESS: <err> = b''; <rc> = 0
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:05,484::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return dmsetup_run_status with b''
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:15,485::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call dmsetup_run_status with ('multipath',) {}
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:15,485::logutils::354::root::(_report_stats) ThreadedHandler is ok in the last 60 seconds (max pending: 2)
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:15,485::commands::153::common.commands::(start) /usr/bin/taskset --cpu-list 0-3 /usr/sbin/dmsetup status --target multipath (cwd None)
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:15,515::commands::98::common.commands::(run) SUCCESS: <err> = b''; <rc> = 0
> MainProcess|mpathhealth::DEBUG::2021-01-18 08:43:15,516::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return dmsetup_run_status with b''
> MainProcess|jsonrpc/4::DEBUG::2021-01-18 08:43:20,448::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call mount with (<vdsm.supervdsm_server._SuperVdsm object at 0x7f19293acc88>, 'stumpy:/tanker/ovirt/host_storage', '/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage') {'mntOpts': 'soft,nosharecache,timeo=100,retrans=3,nolock', 'vfstype': 'nfs', 'cgroup': None}
> MainProcess|jsonrpc/4::DEBUG::2021-01-18 08:43:20,448::commands::211::root::(execCmd) /usr/bin/taskset --cpu-list 0-3 /usr/bin/mount -t nfs -o soft,nosharecache,timeo=100,retrans=3,nolock stumpy:/tanker/ovirt/host_storage /rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage (cwd None)
> MainProcess|jsonrpc/4::DEBUG::2021-01-18 08:43:21,198::commands::224::root::(execCmd) SUCCESS: <err> = b''; <rc> = 0
> MainProcess|jsonrpc/4::DEBUG::2021-01-18 08:43:21,198::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return mount with None
> MainProcess|jsonrpc/4::DEBUG::2021-01-18 08:43:21,320::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call validateAccess with (<vdsm.supervdsm_server._SuperVdsm object at 0x7f19293acc88>, 'vdsm', ('kvm',), '/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage', 7) {}
> MainProcess|jsonrpc/4::DEBUG::2021-01-18 08:43:21,340::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return validateAccess with None
> MainProcess|jsonrpc/4::DEBUG::2021-01-18 08:43:21,341::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call validateAccess with (<vdsm.supervdsm_server._SuperVdsm object at 0x7f19293acc88>, 'qemu', ('qemu', 'kvm'), '/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage', 5) {}
> MainProcess|jsonrpc/4::DEBUG::2021-01-18 08:43:21,368::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return validateAccess with None
> MainProcess|jsonrpc/5::DEBUG::2021-01-18 08:43:21,630::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call hbaRescan with (<vdsm.supervdsm_server._SuperVdsm object at 0x7f19293acc88>,) {}
> MainProcess|jsonrpc/5::DEBUG::2021-01-18 08:43:21,630::commands::153::common.commands::(start) /usr/bin/taskset --cpu-list 0-3 /usr/libexec/vdsm/fc-scan (cwd None)
> MainProcess|jsonrpc/5::DEBUG::2021-01-18 08:43:21,907::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return hbaRescan with None
> MainProcess|jsonrpc/0::DEBUG::2021-01-18 08:43:23,246::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call hbaRescan with (<vdsm.supervdsm_server._SuperVdsm object at 0x7f19293acc88>,) {}
> MainProcess|jsonrpc/0::DEBUG::2021-01-18 08:43:23,246::commands::153::common.commands::(start) /usr/bin/taskset --cpu-list 0-3 /usr/libexec/vdsm/fc-scan (cwd None)
> MainProcess|jsonrpc/0::DEBUG::2021-01-18 08:43:23,521::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return hbaRescan with None
> MainProcess|jsonrpc/0::DEBUG::2021-01-18 08:43:23,778::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call validateAccess with (<vdsm.supervdsm_server._SuperVdsm object at 0x7f19293acc88>, 'vdsm', ('kvm',), '/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage', 7) {}
> MainProcess|jsonrpc/0::DEBUG::2021-01-18 08:43:23,796::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return validateAccess with None
> MainProcess|jsonrpc/0::DEBUG::2021-01-18 08:43:23,798::supervdsm_server::93::SuperVdsm.ServerCallback::(wrapper) call validateAccess with (<vdsm.supervdsm_server._SuperVdsm object at 0x7f19293acc88>, 'qemu', ('qemu', 'kvm'), '/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage', 5) {}
> MainProcess|jsonrpc/0::DEBUG::2021-01-18 08:43:23,816::supervdsm_server::100::SuperVdsm.ServerCallback::(wrapper) return validateAccess with None
> **end snip**
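
The 7 and 5 in those validateAccess calls look like rwx and r-x permission masks being checked for vdsm (with group kvm) and qemu, and both calls return cleanly, so the mount point itself is accessible to them. Note, though, that nothing here validates access for the sanlock user, which is the one actually failing. A rough by-hand equivalent, as a sketch:

    p=/rhev/data-center/mnt/stumpy:_tanker_ovirt_host__storage
    sudo -u vdsm sh -c "test -r $p -a -w $p -a -x $p" && echo vdsm ok
    sudo -u qemu sh -c "test -r $p -a -x $p" && echo qemu ok
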
>
> As for the mount area, it does get mounted and unmounted. The files and directory structure are also created.
>
> root@stumpy:/tanker/ovirt/host_storage# ls -l
> total 1
> drwxrwsr-x 4 vdsm 36 4 Jan 18 08:43 4b3fb9a9-6975-4b80-a2c1-af4e30865088
> root@stumpy:/tanker/ovirt/host_storage# find .
> .
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/images
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/inbox
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/leases
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/metadata
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/ids
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/xleases
> ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/outbox
> root@stumpy:/tanker/ovirt/host_storage# find . -exec ls -ltrh '{}' \;
> total 512
> drwxrwsr-x 4 vdsm 36 4 Jan 18 08:43 4b3fb9a9-6975-4b80-a2c1-af4e30865088
> total 12K
> drwxrwsr-x 2 vdsm 36 8 Jan 18 08:43 dom_md
> drwxrwsr-x 2 vdsm 36 2 Jan 18 08:43 images
> total 0
> total 275K
> -rw-rw---- 1 vdsm 36 0 Jan 18 08:43 ids
> -rw-rw---- 1 vdsm 36 16M Jan 18 08:43 inbox
> -rw-rw---- 1 vdsm 36 16M Jan 18 08:43 outbox
> -rw-rw---- 1 vdsm 36 0 Jan 18 08:43 leases
> -rw-rw---- 1 vdsm 36 1.3M Jan 18 08:43 xleases
> -rw-rw-r-- 1 vdsm 36 342 Jan 18 08:43 metadata
> -rw-rw---- 1 vdsm 36 16M Jan 18 08:43 ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/inbox
> -rw-rw---- 1 vdsm 36 0 Jan 18 08:43 ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/leases
> -rw-rw-r-- 1 vdsm 36 342 Jan 18 08:43 ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/metadata
> -rw-rw---- 1 vdsm 36 0 Jan 18 08:43 ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/ids
> -rw-rw---- 1 vdsm 36 1.3M Jan 18 08:43 ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/xleases
> -rw-rw---- 1 vdsm 36 16M Jan 18 08:43 ./4b3fb9a9-6975-4b80-a2c1-af4e30865088/dom_md/outbox
> root@stumpy:/tanker/ovirt/host_storage#
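
One more thing worth ruling out on the server side: ids is 0660 vdsm:36, so sanlock can only open it through its supplementary membership in group 36, and some NFS exports squash or remap the caller's uid/gids before they reach the filesystem. A sketch of what to check on stumpy (the dataset name is taken from your prompt; adjust for your NFS server):

    zfs get sharenfs tanker/ovirt/host_storage   # if the share is a ZFS sharenfs export
    cat /etc/exports   # look for root_squash/all_squash/anonuid/anongid (Linux) or maproot/mapall (BSD)

If the export remaps the sanlock user's ids, the group-based 0660 permission will never match.
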
>
> Thank you,
>
> ..Matt
> _______________________________________________
> Users mailing list -- users(a)ovirt.org
> To unsubscribe send an email to users-leave(a)ovirt.org
> Privacy Statement: https://www.ovirt.org/privacy-policy.html
> oVirt Code of Conduct: https://www.ovirt.org/community/about/community-guidelines/
> List Archives: https://lists.ovirt.org/archives/list/users@ovirt.org/message/BORUNFGGNC6...
--
Didi