Fortunately, after node reinstallation no more mpath devices are present:
multipath -ll
Jun 13 06:32:10 | DM multipath kernel driver not loaded
Jun 13 06:32:10 | DM multipath kernel driver not loaded
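As far as I understand, the two "DM multipath kernel driver not loaded" lines just mean the dm_multipath kernel module is no longer loaded on this node; a quick way to double-check (just the standard commands, nothing specific to my setup):

    # no output from lsmod means the module really is not loaded
    lsmod | grep dm_multipath
    # it could be loaded back with modprobe if multipathing were ever wanted again
    # modprobe dm_multipath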
But now I am encountering "invalid number format \"virt\" in option \"brick-uid\"" errors:
TASK [Sets options for volume] *******************************************************************************************************************************************
failed: [10.10.8.111] (item={u'key': u'storage.owner-uid', u'value': u'virt'}) => {"changed": false, "item": {"key": "storage.owner-uid", "value": "virt"}, "msg": "volume set: failed: invalid number format \"virt\" in option \"brick-uid\"\n"}
changed: [10.10.8.111] => (item={u'key': u'storage.owner-gid', u'value': u'36'})
failed: [10.10.8.111] (item={u'key': u'features.shard', u'value': u'36'}) => {"changed": false, "item": {"key": "features.shard", "value": "36"}, "msg": "volume set: failed: Error, Validation Failed\n"}
changed: [10.10.8.111] => (item={u'key': u'performance.low-prio-threads', u'value': u'30'})
changed: [10.10.8.111] => (item={u'key': u'performance.strict-o-direct', u'value': u'on'})
changed: [10.10.8.111] => (item={u'key': u'network.remote-dio', u'value': u'off'})
failed: [10.10.8.111] (item={u'key': u'network.ping-timeout', u'value': u'enable'}) => {"changed": false, "item": {"key": "network.ping-timeout", "value": "enable"}, "msg": "volume set: failed: invalid time format \"enable\" in \"option ping-timeout\"\n"}
Below is my gdeploy.conf:
[hosts]
10.10.8.111
[script1]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h 10.10.8.111
[disktype]
jbod
[diskcount]
12
[stripesize]
256
[service1]
action=enable
service=chronyd
[service2]
action=restart
service=chronyd
[shell2]
action=execute
command=vdsm-tool configure --force
[script3]
action=execute
file=/usr/share/gdeploy/scripts/blacklist_all_disks.sh
ignore_script_errors=no
[pv]
action=create
devices=sdb
ignore_pv_errors=no
[vg1]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
[lv1]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=804GB
poolmetadatasize=4GB
[lv2]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick
[lv3]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=400GB
[lv4]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=400GB
[selinux]
yes
[service3]
action=restart
service=glusterd
slice_setup=yes
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
services=glusterfs
[script2]
action=execute
file=/usr/share/gdeploy/scripts/disable-gluster-hooks.sh
[shell3]
action=execute
command=usermod -a -G gluster qemu
[volume1]
action=create
volname=engine
transport=tcp
key=storage.owner-uid,storage.owner-gid,features.shard,performance.low-prio-threads,performance.strict-o-direct,network.remote-dio,network.ping-timeout,user.cifs,nfs.disable,performance.quick-read,performance.read-ahead,performance.io-cache,cluster.eager-lock
value=36,36,on,32,on,off,30,off,on,off,off,off,enable
#key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
#brick_dirs=10.10.8.111:/gluster_bricks/engine/engine
brick_dirs=/gluster_bricks/engine/engine
ignore_volume_errors=no
[volume2]
action=create
volname=data
transport=tcp
key=storage.owner-uid,storage.owner-gid,features.shard,performance.low-prio-threads,performance.strict-o-direct,network.remote-dio,network.ping-timeout,user.cifs,nfs.disable,performance.quick-read,performance.read-ahead,performance.io-cache,cluster.eager-lock
value=36,36,on,32,on,off,30,off,on,off,off,off,enable
#key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
#brick_dirs=10.10.8.111:/gluster_bricks/data/data
brick_dirs=/gluster_bricks/data/data
ignore_volume_errors=no
[volume3]
action=create
volname=vmstore
transport=tcp
key=storage.owner-uid,storage.owner-gid,features.shard,performance.low-prio-threads,performance.strict-o-direct,network.remote-dio,network.ping-timeout,user.cifs,nfs.disable,performance.quick-read,performance.read-ahead,performance.io-cache,cluster.eager-lock
value=36,36,on,32,on,off,30,off,on,off,off,off,enable
#key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
#brick_dirs=10.10.8.111:/gluster_bricks/vmstore/vmstore
brick_dirs=/gluster_bricks/vmstore/vmstore
ignore_volume_errors=no
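For what it's worth, my understanding is that gdeploy pairs the key= and value= lists positionally (the Nth value goes with the Nth key), so a trimmed [volume] section would map roughly like this (just a sketch of the mapping, not my real config):

    [volume1]
    action=create
    volname=engine
    transport=tcp
    # key[i] is paired with value[i], so both lists should have the same length
    key=storage.owner-uid,storage.owner-gid,features.shard
    value=36,36,on
    brick_dirs=/gluster_bricks/engine/engine
    ignore_volume_errors=no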
I just don't understand how this config should be adjusted so the Ansible script will finish successfully... :(
On Wed, Jun 13, 2018 at 9:06 AM, femi adegoke <ovirt(a)fateknollogee.com> wrote:
A blacklist is a list of the disks that the system should NOT mark as multipath disks.
You need to create a file (you can name it local.conf) in this location: /etc/multipath/conf.d/
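For example, something along these lines (the WWID below is only a placeholder; substitute the WWID of your own local disk, which you can get with "/usr/lib/udev/scsi_id -g -u -d /dev/sdX"):

    # /etc/multipath/conf.d/local.conf
    blacklist {
        # placeholder WWID, replace with the real one for the local disk
        wwid "3600508b1001c5e8b0000000000000000"
        # or, to keep multipath away from every device on this host:
        # devnode "*"
    }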
Use the most current iso.
I think there might be a bug.
--
Best regards, Leo David