[pv]
action=create
devices=sdb
ignore_pv_errors=no
[vg1]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
[lv1]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=804GB
poolmetadatasize=4GB
[lv2]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick
[lv3]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=400GB
[volume2]
action=create
volname=data
transport=tcp
key=storage.owner-uid,storage.owner-gid,features.shard,performance.low-prio-threads,performance.strict-o-direct,network.remote-dio,network.ping-timeout,user.cifs,nfs.disable,performance.quick-read,performance.read-ahead,performance.io-cache,cluster.eager-lock
value=36,36,on,32,on,off,30,off,on,off,off,off,enable
#key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
#value=virt,36,36,30,on,off,enable
#brick_dirs=10.10.8.111:/gluster_bricks/data/data
brick_dirs=/gluster_bricks/data/data
ignore_volume_errors=no
[volume3]
action=create
volname=vmstore
transport=tcp
key=storage.owner-uid,storage.owner-gid,features.shard,performance.low-prio-threads,performance.strict-o-direct,network.remote-dio,network.ping-timeout,user.cifs,nfs.disable,performance.quick-read,performance.read-ahead,performance.io-cache,cluster.eager-lock
value=36,36,on,32,on,off,30,off,on,off,off,off,enable
#key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
#value=virt,36,36,30,on,off,enable
#brick_dirs=10.10.8.111:/gluster_bricks/vmstore/vmstore
brick_dirs=/gluster_bricks/vmstore/vmstore
ignore_volume_errors=no
I just don't understand how this config should be adjusted so that the Ansible script will finish successfully... :(
A blacklist is a list of the disks that the system should NOT set up as multipath devices.
You need to create a file for this; you can name it local.conf and place it in /etc/multipath/conf.d/.
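A minimal sketch of what that file could contain, assuming sdb is the brick disk from the config above that should stay out of multipath (the wwid shown is only a placeholder, not taken from the original post):

# /etc/multipath/conf.d/local.conf
# Keep the Gluster brick disk out of multipath so LVM/gdeploy can use it directly.
blacklist {
    # blacklist by device node name (sdb is the disk used in the [pv] section)
    devnode "^sdb$"
    # or, more robustly, blacklist by wwid; find it with:
    #   /usr/lib/udev/scsi_id -g -u /dev/sdb
    # wwid "3600508b1001c5e8ac0a1234567890abc"
}

After saving the file, reload the multipath configuration (for example with "systemctl reload multipathd" or "multipathd reconfigure") so the disk is released before re-running the deployment.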
Use the most current ISO.
I think there might be a bug.