
Adding Sahina. On Fri, 22 Feb 2019 at 14:01, matteo fedeli <matmilan97@gmail.com> wrote:
Hi, since the deploy with 4.2.7.8 failed, I tried reinstalling oVirt with version 4.2.8, and two strange things happened. During the volume step, even if I choose JBOD mode, the generated deploy conf keeps the raid6 disk type... why? To work around it I just edited the file by hand at the line with the disk type, but then the deploy got stuck on creating the physical volume...
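While it is stuck there, this is the kind of manual check that should tell whether something on the disk itself is blocking pvcreate (a minimal sketch; /dev/sdb is the disk named in the config below, and stale signatures left over from the failed 4.2.7.8 attempt are a common reason pvcreate refuses a device):

# Inspect the target disk before retrying the deploy (read-only checks)
lsblk /dev/sdb                # partitions or holders that would block LVM
wipefs -n /dev/sdb            # -n = only list leftover FS/RAID signatures, erase nothing
pvs                           # sdb must not already be listed as a physical volume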
This is my conf file (I used 3 HDDs of 500GB each; node, engine + vmstore and data):
#gdeploy configuration generated by cockpit-gluster plugin
[hosts]
kansas.planet.bn
germany.planet.bn
singapore.planet.bn

[script1:kansas.planet.bn]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h kansas.planet.bn, germany.planet.bn, singapore.planet.bn

[script1:germany.planet.bn]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h kansas.planet.bn, germany.planet.bn, singapore.planet.bn

[script1:singapore.planet.bn]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h kansas.planet.bn, germany.planet.bn, singapore.planet.bn
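If the deploy dies around this step, the same sanity check can also be run by hand on one node to see its output directly (a sketch reusing the path and flags from the [script1] sections above; I drop the spaces after the commas so the host list stays a single shell argument):

# Manual run of the cockpit sanity check on one node
bash /usr/share/gdeploy/scripts/grafton-sanity-check.sh \
    -d sdb -h kansas.planet.bn,germany.planet.bn,singapore.planet.bn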
[disktype]
jbod

[diskcount]
12

[stripesize]
256
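As far as I understand, [diskcount] and [stripesize] are only consumed for RAID disk types, where they set the LVM data alignment; with jbod a flat 256K alignment is used and the two values above should simply be ignored. A hedged sketch of the arithmetic (the assumption that diskcount counts data disks is mine):

# Alignment gdeploy would derive from these values (my assumption of the formula)
STRIPESIZE_K=256   # [stripesize] above, in KiB
DISKCOUNT=12       # [diskcount] above; assumed to be the number of data disks
echo "raid alignment: $((STRIPESIZE_K * DISKCOUNT))K"   # 3072K for raid6/raid10
echo "jbod alignment: 256K (fixed)"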
[service1]
action=enable
service=chronyd

[service2]
action=restart
service=chronyd

[shell2]
action=execute
command=vdsm-tool configure --force

[script3]
action=execute
file=/usr/share/gdeploy/scripts/blacklist_all_disks.sh
ignore_script_errors=no
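blacklist_all_disks.sh should be what keeps multipath away from the local disks; a multipath map still holding sdb is another classic way the PV step can wedge, so it is worth checking by hand (sketch; <map_name> is a placeholder for whatever map shows up):

# Is sdb hiding under a multipath map?
multipath -ll | grep -B2 sdb
# If so, flush that map and make sure the blacklist really applied
multipath -f <map_name>       # <map_name>: placeholder for the map found above
systemctl reload multipathd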
[pv1:kansas.planet.bn]
action=create
devices=sdb
ignore_pv_errors=no

[pv1:germany.planet.bn]
action=create
devices=sdb
ignore_pv_errors=no

[pv1:singapore.planet.bn]
action=create
devices=sdb
ignore_pv_errors=no

[vg1:kansas.planet.bn]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no

[vg1:germany.planet.bn]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no

[vg1:singapore.planet.bn]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
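On a node where the deploy hangs at this point, the [pv1]/[vg1] steps can be reproduced by hand; roughly (my approximation of the commands gdeploy issues, using the 256K jbod alignment discussed above):

# Approximate manual equivalent of [pv1]/[vg1] on one node
pvcreate --dataalignment 256K /dev/sdb
vgcreate gluster_vg_sdb /dev/sdb
# verify
pvs /dev/sdb
vgs gluster_vg_sdb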
[lv1:kansas.planet.bn]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=1005GB
poolmetadatasize=5GB

[lv2:germany.planet.bn]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=1005GB
poolmetadatasize=5GB

[lv3:singapore.planet.bn]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=1005GB
poolmetadatasize=5GB
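In plain LVM the thin-pool sections should come down to something like the sketch below. One thing I notice writing it out: size=1005GB plus 5GB of pool metadata cannot fit in a volume group backed by a single 500GB sdb, so even past the PV step this LV would fail with an insufficient-free-space error:

# Approximate manual equivalent of [lv1]-[lv3] (one per node)
lvcreate -L 1005G --poolmetadatasize 5G \
    --thinpool gluster_thinpool_sdb gluster_vg_sdb
# 1005G + 5G metadata > one 500GB disk -> "insufficient free space" on these nodes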
[lv4:kansas.planet.bn]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick

[lv5:kansas.planet.bn]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=500GB

[lv6:kansas.planet.bn]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=500GB

[lv7:germany.planet.bn]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick

[lv8:germany.planet.bn]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=500GB

[lv9:germany.planet.bn]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=500GB

[lv10:singapore.planet.bn]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick

[lv11:singapore.planet.bn]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=500GB

[lv12:singapore.planet.bn]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=500GB
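And the per-brick LVs correspond roughly to the sketch below for one node (XFS with 512-byte inodes is the usual Gluster brick layout, though I am assuming that is what gdeploy uses here):

# Thick LV for the engine brick
lvcreate -L 100G -n gluster_lv_engine gluster_vg_sdb
# Thin LVs for data and vmstore, carved out of the thin pool
lvcreate -V 500G --thin -n gluster_lv_data gluster_vg_sdb/gluster_thinpool_sdb
lvcreate -V 500G --thin -n gluster_lv_vmstore gluster_vg_sdb/gluster_thinpool_sdb
# Format and mount one of them as a brick
mkfs.xfs -i size=512 /dev/gluster_vg_sdb/gluster_lv_engine
mkdir -p /gluster_bricks/engine
mount /dev/gluster_vg_sdb/gluster_lv_engine /gluster_bricks/engine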
[selinux]
yes

[service3]
action=restart
service=glusterd
slice_setup=yes

[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
services=glusterfs
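The [firewalld] section ought to be equivalent to running this on each node (sketch):

# Approximate manual equivalent of the [firewalld] section
for p in 111/tcp 2049/tcp 54321/tcp 5900/tcp 5900-6923/tcp 5666/tcp 16514/tcp 54322/tcp; do
    firewall-cmd --permanent --add-port=$p
done
firewall-cmd --permanent --add-service=glusterfs
firewall-cmd --reload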
[script2]
action=execute
file=/usr/share/gdeploy/scripts/disable-gluster-hooks.sh

[shell3]
action=execute
command=usermod -a -G gluster qemu
[volume1]
action=create
volname=engine
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=kansas.planet.bn:/gluster_bricks/engine/engine,germany.planet.bn:/gluster_bricks/engine/engine,singapore.planet.bn:/gluster_bricks/engine/engine
ignore_volume_errors=no

[volume2]
action=create
volname=data
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=kansas.planet.bn:/gluster_bricks/data/data,germany.planet.bn:/gluster_bricks/data/data,singapore.planet.bn:/gluster_bricks/data/data
ignore_volume_errors=no

[volume3]
action=create
volname=vmstore
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=kansas.planet.bn:/gluster_bricks/vmstore/vmstore,germany.planet.bn:/gluster_bricks/vmstore/vmstore,singapore.planet.bn:/gluster_bricks/vmstore/vmstore
ignore_volume_errors=no
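For reference, each [volumeN] section should boil down to a gluster volume create plus one volume set per key/value pair; a sketch for the engine volume (I am assuming group=virt applies the stock /var/lib/glusterd/groups/virt group file):

gluster volume create engine replica 3 transport tcp \
    kansas.planet.bn:/gluster_bricks/engine/engine \
    germany.planet.bn:/gluster_bricks/engine/engine \
    singapore.planet.bn:/gluster_bricks/engine/engine
gluster volume set engine group virt
gluster volume set engine storage.owner-uid 36
gluster volume set engine storage.owner-gid 36
gluster volume set engine network.ping-timeout 30
gluster volume set engine performance.strict-o-direct on
gluster volume set engine network.remote-dio off
gluster volume set engine cluster.granular-entry-heal enable
gluster volume start engine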