I have three servers: node 1 has a 3 TB disk at /dev/sda, while nodes 2 and 3 each have a 3 TB disk at /dev/sdb.
I start the Gluster deployment process and change node 1 to sda, leaving the other two on sdb. I get no errors, but when the deployment reaches "Creating physical volume" it just spins forever and never gets any further. I can leave it there for five hours and it doesn't go anywhere.
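For reference, the step that hangs can presumably be reproduced by hand on each node; this is only a sketch, assuming the pv sections in the config below boil down to pvcreate on the listed device (sda on node 1, sdb on nodes 2 and 3):

lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT /dev/sda   # /dev/sdb on nodes 2 and 3; confirm the disk is empty and unmounted
multipath -ll                                        # check whether a multipath map still holds the disk
wipefs /dev/sda                                      # list any stale partition/RAID/LVM signatures (no -a, nothing is erased)
pvcreate -vvv /dev/sda                               # verbose output should show where the command stalls

The gdeploy configuration generated by the wizard is below.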
#gdeploy configuration generated by cockpit-gluster plugin
[hosts]
cmdnode1.cmd911.com
cmdnode2.cmd911.com
cmdnode3.cmd911.com
[script1:cmdnode1.cmd911.com]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sda -h cmdnode1.cmd911.com,cmdnode2.cmd911.com,cmdnode3.cmd911.com
[script1:cmdnode2.cmd911.com]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h cmdnode1.cmd911.com,cmdnode2.cmd911.com,cmdnode3.cmd911.com
[script1:cmdnode3.cmd911.com]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h cmdnode1.cmd911.com,cmdnode2.cmd911.com,cmdnode3.cmd911.com
[disktype]
raid6
[diskcount]
12
[stripesize]
256
[service1]
action=enable
service=chronyd
[service2]
action=restart
service=chronyd
[shell2]
action=execute
command=vdsm-tool configure --force
[script3]
action=execute
file=/usr/share/gdeploy/scripts/blacklist_all_disks.sh
ignore_script_errors=no
[pv1:cmdnode1.cmd911.com]
action=create
devices=sda
ignore_pv_errors=no
[pv1:cmdnode2.cmd911.com]
action=create
devices=sdb
ignore_pv_errors=no
[pv1:cmdnode3.cmd911.com]
action=create
devices=sdb
ignore_pv_errors=no
[vg1:cmdnode1.cmd911.com]
action=create
vgname=gluster_vg_sda
pvname=sda
ignore_vg_errors=no
[vg1:cmdnode2.cmd911.com]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
[vg1:cmdnode3.cmd911.com]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
[lv1:cmdnode1.cmd911.com]
action=create
poolname=gluster_thinpool_sda
ignore_lv_errors=no
vgname=gluster_vg_sda
lvtype=thinpool
size=1005GB
poolmetadatasize=5GB
[lv2:cmdnode2.cmd911.com]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=1005GB
poolmetadatasize=5GB
[lv3:cmdnode3.cmd911.com]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=41GB
poolmetadatasize=1GB
[lv4:cmdnode1.cmd911.com]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sda
mount=/gluster_bricks/engine
size=100GB
lvtype=thick
[lv5:cmdnode1.cmd911.com]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sda
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sda
virtualsize=500GB
[lv6:cmdnode1.cmd911.com]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sda
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sda
virtualsize=500GB
[lv7:cmdnode2.cmd911.com]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick
[lv8:cmdnode2.cmd911.com]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=500GB
[lv9:cmdnode2.cmd911.com]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=500GB
[lv10:cmdnode3.cmd911.com]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=20GB
lvtype=thick
[lv11:cmdnode3.cmd911.com]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=20GB
[lv12:cmdnode3.cmd911.com]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=20GB
[selinux]
yes
[service3]
action=restart
service=glusterd
slice_setup=yes
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
services=glusterfs
[script2]
action=execute
file=/usr/share/gdeploy/scripts/disable-gluster-hooks.sh
[shell3]
action=execute
command=usermod -a -G gluster qemu
[volume1]
action=create
volname=engine
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=cmdnode1.cmd911.com:/gluster_bricks/engine/engine,cmdnode2.cmd911.com:/gluster_bricks/engine/engine,cmdnode3.cmd911.com:/gluster_bricks/engine/engine
ignore_volume_errors=no
arbiter_count=1
[volume2]
action=create
volname=data
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=cmdnode1.cmd911.com:/gluster_bricks/data/data,cmdnode2.cmd911.com:/gluster_bricks/data/data,cmdnode3.cmd911.com:/gluster_bricks/data/data
ignore_volume_errors=no
arbiter_count=1
[volume3]
action=create
volname=vmstore
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=cmdnode1.cmd911.com:/gluster_bricks/vmstore/vmstore,cmdnode2.cmd911.com:/gluster_bricks/vmstore/vmstore,cmdnode3.cmd911.com:/gluster_bricks/vmstore/vmstore
ignore_volume_errors=no
arbiter_count=1
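For completeness, my understanding is that the pv1/vg1/lv* sections for node 1 translate to roughly the following LVM commands; this is my own sketch, not gdeploy's actual output, and it leaves out any data-alignment options plus the mkfs/mount steps the mount= lines drive:

pvcreate /dev/sda                                                          # the step that currently hangs
vgcreate gluster_vg_sda /dev/sda
lvcreate -L 1005G --poolmetadatasize 5G --thinpool gluster_thinpool_sda gluster_vg_sda
lvcreate -L 100G -n gluster_lv_engine gluster_vg_sda
lvcreate -V 500G --thin -n gluster_lv_data gluster_vg_sda/gluster_thinpool_sda
lvcreate -V 500G --thin -n gluster_lv_vmstore gluster_vg_sda/gluster_thinpool_sda

Running these one at a time on node 1 (and the sdb equivalents on nodes 2 and 3) should show whether the hang is in pvcreate itself or somewhere else in the deployment run.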