Hello everyone,
I'm getting stuck in this situation, and I just wanted to know if it can actually
be done, or whether I should follow a different approach.
I need to do a single-instance deployment for a POC, with the eventual goal of
scaling this setup up to 3 nodes in the future.
I followed this tutorial:
https://www.ovirt.org/documentation/gluster-hyperconverged/chap-Single_no...
But when I run gdeploy, I end up with this error:
TASK [Run a shell script] **********************************************************************************************************************************************
failed: [10.10.8.101] (item=/usr/share/gdeploy/scripts/blacklist_all_disks.sh) => {"changed": true, "failed_when_result": true, "item": "/usr/share/gdeploy/scripts/blacklist_all_disks.sh", "msg": "non-zero return code", "rc": 1, "stderr": "Shared connection to 10.10.8.101 closed.\r\n", "stdout": "iscsiadm: No active sessions.\r\nThis script will prevent listing iscsi devices when multipath CLI is called\r\nwithout parameters, and so no LUNs will be discovered by applications like VDSM\r\n(oVirt, RHV) which shell-out to call `/usr/sbin/multipath` after target login\r\nJun 12 14:30:48 | 3614187705c01820022b002b00c52f72e2: map in use\r\nJun 12 14:30:48 | failed to remove multipath map 3614187705c01820022b002b00c52f72e\r\n", "stdout_lines": ["iscsiadm: No active sessions.", "This script will prevent listing iscsi devices when multipath CLI is called", "without parameters, and so no LUNs will be discovered by applications like VDSM", "(oVirt, RHV) which shell-out to call `/usr/sbin/multipath` after target login", "Jun 12 14:30:48 | 3614187705c01820022b002b00c52f72e2: map in use", "Jun 12 14:30:48 | failed to remove multipath map 3614187705c01820022b002b00c52f72e"]}
to retry, use: --limit @/tmp/tmpbYZBC6/run-script.retry

PLAY RECAP *************************************************************************************************************************************************************
10.10.8.101 : ok=0 changed=0 unreachable=0 failed=1
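
From the stdout above, the part that actually fails seems to be the script trying to flush the existing multipath map for my local disk, which multipath reports as "map in use". In case it helps, these are the checks I was planning to run on the host to see what is holding that map (nothing gdeploy-specific, just standard multipath/device-mapper tools; the WWID is copied from the "map in use" line above):

# List active multipath maps and the paths/devices behind them
multipath -ll

# Show what sits on top of each device-mapper device (LVM, filesystems, ...)
dmsetup ls --tree
lsblk

# Try to flush the specific map from the error, to reproduce the "map in use" message
multipath -f 3614187705c01820022b002b00c52f72e2
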
This is my gdeploy.conf:
[hosts]
10.10.8.101
[script1]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h 10.10.8.111
[disktype]
jbod
[diskcount]
12
[stripesize]
256
[service1]
action=enable
service=chronyd
[service2]
action=restart
service=chronyd
[shell2]
action=execute
command=vdsm-tool configure --force
[script3]
action=execute
file=/usr/share/gdeploy/scripts/blacklist_all_disks.sh
ignore_script_errors=no
[pv]
action=create
devices=sdb
ignore_pv_errors=no
[vg1]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
[lv1]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=804GB
poolmetadatasize=4GB
[lv2]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick
[lv3]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=400GB
[lv4]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=400GB
[selinux]
yes
[service3]
action=restart
service=glusterd
slice_setup=yes
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
services=glusterfs
[script2]
action=execute
file=/usr/share/gdeploy/scripts/disable-gluster-hooks.sh
[shell3]
action=execute
command=usermod -a -G gluster qemu
[volume1]
action=create
volname=engine
transport=tcp
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
#brick_dirs=10.10.8.111:/gluster_bricks/engine/engine
brick_dirs=/gluster_bricks/engine/engine
ignore_volume_errors=no
[volume2]
action=create
volname=data
transport=tcp
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
#brick_dirs=10.10.8.111:/gluster_bricks/data/data
brick_dirs=/gluster_bricks/data/data
ignore_volume_errors=no
[volume3]
action=create
volname=vmstore
transport=tcp
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
#brick_dirs=10.10.8.111:/gluster_bricks/vmstore/vmstore
brick_dirs=/gluster_bricks/vmstore/vmstore
ignore_volume_errors=no
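
One more thing I was wondering about: since blacklist_all_disks.sh cannot remove the existing map, would it be acceptable to manually blacklist the local disk's WWID in /etc/multipath.conf before re-running gdeploy? Something along these lines (WWID taken from my error output above; I have not applied it yet and I am not sure it is the recommended approach, so please correct me if there is a better way):

# /etc/multipath.conf - manual addition I was considering (not applied yet)
blacklist {
    wwid 3614187705c01820022b002b00c52f72e2
}

# then reload multipathd and try flushing the map again
systemctl reload multipathd
multipath -f 3614187705c01820022b002b00c52f72e2
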
Any thoughts on this? I've been scratching my head trying to get this sorted out...
Thank you very much !
Have a nice day !
--
Best regards, Leo David