It's fixed in lvm2-2.02.46-3.el5.
Can you please check your lvm2 version?
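For reference, on an RPM-based host you can check with (a minimal example; the exact package NVR will differ on CentOS/RHEL 7 oVirt nodes):

  $ rpm -q lvm2
  $ lvm version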
On Mon, Mar 11, 2019 at 1:57 PM Sahina Bose <sabose(a)redhat.com> wrote:
+Gobinda Das +Dhanjal Parth
On Mon, Mar 11, 2019 at 1:42 AM <adrianquintero(a)gmail.com> wrote:
>
> Hello, I am trying to run a hyperconverged setup ("Configure gluster storage and oVirt hosted engine"), however I get the following error:
>
> __________________________________________________________________________________________________
> PLAY [gluster_servers] *********************************************************
>
> TASK [Create LVs with specified size for the VGs] ******************************
> failed: [ovirt01.grupokino.com] (item={u'lv': u'gluster_thinpool_sdb', u'size': u'45GB', u'extent': u'100%FREE', u'vg': u'gluster_vg_sdb'}) => {"changed": false, "item": {"extent": "100%FREE", "lv": "gluster_thinpool_sdb", "size": "45GB", "vg": "gluster_vg_sdb"}, "msg": "lvcreate: metadata/pv_map.c:198: consume_pv_area: Assertion `to_go <= pva->count' failed.\n", "rc": -6}
> to retry, use: --limit @/tmp/tmpwo4SNB/lvcreate.retry
>
> PLAY RECAP *********************************************************************
> ovirt01.grupokino.com : ok=0 changed=0 unreachable=0 failed=1
> __________________________________________________________________________________________________
>
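For debugging, it may be worth reproducing the failing step by hand on ovirt01. The following is only a sketch of roughly what the gdeploy thinpool task boils down to (assuming it passes the configured size and poolmetadatasize straight to lvcreate; names taken from the config below):

  $ lvcreate --type thin-pool -L 45G --poolmetadatasize 3G -n gluster_thinpool_sdb gluster_vg_sdb

If that command hits the same assertion, the problem is in LVM itself rather than in the gdeploy configuration.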
> I know that the oVirt Hosted Engine Setup GUI "gluster wizard" (gluster deployment) does not populate the gdeployConfig.conf file properly (generated gdeploy configuration: /var/lib/ovirt-hosted-engine-setup/gdeploy/gdeployConfig.conf), so I have tried to modify it to fit our needs but keep getting the above error every time.
>
> Any ideas or comments are welcome... Thanks!
>
>
>
>
> My servers are set up with 4x50GB disks, 1 for the OS and the rest for the Gluster hyperconverged setup.
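As a sanity check it may help to confirm that the VGs actually have room for size=45GB plus poolmetadatasize=3GB; on a 50GB disk the usable free space is slightly less than the raw size once PV metadata is accounted for. For example:

  $ pvs --units g
  $ vgs --units g -o vg_name,vg_size,vg_free,vg_extent_size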
> __________________________________________________________________________________________________
> my gdeployConfig.conf file:
> __________________________________________________________________________________________________
> #gdeploy configuration generated by cockpit-gluster plugin
> [hosts]
> ovirt01.mydomain.com
> ovirt02.mydomain.com
> ovirt03.mydomain.com
>
> [script1:ovirt01.mydomain.com]
> action=execute
> ignore_script_errors=no
> file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb,sdc,sdd -h ovirt01.mydomain.com,ovirt02.mydomain.com,ovirt03.mydomain.com
>
> [script1:ovirt02.mydomain.com]
> action=execute
> ignore_script_errors=no
> file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb,sdc,sdd -h ovirt01.mydomain.com,ovirt02.mydomain.com,ovirt03.mydomain.com
>
> [script1:ovirt03.mydomain.com]
> action=execute
> ignore_script_errors=no
> file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb,sdc,sdd -h ovirt01.mydomain.com,ovirt02.mydomain.com,ovirt03.mydomain.com
>
> [disktype]
> jbod
>
> [diskcount]
> 3
>
> [stripesize]
> 256
>
> [service1]
> action=enable
> service=chronyd
>
> [service2]
> action=restart
> service=chronyd
>
> [shell2]
> action=execute
> command=vdsm-tool configure --force
>
> [script3]
> action=execute
> file=/usr/share/gdeploy/scripts/blacklist_all_disks.sh
> ignore_script_errors=no
>
> [pv1:ovirt01.mydomain.com]
> action=create
> devices=sdb
> ignore_pv_errors=no
>
> [pv1:ovirt02.mydomain.com]
> action=create
> devices=sdb
> ignore_pv_errors=no
>
> [pv1:ovirt03.mydomain.com]
> action=create
> devices=sdb
> ignore_pv_errors=no
>
> [pv2:ovirt01.mydomain.com]
> action=create
> devices=sdc
> ignore_pv_errors=no
>
> [pv2:ovirt02.mydomain.com]
> action=create
> devices=sdc
> ignore_pv_errors=no
>
> [pv2:ovirt03.mydomain.com]
> action=create
> devices=sdc
> ignore_pv_errors=no
>
> [pv3:ovirt01.mydomain.com]
> action=create
> devices=sdd
> ignore_pv_errors=no
>
> [pv3:ovirt02.mydomain.com]
> action=create
> devices=sdd
> ignore_pv_errors=no
>
> [pv3:ovirt03.mydomain.com]
> action=create
> devices=sdd
> ignore_pv_errors=no
>
> [vg1:ovirt01.mydomain.com]
> action=create
> vgname=gluster_vg_sdb
> pvname=sdb
> ignore_vg_errors=no
>
> [vg1:ovirt02.mydomain.com]
> action=create
> vgname=gluster_vg_sdb
> pvname=sdb
> ignore_vg_errors=no
>
> [vg1:ovirt03.mydomain.com]
> action=create
> vgname=gluster_vg_sdb
> pvname=sdb
> ignore_vg_errors=no
>
> [vg2:ovirt01.mydomain.com]
> action=create
> vgname=gluster_vg_sdc
> pvname=sdc
> ignore_vg_errors=no
>
> [vg2:ovirt02.mydomain.com]
> action=create
> vgname=gluster_vg_sdc
> pvname=sdc
> ignore_vg_errors=no
>
> [vg2:ovirt03.mydomain.com]
> action=create
> vgname=gluster_vg_sdc
> pvname=sdc
> ignore_vg_errors=no
>
> [vg3:ovirt01.mydomain.com]
> action=create
> vgname=gluster_vg_sdd
> pvname=sdd
> ignore_vg_errors=no
>
> [vg3:ovirt02.mydomain.com]
> action=create
> vgname=gluster_vg_sdd
> pvname=sdd
> ignore_vg_errors=no
>
> [vg3:ovirt03.mydomain.com]
> action=create
> vgname=gluster_vg_sdd
> pvname=sdd
> ignore_vg_errors=no
>
> [lv1:ovirt01.mydomain.com]
> action=create
> poolname=gluster_thinpool_sdb
> ignore_lv_errors=no
> vgname=gluster_vg_sdb
> lvtype=thinpool
> size=45GB
> poolmetadatasize=3GB
>
> [lv2:ovirt02.mydomain.com]
> action=create
> poolname=gluster_thinpool_sdc
> ignore_lv_errors=no
> vgname=gluster_vg_sdc
> lvtype=thinpool
> size=45GB
> poolmetadatasize=3GB
>
> [lv3:ovirt03.mydomain.com]
> action=create
> poolname=gluster_thinpool_sdd
> ignore_lv_errors=no
> vgname=gluster_vg_sdd
> lvtype=thinpool
> size=45GB
> poolmetadatasize=3GB
>
> [lv4:ovirt01.mydomain.com]
> action=create
> lvname=gluster_lv_engine
> ignore_lv_errors=no
> vgname=gluster_vg_sdb
> mount=/gluster_bricks/engine
> size=42GB
> lvtype=thick
>
> [lv5:ovirt01.mydomain.com]
> action=create
> lvname=gluster_lv_data
> ignore_lv_errors=no
> vgname=gluster_vg_sdc
> mount=/gluster_bricks/data
> lvtype=thinlv
> poolname=gluster_thinpool_sdc
> virtualsize=42GB
>
> [lv6:ovirt01.mydomain.com]
> action=create
> lvname=gluster_lv_vmstore
> ignore_lv_errors=no
> vgname=gluster_vg_sdd
> mount=/gluster_bricks/vmstore
> lvtype=thinlv
> poolname=gluster_thinpool_sdd
> virtualsize=42GB
>
> [lv7:ovirt02.mydomain.com]
> action=create
> lvname=gluster_lv_engine
> ignore_lv_errors=no
> vgname=gluster_vg_sdb
> mount=/gluster_bricks/engine
> size=42GB
> lvtype=thick
>
> [lv8:ovirt02.mydomain.com]
> action=create
> lvname=gluster_lv_data
> ignore_lv_errors=no
> vgname=gluster_vg_sdc
> mount=/gluster_bricks/data
> lvtype=thinlv
> poolname=gluster_thinpool_sdc
> virtualsize=42GB
>
> [lv9:ovirt02.mydomain.com]
> action=create
> lvname=gluster_lv_vmstore
> ignore_lv_errors=no
> vgname=gluster_vg_sdd
> mount=/gluster_bricks/vmstore
> lvtype=thinlv
> poolname=gluster_thinpool_sdd
> virtualsize=42GB
>
> [lv10:ovirt03.mydomain.com]
> action=create
> lvname=gluster_lv_engine
> ignore_lv_errors=no
> vgname=gluster_vg_sdb
> mount=/gluster_bricks/engine
> size=42GB
> lvtype=thick
>
> [lv11:ovirt03.mydomain.com]
> action=create
> lvname=gluster_lv_data
> ignore_lv_errors=no
> vgname=gluster_vg_sdc
> mount=/gluster_bricks/data
> lvtype=thinlv
> poolname=gluster_thinpool_sdc
> virtualsize=42GB
>
> [lv12:ovirt03.mydomain.com]
> action=create
> lvname=gluster_lv_vmstore
> ignore_lv_errors=no
> vgname=gluster_vg_sdd
> mount=/gluster_bricks/vmstore
> lvtype=thinlv
> poolname=gluster_thinpool_sdd
> virtualsize=42GB
>
> [selinux]
> yes
>
> [service3]
> action=restart
> service=glusterd
> slice_setup=yes
>
> [firewalld]
> action=add
> ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
> services=glusterfs
>
> [script2]
> action=execute
> file=/usr/share/gdeploy/scripts/disable-gluster-hooks.sh
>
> [shell3]
> action=execute
> command=usermod -a -G gluster qemu
>
> [volume1]
> action=create
> volname=engine
> transport=tcp
> replica=yes
> replica_count=3
> key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
> value=virt,36,36,30,on,off,enable
> brick_dirs=ovirt01.mydomain.com:/gluster_bricks/engine/engine,ovirt02.mydomain.com:/gluster_bricks/engine/engine,ovirt03.mydomain.com:/gluster_bricks/engine/engine
> ignore_volume_errors=no
>
> [volume2]
> action=create
> volname=data
> transport=tcp
> replica=yes
> replica_count=3
> key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
> value=virt,36,36,30,on,off,enable
> brick_dirs=ovirt01.mydomain.com:/gluster_bricks/data/data,ovirt02.mydomain.com:/gluster_bricks/data/data,ovirt03.mydomain.com:/gluster_bricks/data/data
> ignore_volume_errors=no
>
> [volume3]
> action=create
> volname=vmstore
> transport=tcp
> replica=yes
> replica_count=3
> key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
> value=virt,36,36,30,on,off,enable
> brick_dirs=ovirt01.mydomain.com:/gluster_bricks/vmstore/vmstore,ovirt02.mydomain.com:/gluster_bricks/vmstore/vmstore,ovirt03.mydomain.com:/gluster_bricks/vmstore/vmstore
> ignore_volume_errors=no
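If you keep hand-editing the generated file, you can also run it directly with gdeploy to iterate faster than re-running the whole wizard (path taken from the setup output above; adjust if yours differs):

  $ gdeploy -c /var/lib/ovirt-hosted-engine-setup/gdeploy/gdeployConfig.conf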