Hello,
I am trying to set up a single hyperconverged node. The disks that will hold my engine and all VM storage are in one RAID 5 volume, built with hardware RAID on an LSI controller (LSI 9361-4i). I am unsure of the appropriate values to use in the Gluster (gdeploy) config. Here is the info about my environment.
[root@vmh ~]# mount
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime,seclabel)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
devtmpfs on /dev type devtmpfs (rw,nosuid,seclabel,size=65713924k,nr_inodes=16428481,mode=755)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev,seclabel)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,seclabel,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,seclabel,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,freezer)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,pids)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,cpuset)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,cpuacct,cpu)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,blkio)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,hugetlb)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,net_prio,net_cls)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,devices)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,perf_event)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,memory)
configfs on /sys/kernel/config type configfs (rw,relatime)
/dev/mapper/onn_vmh-ovirt--node--ng--4.2.5.1--0.20180821.0+1 on / type ext4 (rw,relatime,seclabel,discard,stripe=16,data=ordered)
selinuxfs on /sys/fs/selinux type selinuxfs (rw,relatime)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=22,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=43386)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
mqueue on /dev/mqueue type mqueue (rw,relatime,seclabel)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime,seclabel)
hugetlbfs on /dev/hugepages1G type hugetlbfs (rw,relatime,seclabel,pagesize=1G)
/dev/mapper/onn_vmh-tmp on /tmp type ext4 (rw,relatime,seclabel,discard,stripe=16,data=ordered)
/dev/mapper/onn_vmh-home on /home type ext4 (rw,relatime,seclabel,discard,stripe=16,data=ordered)
/dev/sdb1 on /boot type ext4 (rw,relatime,seclabel,data=ordered)
/dev/mapper/onn_vmh-var on /var type ext4 (rw,relatime,seclabel,discard,stripe=16,data=ordered)
/dev/mapper/onn_vmh-var_log on /var/log type ext4 (rw,relatime,seclabel,discard,stripe=16,data=ordered)
/dev/mapper/onn_vmh-var_crash on /var/crash type ext4 (rw,relatime,seclabel,discard,stripe=16,data=ordered)
/dev/mapper/onn_vmh-var_log_audit on /var/log/audit type ext4 (rw,relatime,seclabel,discard,stripe=16,data=ordered)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
/dev/mapper/3600605b00a2faca222fb4da81ac9bdb1p1 on /data type ext4 (rw,relatime,seclabel,discard,stripe=64,data=ordered)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,seclabel,size=13149252k,mode=700)
[root@vmh ~]# blkid
/dev/sda1: UUID="ce130131-c457-46a0-b6de-e50cc89a6da3" TYPE="ext4" PARTUUID="5fff73ae-70d4-4697-a307-5f68a4c00f4c"
/dev/sdb1: UUID="47422043-e5d0-4541-86ff-193f61a779b0" TYPE="ext4"
/dev/sdb2: UUID="2f2cf71b-b68e-985a-6433-ed1889595df0" UUID_SUB="a3e32abe-109c-b436-7712-6c9d9f1d57c9" LABEL="vmh.cyber-range.lan:pv00" TYPE="linux_raid_member"
/dev/sdc1: UUID="2f2cf71b-b68e-985a-6433-ed1889595df0" UUID_SUB="7132acc1-e210-f645-5df2-e1a1eff7f836" LABEL="vmh.cyber-range.lan:pv00" TYPE="linux_raid_member"
/dev/md127: UUID="utB9xu-zva6-j5Ci-3E49-uDya-g2AJ-MQu5Rd" TYPE="LVM2_member"
/dev/mapper/onn_vmh-ovirt--node--ng--4.2.5.1--0.20180821.0+1: UUID="8656ff1e-d217-4088-b353-6d2b9f602ce3" TYPE="ext4"
/dev/mapper/onn_vmh-swap: UUID="57a165d1-116e-4e64-a694-2618ffa3a79e" TYPE="swap"
/dev/mapper/3600605b00a2faca222fb4da81ac9bdb1p1: UUID="ce130131-c457-46a0-b6de-e50cc89a6da3" TYPE="ext4" PARTUUID="dac9e1fc-b0d7-43da-b52c-66bb059d8137"
/dev/mapper/onn_vmh-root: UUID="7cc65568-d408-43ab-a793-b6c110d7ba98" TYPE="ext4"
/dev/mapper/onn_vmh-home: UUID="c7e344d0-f401-4504-b52e-9b5c6023c10e" TYPE="ext4"
/dev/mapper/onn_vmh-tmp: UUID="3323e010-0dab-4166-b15d-d739a09b4c03" TYPE="ext4"
/dev/mapper/onn_vmh-var: UUID="c20af647-364d-4a11-8776-8e6aac362425" TYPE="ext4"
/dev/mapper/onn_vmh-var_log: UUID="eb2e159a-4d50-411d-912c-c8e11f297fea" TYPE="ext4"
/dev/mapper/onn_vmh-var_log_audit: UUID="65aa85b9-c053-4244-b3b0-c5f91de8ad59" TYPE="ext4"
/dev/mapper/onn_vmh-var_crash: UUID="579bbef5-bab6-49ce-b8ed-ab92afa85ea8" TYPE="ext4"
/dev/mapper/3600605b00a2faca222fb4da81ac9bdb1: PTTYPE="gpt"
[root@vmh ~]# lsblk
NAME                                                   MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINT
sda                                                      8:0    0   8.2T  0 disk
├─sda1                                                   8:1    0   8.2T  0 part
└─3600605b00a2faca222fb4da81ac9bdb1                    253:5    0   8.2T  0 mpath
  └─3600605b00a2faca222fb4da81ac9bdb1p1                253:6    0   8.2T  0 part  /data
sdb                                                      8:16   0 223.6G  0 disk
├─sdb1                                                   8:17   0     1G  0 part  /boot
└─sdb2                                                   8:18   0 222.6G  0 part
  └─md127                                                9:127  0 222.5G  0 raid1
    ├─onn_vmh-pool00_tmeta                             253:0    0     1G  0 lvm
    │ └─onn_vmh-pool00-tpool                           253:2    0 173.6G  0 lvm
    │   ├─onn_vmh-ovirt--node--ng--4.2.5.1--0.20180821.0+1 253:3 0 146.6G 0 lvm   /
    │   ├─onn_vmh-pool00                               253:7    0 173.6G  0 lvm
    │   ├─onn_vmh-root                                 253:8    0 146.6G  0 lvm
    │   ├─onn_vmh-home                                 253:9    0     1G  0 lvm   /home
    │   ├─onn_vmh-tmp                                  253:10   0     1G  0 lvm   /tmp
    │   ├─onn_vmh-var                                  253:11   0    15G  0 lvm   /var
    │   ├─onn_vmh-var_log                              253:12   0     8G  0 lvm   /var/log
    │   ├─onn_vmh-var_log_audit                        253:13   0     2G  0 lvm   /var/log/audit
    │   └─onn_vmh-var_crash                            253:14   0    10G  0 lvm   /var/crash
    ├─onn_vmh-pool00_tdata                             253:1    0 173.6G  0 lvm
    │ └─onn_vmh-pool00-tpool                           253:2    0 173.6G  0 lvm
    │   ├─onn_vmh-ovirt--node--ng--4.2.5.1--0.20180821.0+1 253:3 0 146.6G 0 lvm   /
    │   ├─onn_vmh-pool00                               253:7    0 173.6G  0 lvm
    │   ├─onn_vmh-root                                 253:8    0 146.6G  0 lvm
    │   ├─onn_vmh-home                                 253:9    0     1G  0 lvm   /home
    │   ├─onn_vmh-tmp                                  253:10   0     1G  0 lvm   /tmp
    │   ├─onn_vmh-var                                  253:11   0    15G  0 lvm   /var
    │   ├─onn_vmh-var_log                              253:12   0     8G  0 lvm   /var/log
    │   ├─onn_vmh-var_log_audit                        253:13   0     2G  0 lvm   /var/log/audit
    │   └─onn_vmh-var_crash                            253:14   0    10G  0 lvm   /var/crash
    └─onn_vmh-swap                                     253:4    0     4G  0 lvm   [SWAP]
sdc                                                      8:32   0 223.6G  0 disk
└─sdc1                                                   8:33   0 222.6G  0 part
  └─md127                                                9:127  0 222.5G  0 raid1
    ├─onn_vmh-pool00_tmeta                             253:0    0     1G  0 lvm
    │ └─onn_vmh-pool00-tpool                           253:2    0 173.6G  0 lvm
    │   ├─onn_vmh-ovirt--node--ng--4.2.5.1--0.20180821.0+1 253:3 0 146.6G 0 lvm   /
    │   ├─onn_vmh-pool00                               253:7    0 173.6G  0 lvm
    │   ├─onn_vmh-root                                 253:8    0 146.6G  0 lvm
    │   ├─onn_vmh-home                                 253:9    0     1G  0 lvm   /home
    │   ├─onn_vmh-tmp                                  253:10   0     1G  0 lvm   /tmp
    │   ├─onn_vmh-var                                  253:11   0    15G  0 lvm   /var
    │   ├─onn_vmh-var_log                              253:12   0     8G  0 lvm   /var/log
    │   ├─onn_vmh-var_log_audit                        253:13   0     2G  0 lvm   /var/log/audit
    │   └─onn_vmh-var_crash                            253:14   0    10G  0 lvm   /var/crash
    ├─onn_vmh-pool00_tdata                             253:1    0 173.6G  0 lvm
    │ └─onn_vmh-pool00-tpool                           253:2    0 173.6G  0 lvm
    │   ├─onn_vmh-ovirt--node--ng--4.2.5.1--0.20180821.0+1 253:3 0 146.6G 0 lvm   /
    │   ├─onn_vmh-pool00                               253:7    0 173.6G  0 lvm
    │   ├─onn_vmh-root                                 253:8    0 146.6G  0 lvm
    │   ├─onn_vmh-home                                 253:9    0     1G  0 lvm   /home
    │   ├─onn_vmh-tmp                                  253:10   0     1G  0 lvm   /tmp
    │   ├─onn_vmh-var                                  253:11   0    15G  0 lvm   /var
    │   ├─onn_vmh-var_log                              253:12   0     8G  0 lvm   /var/log
    │   ├─onn_vmh-var_log_audit                        253:13   0     2G  0 lvm   /var/log/audit
    │   └─onn_vmh-var_crash                            253:14   0    10G  0 lvm   /var/crash
    └─onn_vmh-swap                                     253:4    0     4G  0 lvm   [SWAP]
[root@vmh ~]# df -h
Filesystem                                                     Size  Used Avail Use% Mounted on
/dev/mapper/onn_vmh-ovirt--node--ng--4.2.5.1--0.20180821.0+1  145G  1.9G  135G   2% /
devtmpfs                                                        63G     0   63G   0% /dev
tmpfs                                                           63G  4.0K   63G   1% /dev/shm
tmpfs                                                           63G   19M   63G   1% /run
tmpfs                                                           63G     0   63G   0% /sys/fs/cgroup
/dev/mapper/onn_vmh-tmp                                        976M  2.8M  906M   1% /tmp
/dev/mapper/onn_vmh-home                                       976M  2.6M  907M   1% /home
/dev/sdb1                                                      976M  204M  706M  23% /boot
/dev/mapper/onn_vmh-var                                         15G   43M   14G   1% /var
/dev/mapper/onn_vmh-var_log                                    7.8G   58M  7.3G   1% /var/log
/dev/mapper/onn_vmh-var_crash                                  9.8G   37M  9.2G   1% /var/crash
/dev/mapper/onn_vmh-var_log_audit                              2.0G  7.7M  1.8G   1% /var/log/audit
/dev/mapper/3600605b00a2faca222fb4da81ac9bdb1p1                8.2T   90M  7.8T   1% /data
tmpfs                                                           13G     0   13G   0% /run/user/0
[root@vmh ~]# lspci | grep RAID
3b:00.0 RAID bus controller: LSI Logic / Symbios Logic MegaRAID SAS-3 3108 [Invader] (rev 02)
[root@vmh ~]# lsmod | grep mega
megaraid_sas 145373 2
[root@vmh ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: enp96s0f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 0c:c4:7a:f9:b9:88 brd ff:ff:ff:ff:ff:ff
    inet 172.30.50.3/24 brd 172.30.50.255 scope global noprefixroute dynamic enp96s0f0
       valid_lft 47994sec preferred_lft 47994sec
    inet6 fe80::119b:61e4:f9:1dca/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: enp96s0f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 0c:c4:7a:f9:b9:89 brd ff:ff:ff:ff:ff:ff
    inet 172.30.50.4/24 brd 172.30.50.255 scope global noprefixroute dynamic enp96s0f1
       valid_lft 47994sec preferred_lft 47994sec
    inet6 fe80::498f:1c33:7de6:1fd/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
19: ;vdsmdummy;: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
    link/ether 2e:78:2c:51:c8:12 brd ff:ff:ff:ff:ff:ff
In DNS I have defined the .3 address as vmh.cyber-range.lan and the .4 address as ovirtbe.cyber-range.lan; .3 is the frontend IP and .4 is the backend IP.
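For reference, here is how I read that name-to-address mapping (the records live in DNS; this is just the /etc/hosts equivalent, with the NICs taken from the ip a output above):

172.30.50.3   vmh.cyber-range.lan       # frontend / management (enp96s0f0)
172.30.50.4   ovirtbe.cyber-range.lan   # backend / Gluster storage (enp96s0f1)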
Here is what I have so far in my Gluster (gdeploy) config file; I'm sure it needs further edits.
[hosts]
vmh.cyber-range.lan
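# Question: since ovirtbe.cyber-range.lan (the .4 address) is meant to be the storage
# backend, should that FQDN go here instead of the frontend name? I am not sure.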
[disktype]
jbod #Possible values raid6, raid10, raid5, jbod
[diskcount]
@NUMBER_OF_DATA_DISKS@ #Ignored in case of jbod
[stripesize]
256 #256 in case of jbod
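# My understanding, please correct me if this is wrong: because the brick device is
# hardware RAID 5 rather than true jbod, disktype should probably be raid5, diskcount
# should be the number of DATA disks (total drives minus one for RAID 5), and stripesize
# should match the controller's strip size in KB. Purely as a made-up illustration, a
# 4-drive RAID 5 with a 256KB strip would be:
#   disktype=raid5
#   diskcount=3
#   stripesize=256
# which, if I follow the docs, gives a PV data alignment of 3 x 256KB = 768KB.
# I do not know the right numbers for my controller/volume, hence this post.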
[script1]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d @DEVICE@
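# For what it's worth, I assume @DEVICE@ ends up being the RAID 5 volume, which on this
# host is sda (seen through multipath as /dev/mapper/3600605b00a2faca222fb4da81ac9bdb1
# and currently mounted at /data). Please correct me if the multipath name should be
# used here instead of sda.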
#[vdo] # Note: Uncomment if dedupe & compression needs to be enabled on device. Needs kmod-vdo module
#action=create
#names=@VDO_DEVICE_Name@
#devices=@DEVICE@
#logicalsize=@logical_size@T # Note:logicalsize is 10x physical space on disk
##slabsize=32G # Note: used only when the physical size is few TBs
#blockmapcachesize=128M
#readcache=enabled
#readcachesize=20M
#emulate512=enabled
#writepolicy=auto
[pv]
action=create
devices=@DEVICE@ # Change to @VDO_DEVICE_name@ if using vdo
[vg1]
action=create
vgname=gluster_vg1
pvname=@DEVICE@ # Change to @VDO_DEVICE_name@ if using vdo
[lv1]
action=create
vgname=gluster_vg1
lvname=engine_lv
lvtype=thick
size=100GB
mount=/gluster_bricks/engine
[lv2]
action=create
vgname=gluster_vg1
poolname=lvthinpool
lvtype=thinpool
poolmetadatasize=7.7TB
size=7.7TB #For example: 18000GB, depending on device capacity. Units to be specified.
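# I copied poolmetadatasize from the example, but 7.7TB looks suspicious to me -- I
# believe LVM caps thin pool metadata at roughly 16GB, so please tell me what a sane
# value is here.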
[lv3]
action=create
lvname=lv_vmdisks
poolname=lvthinpool
vgname=gluster_vg1
lvtype=thinlv
mount=/gluster_bricks/vmstore
virtualsize=@SIZE@ # Units to be specified, for instance 5000GB
[lv4]
action=create
lvname=lv_datadisks
poolname=lvthinpool
vgname=gluster_vg1
lvtype=thinlv
mount=/gluster_bricks/data
virtualsize=@SIZE@ # Units to be specified, for instance 5000GB
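# Purely as an illustration of how I read [lv3]/[lv4]: both thin LVs carve virtual
# space out of the 7.7TB lvthinpool, so on the 8.2T RAID 5 volume I could for example
# put virtualsize=5000GB for vmstore and virtualsize=2500GB for data; since thin LVs
# only allocate blocks as they are written, the virtual sizes can even overcommit the
# pool. Is that the right way to think about it?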
#[lv5]
#action=setup-cache
#ssd=@SSD_DEVICE@
#vgname=gluster_vg1
#poolname=lvthinpool
#cache_lv=lvcache
#cache_lvsize=5GB # Provide device size
## cachemode=writeback
[shell2]
action=execute
command=vdsm-tool configure --force
[script3]
action=execute
file=/usr/share/gdeploy/scripts/blacklist_all_disks.sh
ignore_script_errors=no
[selinux]
yes
[service3]
action=restart
service=glusterd
slice_setup=yes
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
services=glusterfs
[script2]
action=execute
file=/usr/share/gdeploy/scripts/disable-gluster-hooks.sh
[shell3]
action=execute
command=usermod -a -G gluster qemu
[volume]
action=create
volname=engine
transport=tcp
key=storage.owner-uid,storage.owner-gid,features.shard,performance.low-prio-threads,performance.strict-o-direct,network.remote-dio,network.ping-timeout,user.cifs,nfs.disable,performance.quick-read,performance.read-ahead,performance.io-cache,cluster.eager-lock
value=36,36,on,32,on,off,30,off,on,off,off,off,enable
brick_dirs=/gluster_bricks/engine/engine
ignore_volume_errors=no
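# Sanity check on how I read the key/value lists above: I believe they pair up
# positionally, so for the engine volume gdeploy would effectively run commands like:
#   gluster volume set engine storage.owner-uid 36
#   gluster volume set engine storage.owner-gid 36
#   gluster volume set engine features.shard on
#   gluster volume set engine network.ping-timeout 30
#   gluster volume set engine cluster.eager-lock enable
# (and likewise for the remaining options and for the vmstore/data volumes below).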
[volume2]
action=create
volname=vmstore
transport=tcp
key=storage.owner-uid,storage.owner-gid,features.shard,performance.low-prio-threads,performance.strict-o-direct,network.remote-dio,network.ping-timeout,user.cifs,nfs.disable,performance.quick-read,performance.read-ahead,performance.io-cache,cluster.eager-lock
value=36,36,on,32,on,off,30,off,on,off,off,off,enable
brick_dirs=/gluster_bricks/vmstore/vmstore
ignore_volume_errors=no
[volume3]
action=create
volname=data
transport=tcp
key=storage.owner-uid,storage.owner-gid,features.shard,performance.low-prio-threads,performance.strict-o-direct,network.remote-dio,network.ping-timeout,user.cifs,nfs.disable,performance.quick-read,performance.read-ahead,performance.io-cache,cluster.eager-lock
value=36,36,on,32,on,off,30,off,on,off,off,off,enable
brick_dirs=/gluster_bricks/data/data
ignore_volume_errors=no
Can anyone kindly provide the correct values and any other corrections needed? I am not nearly as good with Linux storage as I'd like to be. Please let me know if any further info is required. Many thanks in advance!