Dear Feral,
> On that note, have you also had issues with gluster not restarting on
> reboot, as well as all of the HA stuff failing on reboot after power
> loss? Thus far, the only way I've got the cluster to come back to life
> is to manually restart glusterd on all nodes, then put the cluster back
> into "not maintenance" mode, and then manually start the hosted-engine
> vm. This also fails after 2 or 3 power losses, even though the entire
> cluster is happy through the first 2.
About gluster not starting: use systemd.mount unit files.
Here is my setup, which has worked so far:
[root@ovirt2 yum.repos.d]# systemctl cat gluster_bricks-engine.mount
# /etc/systemd/system/gluster_bricks-engine.mount
[Unit]
Description=Mount glusterfs brick - ENGINE
Requires = vdo.service
After = vdo.service
Before = glusterd.service
Conflicts = umount.target
[Mount]
What=/dev/mapper/gluster_vg_md0-gluster_lv_engine
Where=/gluster_bricks/engine
Type=xfs
Options=inode64,noatime,nodiratime
[Install]
WantedBy=glusterd.service
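
Keep in mind that systemd derives a mount unit's file name from the
mount point path, so if you adapt this for a different brick path you
can generate the correct name with systemd-escape (an illustrative
check, not part of my setup above):

[root@ovirt2 yum.repos.d]# systemd-escape -p --suffix=mount /gluster_bricks/engine
gluster_bricks-engine.mount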
[root@ovirt2 yum.repos.d]# systemctl cat gluster_bricks-engine.automount
# /etc/systemd/system/gluster_bricks-engine.automount
[Unit]
Description=automount for gluster brick ENGINE
[Automount]
Where=/gluster_bricks/engine
[Install]
WantedBy=multi-user.target
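
Once the unit files are in place, reload systemd and enable them so
they take effect on the next boot; something like:

[root@ovirt2 yum.repos.d]# systemctl daemon-reload
[root@ovirt2 yum.repos.d]# systemctl enable gluster_bricks-engine.mount gluster_bricks-engine.automount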
[root@ovirt2 yum.repos.d]# systemctl cat glusterd
# /etc/systemd/system/glusterd.service
[Unit]
Description=GlusterFS, a clustered file-system server
Requires=rpcbind.service gluster_bricks-engine.mount gluster_bricks-data.mount gluster_bricks-isos.mount
After=network.target rpcbind.service gluster_bricks-engine.mount gluster_bricks-data.mount gluster_bricks-isos.mount
Before=network-online.target
[Service]
Type=forking
PIDFile=/var/run/glusterd.pid
LimitNOFILE=65536
Environment="LOG_LEVEL=INFO"
EnvironmentFile=-/etc/sysconfig/glusterd
ExecStart=/usr/sbin/glusterd -p /var/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS
KillMode=process
SuccessExitStatus=15
[Install]
WantedBy=multi-user.target
# /etc/systemd/system/glusterd.service.d/99-cpu.conf
[Service]
CPUAccounting=yes
Slice=glusterfs.slice
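
To sanity-check that glusterd really waits for the bricks, you can
inspect the ordering and the mount state, for example:

[root@ovirt2 yum.repos.d]# systemctl list-dependencies --after glusterd.service
[root@ovirt2 yum.repos.d]# systemctl status gluster_bricks-engine.mount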
Best Regards,
Strahil Nikolov