I see that the self-heal daemon is not running.
Just try the following from host1:
systemctl stop glusterd; sleep 5; systemctl start glusterd
for i in $(gluster volume list); do gluster volume set $i cluster.granular-entry-heal enable; done
And then rerun the Ansible flow.
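Before rerunning, you can verify that the self-heal daemons came back and that the option took effect (the heal summary needs a reasonably recent Gluster release):

for i in $(gluster volume list); do gluster volume get $i cluster.granular-entry-heal; done
gluster volume status engine | grep -i 'self-heal'
gluster volume heal engine info summary

Repeat the last two commands for the data and vmstore volumes; the heal summary should show no pending entries once the daemons have caught up.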
Best Regards,
Strahil Nikolov
On Monday, December 21, 2020, 17:54:42 GMT+2, Charles Lam <clam2718(a)gmail.com> wrote:
Thanks so very much Strahil for your continued assistance!
[root@fmov1n1 conf.d]# gluster pool list
UUID                                  Hostname        State
16e921fb-99d3-4a2e-81e6-ba095dbc14ca  host2.fqdn.tld  Connected
d4488961-c854-449a-a211-1593810df52f  host3.fqdn.tld  Connected
f9f9282c-0c1d-405a-a3d3-815e5c6b2606  localhost       Connected
[root@fmov1n1 conf.d]# gluster volume list
data
engine
vmstore
[root@fmov1n1 conf.d]# for i in $(gluster volume list); do gluster volume status $i; gluster volume info $i; echo "#######################################################################################"; done
Status of volume: data
Gluster process                                 TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick host1.fqdn.tld:/gluster_bricks/data/data  49153     0          Y       899467
Brick host2.fqdn.tld:/gluster_bricks/data/data  49153     0          Y       820456
Brick host3.fqdn.tld:/gluster_bricks/data/data  49153     0          Y       820482
Self-heal Daemon on localhost                   N/A       N/A        Y       897788
Self-heal Daemon on host3.fqdn.tld              N/A       N/A        Y       820406
Self-heal Daemon on host2.fqdn.tld              N/A       N/A        Y       820367
Task Status of Volume data
------------------------------------------------------------------------------
There are no active volume tasks
Volume Name: data
Type: Replicate
Volume ID: b4e984c8-7c43-4faa-92e1-84351a645408
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: host1.fqdn.tld:/gluster_bricks/data/data
Brick2: host2.fqdn.tld:/gluster_bricks/data/data
Brick3: host3.fqdn.tld:/gluster_bricks/data/data
Options Reconfigured:
performance.strict-o-direct: on
network.ping-timeout: 30
storage.owner-gid: 36
storage.owner-uid: 36
server.event-threads: 4
client.event-threads: 4
cluster.choose-local: off
user.cifs: off
features.shard: on
cluster.shd-wait-qlength: 10000
cluster.shd-max-threads: 8
cluster.locking-scheme: granular
cluster.data-self-heal-algorithm: full
cluster.server-quorum-type: server
cluster.quorum-type: auto
cluster.eager-lock: enable
network.remote-dio: off
performance.low-prio-threads: 32
performance.io-cache: off
performance.read-ahead: off
performance.quick-read: off
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: on
#######################################################################################
Status of volume: engine
Gluster process                                     TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick host1.fqdn.tld:/gluster_bricks/engine/engine  49152     0          Y       897767
Brick host2.fqdn.tld:/gluster_bricks/engine/engine  49152     0          Y       820346
Brick host3.fqdn.tld:/gluster_bricks/engine/engine  49152     0          Y       820385
Self-heal Daemon on localhost                       N/A       N/A        Y       897788
Self-heal Daemon on host3.fqdn.tld                  N/A       N/A        Y       820406
Self-heal Daemon on host2.fqdn.tld                  N/A       N/A        Y       820367
Task Status of Volume engine
------------------------------------------------------------------------------
There are no active volume tasks
Volume Name: engine
Type: Replicate
Volume ID: 75cc04e6-d1cb-4069-aa25-81550b7878db
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: host1.fqdn.tld:/gluster_bricks/engine/engine
Brick2: host2.fqdn.tld:/gluster_bricks/engine/engine
Brick3: host3.fqdn.tld:/gluster_bricks/engine/engine
Options Reconfigured:
performance.strict-o-direct: on
network.ping-timeout: 30
storage.owner-gid: 36
storage.owner-uid: 36
server.event-threads: 4
client.event-threads: 4
cluster.choose-local: off
user.cifs: off
features.shard: on
cluster.shd-wait-qlength: 10000
cluster.shd-max-threads: 8
cluster.locking-scheme: granular
cluster.data-self-heal-algorithm: full
cluster.server-quorum-type: server
cluster.quorum-type: auto
cluster.eager-lock: enable
network.remote-dio: off
performance.low-prio-threads: 32
performance.io-cache: off
performance.read-ahead: off
performance.quick-read: off
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: on
#######################################################################################
Status of volume: vmstore
Gluster process                                       TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick host1.fqdn.tld:/gluster_bricks/vmstore/vmstore  49154     0          Y       901139
Brick host2.fqdn.tld:/gluster_bricks/vmstore/vmstore  49154     0          Y       820544
Brick host3.fqdn.tld:/gluster_bricks/vmstore/vmstore  49154     0          Y       820587
Self-heal Daemon on localhost                         N/A       N/A        Y       897788
Self-heal Daemon on host2.fqdn.tld                    N/A       N/A        Y       820367
Self-heal Daemon on host3.fqdn.tld                    N/A       N/A        Y       820406
Task Status of Volume vmstore
------------------------------------------------------------------------------
There are no active volume tasks
Volume Name: vmstore
Type: Replicate
Volume ID: 9810637b-2bae-48ae-8e4e-786bb92d18d7
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: host1.fqdn.tld:/gluster_bricks/vmstore/vmstore
Brick2: host2.fqdn.tld:/gluster_bricks/vmstore/vmstore
Brick3: host3.fqdn.tld:/gluster_bricks/vmstore/vmstore
Options Reconfigured:
performance.strict-o-direct: on
network.ping-timeout: 30
storage.owner-gid: 36
storage.owner-uid: 36
server.event-threads: 4
client.event-threads: 4
cluster.choose-local: off
user.cifs: off
features.shard: on
cluster.shd-wait-qlength: 10000
cluster.shd-max-threads: 8
cluster.locking-scheme: granular
cluster.data-self-heal-algorithm: full
cluster.server-quorum-type: server
cluster.quorum-type: auto
cluster.eager-lock: enable
network.remote-dio: off
performance.low-prio-threads: 32
performance.io-cache: off
performance.read-ahead: off
performance.quick-read: off
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: on
#######################################################################################
[root@fmov1n1 conf.d]#
Respectfully,
Charles