Hello,
Thank you very much for your reply.
Here is the output I get from the gluster commands below:

[root@ov-no1 ~]# gluster volume heal engine info split-brain
Brick ov-no1.ariadne-t.local:/gluster_bricks/engine/engine
Status: Connected
Number of entries in split-brain: 0
Brick ov-no2.ariadne-t.local:/gluster_bricks/engine/engine
Status: Connected
Number of entries in split-brain: 0
Brick ov-no3.ariadne-t.local:/gluster_bricks/engine/engine
Status: Connected
Number of entries in split-brain: 0

[root@ov-no1 ~]# gluster volume heal engine info summary
Brick ov-no1.ariadne-t.local:/gluster_bricks/engine/engine
Status: Connected
Total Number of entries: 1
Number of entries in heal pending: 1
Number of entries in split-brain: 0
Number of entries possibly healing: 0
Brick ov-no2.ariadne-t.local:/gluster_bricks/engine/engine
Status: Connected
Total Number of entries: 1
Number of entries in heal pending: 1
Number of entries in split-brain: 0
Number of entries possibly healing: 0
Brick ov-no3.ariadne-t.local:/gluster_bricks/engine/engine
Status: Connected
Total Number of entries: 1
Number of entries in heal pending: 1
Number of entries in split-brain: 0
Number of entries possibly healing: 0

[root@ov-no1 ~]# gluster volume info
Volume Name: data
Type: Replicate
Volume ID: 6c7bb2e4-ed35-4826-81f6-34fcd2d0a984
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (2 + 1) = 3
Transport-type: tcp
Bricks:
Brick1: ov-no1.ariadne-t.local:/gluster_bricks/data/data
Brick2: ov-no2.ariadne-t.local:/gluster_bricks/data/data
Brick3: ov-no3.ariadne-t.local:/gluster_bricks/data/data (arbiter)
Options Reconfigured:
performance.client-io-threads: on
nfs.disable: on
transport.address-family: inet
performance.strict-o-direct: on
performance.quick-read: off
performance.read-ahead: off
performance.io-cache: off
performance.low-prio-threads: 32
network.remote-dio: off
cluster.eager-lock: enable
cluster.quorum-type: auto
cluster.server-quorum-type: server
cluster.data-self-heal-algorithm: full
cluster.locking-scheme: granular
cluster.shd-max-threads: 8
cluster.shd-wait-qlength: 10000
features.shard: on
user.cifs: off
cluster.choose-local: off
client.event-threads: 4
server.event-threads: 4
network.ping-timeout: 30
storage.owner-uid: 36
storage.owner-gid: 36
cluster.granular-entry-heal: enable

Volume Name: engine
Type: Replicate
Volume ID: 7173c827-309f-4e84-a0da-6b2b8eb50264
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: ov-no1.ariadne-t.local:/gluster_bricks/engine/engine
Brick2: ov-no2.ariadne-t.local:/gluster_bricks/engine/engine
Brick3: ov-no3.ariadne-t.local:/gluster_bricks/engine/engine
Options Reconfigured:
performance.client-io-threads: on
nfs.disable: on
transport.address-family: inet
performance.strict-o-direct: on
performance.quick-read: off
performance.read-ahead: off
performance.io-cache: off
performance.low-prio-threads: 32
network.remote-dio: off
cluster.eager-lock: enable
cluster.quorum-type: auto
cluster.server-quorum-type: server
cluster.data-self-heal-algorithm: full
cluster.locking-scheme: granular
cluster.shd-max-threads: 8
cluster.shd-wait-qlength: 10000
features.shard: on
user.cifs: off
cluster.choose-local: off
client.event-threads: 4
server.event-threads: 4
network.ping-timeout: 30
storage.owner-uid: 36
storage.owner-gid: 36
cluster.granular-entry-heal: enable

Volume Name: vmstore
Type: Replicate
Volume ID: 29992fc1-3e09-4360-b651-4449fcd32767
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (2 + 1) = 3
Transport-type: tcp
Bricks:
Brick1: ov-no1.ariadne-t.local:/gluster_bricks/vmstore/vmstore
Brick2: ov-no2.ariadne-t.local:/gluster_bricks/vmstore/vmstore
Brick3: ov-no3.ariadne-t.local:/gluster_bricks/vmstore/vmstore (arbiter)
Options Reconfigured:
performance.client-io-threads: on
nfs.disable: on
transport.address-family: inet
performance.strict-o-direct: on
performance.quick-read: off
performance.read-ahead: off
performance.io-cache: off
performance.low-prio-threads: 32
network.remote-dio: off
cluster.eager-lock: enable
cluster.quorum-type: auto
cluster.server-quorum-type: server
cluster.data-self-heal-algorithm: full
cluster.locking-scheme: granular
cluster.shd-max-threads: 8
cluster.shd-wait-qlength: 10000
features.shard: on
user.cifs: off
cluster.choose-local: off
client.event-threads: 4
server.event-threads: 4
network.ping-timeout: 30
storage.owner-uid: 36
storage.owner-gid: 36
cluster.granular-entry-heal: enable

[root@ov-no1 ~]# gluster volume heal engine info
Brick ov-no1.ariadne-t.local:/gluster_bricks/engine/engine
Status: Connected
Number of entries: 0
Brick ov-no2.ariadne-t.local:/gluster_bricks/engine/engine
/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7
Status: Connected
Number of entries: 1
Brick ov-no3.ariadne-t.local:/gluster_bricks/engine/engine
/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7
Status: Connected
Number of entries: 1

However, checking the contents of the above entry on each host, I get the following output, which shows that the file on the third host has a different date (the current date):

[root@ov-no1 ~]# ls
/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local\:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/
total 4.6G
drwxr-xr-x. 2 vdsm kvm 149 Sep 11 2019 .
drwxr-xr-x. 8 vdsm kvm 8.0K Sep 11 2019 ..
-rw-rw----. 1 vdsm kvm 100G Dec 30 13:20 a48555f4-be23-4467-8a54-400ae7baf9d7
-rw-rw----. 1 vdsm kvm 1.0M Feb 24 20:50 a48555f4-be23-4467-8a54-400ae7baf9d7.lease
-rw-r--r--. 1 vdsm kvm 321 Sep 11 2019 a48555f4-be23-4467-8a54-400ae7baf9d7.meta

[root@ov-no2 ~]# ls
/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local\:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/
total 4.6G
drwxr-xr-x. 2 vdsm kvm 149 Sep 11 2019 .
drwxr-xr-x. 8 vdsm kvm 8.0K Sep 11 2019 ..
-rw-rw----. 1 vdsm kvm 100G Dec 30 13:20 a48555f4-be23-4467-8a54-400ae7baf9d7
-rw-rw----. 1 vdsm kvm 1.0M Feb 24 20:50 a48555f4-be23-4467-8a54-400ae7baf9d7.lease
-rw-r--r--. 1 vdsm kvm 321 Sep 11 2019 a48555f4-be23-4467-8a54-400ae7baf9d7.meta

[root@ov-no3 ~]# ls
/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local\:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/
total 4.6G
drwxr-xr-x. 2 vdsm kvm 149 Sep 11 2019 .
drwxr-xr-x. 8 vdsm kvm 8.0K Sep 11 2019 ..
-rw-rw----. 1 vdsm kvm 100G Mar 3 18:13 a48555f4-be23-4467-8a54-400ae7baf9d7
-rw-rw----. 1 vdsm kvm 1.0M Feb 24 20:50 a48555f4-be23-4467-8a54-400ae7baf9d7.lease
-rw-r--r--. 1 vdsm kvm 321 Sep 11 2019 a48555f4-be23-4467-8a54-400ae7baf9d7.meta

Also, the stat command on each host gives the following:

[root@ov-no1 ~]# stat
/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local\:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7
File:
‘/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7’
Size: 107374182400 Blocks: 9569291 IO Block: 131072 regular file
Device: 29h/41d Inode: 10220711633933694927 Links: 1
Access: (0660/-rw-rw----) Uid: ( 36/ vdsm) Gid: ( 36/ kvm)
Context: system_u:object_r:fusefs_t:s0
Access: 2019-09-11 19:08:58.012200046 +0300
Modify: 2020-12-30 13:20:39.794315096 +0200
Change: 2020-12-30 13:20:39.794315096 +0200
Birth: -

[root@ov-no2 ~]# stat
/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local\:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7
File:
‘/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7’
Size: 107374182400 Blocks: 9569291 IO Block: 131072 regular file
Device: 29h/41d Inode: 10220711633933694927 Links: 1
Access: (0660/-rw-rw----) Uid: ( 36/ vdsm) Gid: ( 36/ kvm)
Context: system_u:object_r:fusefs_t:s0
Access: 2019-09-11 19:08:58.012200046 +0300
Modify: 2020-12-30 13:20:39.794315096 +0200
Change: 2020-12-30 13:20:39.794315096 +0200
Birth: -

[root@ov-no3 ~]# stat
/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local\:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7
File:
‘/rhev/data-center/mnt/glusterSD/ov-no1.ariadne-t.local:_engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7’
Size: 107374182400 Blocks: 9569291 IO Block: 131072 regular file
Device: 29h/41d Inode: 10220711633933694927 Links: 1
Access: (0660/-rw-rw----) Uid: ( 36/ vdsm) Gid: ( 36/ kvm)
Context: system_u:object_r:fusefs_t:s0
Access: 2020-10-02 03:02:51.104699119 +0300
Modify: 2021-03-03 18:23:07.122575696 +0200
Change: 2021-03-03 18:23:07.122575696 +0200
Birth: -
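
If it helps, I can also collect the AFR extended attributes of this file directly from each brick (shown below for the brick on ov-no1; the same command would be run against the corresponding brick path on the other two nodes):

getfattr -d -m . -e hex /gluster_bricks/engine/engine/80f6e393-9718-4738-a14a-64cf43c3d8c2/images/d5de54b6-9f8e-4fba-819b-ebf6780757d2/a48555f4-be23-4467-8a54-400ae7baf9d7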

Should I use the following command to initiate the healing process?

gluster volume heal <VOLNAME> split-brain source-brick <HOSTNAME:BRICKNAME>
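
If so, I assume the invocation in my case would look something like the line below (ov-no1's brick is picked here only as an example; I am not sure which brick should actually be chosen as the source):

gluster volume heal engine split-brain source-brick ov-no1.ariadne-t.local:/gluster_bricks/engine/engine

As far as I understand, the path of the affected file can also be appended to that command to restrict the heal to that single entry.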