Thanks for the reply. Here are the GlusterFS options for the volume; am I missing anything critical?
[root@cluster1-vm ~]# gluster volume info storage
Volume Name: storage
Type: Distributed-Disperse
Volume ID: 67112b70-e319-4629-b768-03df9d9a0e84
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x (4 + 2) = 12
Transport-type: tcp
Bricks:
Brick1: node1-vm:/var/glusterfs/storage/1
Brick2: node2-vm:/var/glusterfs/storage/1
Brick3: node3-vm:/var/glusterfs/storage/1
Brick4: node1-vm:/var/glusterfs/storage/2
Brick5: node2-vm:/var/glusterfs/storage/2
Brick6: node3-vm:/var/glusterfs/storage/2
Brick7: node1-vm:/var/glusterfs/storage/3
Brick8: node2-vm:/var/glusterfs/storage/3
Brick9: node3-vm:/var/glusterfs/storage/3
Brick10: node1-vm:/var/glusterfs/storage/4
Brick11: node2-vm:/var/glusterfs/storage/4
Brick12: node3-vm:/var/glusterfs/storage/4
Options Reconfigured:
storage.owner-gid: 36
storage.owner-uid: 36
network.ping-timeout: 5
performance.client-io-threads: on
server.event-threads: 4
client.event-threads: 4
cluster.choose-local: off
user.cifs: off
features.shard: on
cluster.shd-wait-qlength: 10000
cluster.locking-scheme: granular
cluster.data-self-heal-algorithm: full
cluster.server-quorum-type: server
cluster.quorum-type: auto
cluster.eager-lock: enable
performance.strict-o-direct: on
network.remote-dio: disable
performance.low-prio-threads: 32
performance.io-cache: off
performance.read-ahead: off
performance.quick-read: off
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
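
For reference, any of those values can be adjusted and re-checked with the usual set/get commands, for example (network.ping-timeout picked arbitrarily from the list above, the value shown is only illustrative):

gluster volume set storage network.ping-timeout 5
gluster volume get storage network.ping-timeout

And gluster volume get storage all also lists the defaults for everything not shown under "Options Reconfigured", in case the critical bit is an option I have not touched at all.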