May 29, 2020

Take node server6 offline

[root@server6 ~]# init 0

Check the volume info

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Disperse
Volume ID: fd3fdef5-a1c5-41c6-83f7-a9df9e3ccbb3
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (4 + 2) = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
[root@server1 ~]#

The volume status shows that server6 and its brick are no longer listed

[root@server1 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       10276
Brick server5:/mnt/volume_sfo2_05/brick5    49152     0          Y       10274
Self-heal Daemon on localhost               N/A       N/A        Y       10401
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server5                 N/A       N/A        Y       10295
Self-heal Daemon on server2                 N/A       N/A        Y       10294
Self-heal Daemon on server4                 N/A       N/A        Y       10297
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server1 ~]#

The peer status shows that server6 is disconnected

[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: d331a6e5-b533-42a6-bd78-b07f33edbb0f
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server1 ~]#

Write files

[root@server7 ~]# for i in `seq -w 21 40`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-05 copy-test-09 copy-test-13 copy-test-17 copy-test-21 copy-test-25 copy-test-29 copy-test-33 copy-test-37
copy-test-02 copy-test-06 copy-test-10 copy-test-14 copy-test-18 copy-test-22 copy-test-26 copy-test-30 copy-test-34 copy-test-38
copy-test-03 copy-test-07 copy-test-11 copy-test-15 copy-test-19 copy-test-23 copy-test-27 copy-test-31 copy-test-35 copy-test-39
copy-test-04 copy-test-08 copy-test-12 copy-test-16 copy-test-20 copy-test-24 copy-test-28 copy-test-32 copy-test-36 copy-test-40
[root@server7 ~]#

Take node server5 offline

[root@server5 ~]# init 0

Check the peer status

[root@server2 ~]# gluster peer status
Number of Peers: 5

Hostname: server1
Uuid: a97fa9a8-e97f-421e-b92c-07ef39a488cd
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Disconnected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server2 ~]#

Check the volume status

[root@server2 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       10276
Self-heal Daemon on localhost               N/A       N/A        Y       10294
Self-heal Daemon on server1                 N/A       N/A        Y       10401
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server4                 N/A       N/A        Y       10297
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server2 ~]#

Write files

[root@server7 ~]# for i in `seq -w 41 60`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-06 copy-test-11 copy-test-16 copy-test-21 copy-test-26 copy-test-31 copy-test-36 copy-test-41 copy-test-46 copy-test-51 copy-test-56
copy-test-02 copy-test-07 copy-test-12 copy-test-17 copy-test-22 copy-test-27 copy-test-32 copy-test-37 copy-test-42 copy-test-47 copy-test-52 copy-test-57
copy-test-03 copy-test-08 copy-test-13 copy-test-18 copy-test-23 copy-test-28 copy-test-33 copy-test-38 copy-test-43 copy-test-48 copy-test-53 copy-test-58
copy-test-04 copy-test-09 copy-test-14 copy-test-19 copy-test-24 copy-test-29 copy-test-34 copy-test-39 copy-test-44 copy-test-49 copy-test-54 copy-test-59
copy-test-05 copy-test-10 copy-test-15 copy-test-20 copy-test-25 copy-test-30 copy-test-35 copy-test-40 copy-test-45 copy-test-50 copy-test-55 copy-test-60
[root@server7 ~]#

Take node server4 offline

[root@server4 ~]# init 0

Check the peer status

[root@server2 ~]# gluster peer status
Number of Peers: 5

Hostname: server1
Uuid: a97fa9a8-e97f-421e-b92c-07ef39a488cd
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Disconnected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Disconnected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server2 ~]#

Check the volume status

[root@server2 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Self-heal Daemon on localhost               N/A       N/A        Y       10294
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server1                 N/A       N/A        Y       10401
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server2 ~]#

List the mounted directory on the client node (the volume is no longer available)

[root@server7 ~]# ls /mnt 
ls: cannot access /mnt: Transport endpoint is not connected
[root@server7 ~]#
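
This is expected: with redundancy 2 the dispersed volume tolerates at most two failed bricks, and with server4, server5, and server6 all offline only three of the 4 + 2 bricks remain, fewer than the four data bricks required, so the FUSE mount reports "Transport endpoint is not connected". Once the downed nodes are brought back, recovery could be checked with the standard gluster commands (a sketch, not captured in this test):

gluster peer status                    # all peers should return to Connected
gluster volume status data-volume      # bricks on server4/5/6 should show Online again
gluster volume heal data-volume info   # pending heal entries should drain to zero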
May 28, 2020

Configure a dispersed volume with six 100 GB bricks that tolerates up to two failed bricks

206.189.167.54 server1
64.227.54.71 server2
64.227.54.85 server3
206.189.171.152 server4
64.227.54.28 server5
64.227.54.35 server6
206.189.171.164 server7

Add the nodes to the trusted storage pool

[root@server1 ~]# gluster peer probe server2
peer probe: success. 
[root@server1 ~]# gluster peer probe server3
peer probe: success. 
[root@server1 ~]# gluster peer probe server4
peer probe: success. 
[root@server1 ~]# gluster peer probe server5
peer probe: success. 
[root@server1 ~]# gluster peer probe server6
peer probe: success. 
[root@server1 ~]#

Check the peer status

[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: d331a6e5-b533-42a6-bd78-b07f33edbb0f
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Connected)
[root@server1 ~]#

Create the brick directories on the nodes

[root@server1 ~]# mkdir -p /mnt/volume_sfo2_01/brick1
[root@server2 ~]# mkdir -p /mnt/volume_sfo2_02/brick2
[root@server3 ~]# mkdir -p /mnt/volume_sfo2_03/brick3
[root@server4 ~]# mkdir -p /mnt/volume_sfo2_04/brick4
[root@server5 ~]# mkdir -p /mnt/volume_sfo2_05/brick5
[root@server6 ~]# mkdir -p /mnt/volume_sfo2_06/brick6

If the redundancy parameter is not specified when creating the volume, the system calculates the optimal value and prompts for confirmation

[root@server1 ~]# gluster volume create data-volume disperse 6 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
The optimal redundancy for this configuration is 2. Do you want to create the volume with this value ? (y/n) n

Usage:
volume create <NEW-VOLNAME> [stripe <COUNT>] [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> <TA-BRICK>... [force]

[root@server1 ~]#
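
Equivalently, the number of data bricks can be given directly with the disperse-data option shown in the usage above; a sketch (not run here) that would yield the same 4 + 2 layout:

gluster volume create data-volume disperse-data 4 redundancy 2 transport tcp \
server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6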

Create the volume, specifying both the disperse and redundancy parameters

gluster volume create data-volume disperse 6 redundancy 2 transport tcp \
server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6

[root@server1 ~]# gluster volume create data-volume disperse 6 redundancy 2 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
volume create: data-volume: success: please start the volume to access data
[root@server1 ~]#

Start the volume and check the volume info and status

[root@server1 ~]# gluster volume start data-volume
volume start: data-volume: success
[root@server1 ~]#

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Disperse
Volume ID: fd3fdef5-a1c5-41c6-83f7-a9df9e3ccbb3
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (4 + 2) = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
[root@server1 ~]#

Install the required GlusterFS packages on the client

[root@server7 ~]# yum -y install centos-release-gluster
[root@server7 ~]# yum -y install glusterfs glusterfs-fuse glusterfs-rdma

Mount the data-volume volume and check the disk usage (400 GB of usable capacity)

[root@server7 ~]# mount -t glusterfs server6:/data-volume /mnt/
[root@server7 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 60G 1020M 59G 2% /
devtmpfs 897M 0 897M 0% /dev
tmpfs 920M 0 920M 0% /dev/shm
tmpfs 920M 17M 903M 2% /run
tmpfs 920M 0 920M 0% /sys/fs/cgroup
tmpfs 184M 0 184M 0% /run/user/0
server6:/data-volume 400G 4.2G 396G 2% /mnt
[root@server7 ~]#
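
The 400 GB of usable space matches the dispersed-volume capacity formula (usable size = brick size × (number of bricks − redundancy)): with six 100 GB bricks and redundancy 2, that is 100 GB × (6 − 2) = 400 GB.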

Write test files from the client

[root@server7 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@server7 ~]#

Check how the written files are distributed across the server nodes

[root@server1 ~]# ls /mnt/volume_sfo2_01/brick1/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_01
tmpfs           184M     0  184M   0% /run/user/0
[root@server1 ~]# 

[root@server2 ~]# ls /mnt/volume_sfo2_02/brick2/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server2 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_02
tmpfs           184M     0  184M   0% /run/user/0
[root@server2 ~]# 

[root@server3 ~]# ls /mnt/volume_sfo2_03/brick3/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server3 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_03
tmpfs           184M     0  184M   0% /run/user/0
[root@server3 ~]# 

[root@server4 ~]# ls /mnt/volume_sfo2_04/brick4/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server4 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_04
tmpfs           184M     0  184M   0% /run/user/0
[root@server4 ~]# 

[root@server5 ~]# ls /mnt/volume_sfo2_05/brick5/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server5 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  975M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_05
tmpfs           184M     0  184M   0% /run/user/0
[root@server5 ~]#  

[root@server6 ~]# ls /mnt/volume_sfo2_06/brick6/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server6 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_06
tmpfs           184M     0  184M   0% /run/user/0
[root@server6 ~]#
May 27, 2020

Servers

DigitalOcean/2Core/2G/60G+100G
165.227.27.221 server1
159.89.152.41 server2
159.89.151.236 server3
167.172.118.183 server4
167.172.126.43 server5
64.225.47.139 server6

Client

DigitalOcean/2Core/2G/60G
64.225.47.123 server7

Check the available disks

[root@server1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_01
tmpfs           184M     0  184M   0% /run/user/0
[root@server1 ~]# 

[root@server2 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_02
tmpfs           184M     0  184M   0% /run/user/0
[root@server2 ~]# 

[root@server3 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_03
tmpfs           184M     0  184M   0% /run/user/0
[root@server3 ~]# 

[root@server4 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_04
tmpfs           184M     0  184M   0% /run/user/0
[root@server4 ~]# 

[root@server5 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_05
tmpfs           184M     0  184M   0% /run/user/0
[root@server5 ~]# 

[root@server6 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  973M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_06
tmpfs           184M     0  184M   0% /run/user/0
[root@server6 ~]#

Install and start the GlusterFS service on the server nodes

sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config;
yum -y install centos-release-gluster;
yum -y install glusterfs-server;
systemctl enable glusterfsd;
systemctl start glusterfsd;
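
Note that glusterfsd.service is only a stop-helper for the brick processes (its unit runs /bin/true, as the status output in the May 26 entry further down shows); the management daemon that listens on TCP 24007 is glusterd. If glusterd is not already running after installation, it can be enabled the same way (a sketch):

systemctl enable glusterd;
systemctl start glusterd;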

Add the nodes to the trusted storage pool

Once the trusted storage pool is established and the nodes can communicate with each other, only existing members of the pool can probe new nodes into it; a new node cannot probe a node that already belongs to the pool.

[root@server1 ~]# gluster peer probe server2
peer probe: success. 
[root@server1 ~]# gluster peer probe server3
peer probe: success. 
[root@server1 ~]# gluster peer probe server4
peer probe: success. 
[root@server1 ~]# gluster peer probe server5
peer probe: success. 
[root@server1 ~]# gluster peer probe server6
peer probe: success. 
[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: 6231013f-07cc-4701-93b3-34d4c623a890
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: aa808d87-4e7c-4ecd-bcf0-13ea03f844a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: d153d847-ad46-4c85-8336-f8e553d5aab6
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a90c2969-67eb-4792-b5ce-6b4b3d782675
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: 3ed5adc9-d3f7-40eb-8bbd-45f0882f55cd
State: Peer in Cluster (Connected)
[root@server1 ~]#

Create the brick directories on the nodes

[root@server1 ~]# mkdir -p /mnt/volume_sfo2_01/brick1
[root@server2 ~]# mkdir -p /mnt/volume_sfo2_02/brick2
[root@server3 ~]# mkdir -p /mnt/volume_sfo2_03/brick3
[root@server4 ~]# mkdir -p /mnt/volume_sfo2_04/brick4
[root@server5 ~]# mkdir -p /mnt/volume_sfo2_05/brick5
[root@server6 ~]# mkdir -p /mnt/volume_sfo2_06/brick6

Create a distributed replicated volume with 3 replicas across the 6 nodes

gluster volume create data-volume replica 3 transport tcp \
server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6

[root@server1 ~]# gluster volume create data-volume replica 3 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
volume create: data-volume: success: please start the volume to access data
[root@server1 ~]#

Check the volume info

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Distributed-Replicate
Volume ID: 2a2103ab-17e4-47b5-9d4c-96e460ac419c
Status: Created
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@server1 ~]# 

Start the volume and check the volume info and status

[root@server1 ~]# gluster volume start data-volume
volume start: data-volume: success
[root@server1 ~]# gluster volume info
 
Volume Name: data-volume
Type: Distributed-Replicate
Volume ID: 2a2103ab-17e4-47b5-9d4c-96e460ac419c
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@server1 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       9805 
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       9843 
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       9690 
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       9734 
Brick server5:/mnt/volume_sfo2_05/brick5    49152     0          Y       10285
Brick server6:/mnt/volume_sfo2_06/brick6    49152     0          Y       10470
Self-heal Daemon on localhost               N/A       N/A        Y       9826 
Self-heal Daemon on server5                 N/A       N/A        Y       10306
Self-heal Daemon on server2                 N/A       N/A        Y       9864 
Self-heal Daemon on server6                 N/A       N/A        Y       10491
Self-heal Daemon on server3                 N/A       N/A        Y       9711 
Self-heal Daemon on server4                 N/A       N/A        Y       9755 
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server1 ~]#

Install the required GlusterFS packages on the client

[root@server7 ~]# yum -y install centos-release-gluster
[root@server7 ~]# yum -y install glusterfs glusterfs-fuse glusterfs-rdma

Mount the data-volume volume and check the disk usage (200 GB of usable capacity)

[root@server7 ~]# mount -t glusterfs server6:/data-volume /mnt/
[root@server7 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G 1003M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
tmpfs                 184M     0  184M   0% /run/user/0
server6:/data-volume  200G  2.1G  198G   2% /mnt
[root@server7 ~]# 
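
The 200 GB of usable space follows from the layout: six 100 GB bricks arranged as two replica-3 sets give two distribute subvolumes of 100 GB each, i.e. 100 GB × 6 / 3 = 200 GB.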

[root@server7 ~]# mount |grep server6
server6:/data-volume on /mnt type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@server7 ~]#

Check the communication with the server nodes

Write test files from the client

[root@server7 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@server7 ~]#

Check how the written files are distributed across the server nodes

[root@server1 ~]# ls /mnt/volume_sfo2_01/brick1/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server1 ~]#

[root@server2 ~]# ls /mnt/volume_sfo2_02/brick2/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server2 ~]#

[root@server3 ~]# ls /mnt/volume_sfo2_03/brick3/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server3 ~]#

[root@server4 ~]# ls /mnt/volume_sfo2_04/brick4/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server4 ~]#

[root@server5 ~]# ls /mnt/volume_sfo2_05/brick5/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server5 ~]#

[root@server6 ~]# ls /mnt/volume_sfo2_06/brick6/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server6 ~]#
May 26, 2020

A distributed volume spreads files across the bricks of the volume. Distributed volumes scale well, but they provide no data redundancy; any redundancy must come from the underlying server hardware or software.

The command format for creating a distributed volume is:

# gluster volume create <NEW-VOLNAME> [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...
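
For example, a simple two-brick distributed volume could be created like this (a sketch; the volume name and brick paths are hypothetical):

gluster volume create dist-vol transport tcp server1:/data/brick1/dv server2:/data/brick2/dv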

A replicated volume keeps copies of every file on multiple bricks of the volume. When creating a replicated volume, the number of bricks should equal the replica count, and to guard against server and disk failures each brick should sit on a separate server. Replicated volumes provide high availability and high reliability.

The command format for creating a replicated volume is:

# gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...
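
For example, a three-way replicated volume across three servers (a sketch with hypothetical names; the gv0 volume created in the entry further down is a real instance of this):

gluster volume create rep-vol replica 3 transport tcp server1:/data/brick1/rv server2:/data/brick2/rv server3:/data/brick3/rv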

A distributed replicated volume combines distribution and replication. When creating one, the number of bricks must be a multiple of the specified replica count. Unless the force parameter is used, GlusterFS by default allows only one brick of a replica set per server node. Distributed replicated volumes improve file read performance.

The command format for creating a distributed replicated volume is:

# gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A dispersed volume is based on erasure coding: files are encoded, striped, and spread across the bricks of the volume with a configurable amount of redundancy. Dispersed volumes make better use of disk capacity at some cost in performance. The redundancy value of a dispersed volume is the number of bricks that may fail without interrupting reads and writes on the volume.

The redundancy value must be greater than 0, and the total number of bricks must be greater than twice the redundancy, which means a dispersed volume needs at least 3 bricks. If no redundancy value is given when the volume is created, the system computes the optimal value and prompts for confirmation.

The usable capacity of a dispersed volume is calculated as:

<Usable size> = <Brick size> * (#Bricks - Redundancy)
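
For instance, the smallest valid dispersed volume, three equal 100 GB bricks with redundancy 1, would provide 100 GB × (3 − 1) = 200 GB of usable space.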

The command format for creating a dispersed volume is:

# gluster volume create <NEW-VOLNAME> [disperse [<count>]] [redundancy <count>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A distributed dispersed volume is the equivalent of a distributed replicated volume, except that it stores data in the bricks through dispersed subvolumes instead of replicas.
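
For example, passing six bricks to a disperse 3 / redundancy 1 volume would create two dispersed subvolumes of (2 + 1) bricks each, i.e. a distributed dispersed volume (a sketch; the volume name and brick paths are hypothetical):

gluster volume create dd-vol disperse 3 redundancy 1 transport tcp \
server1:/data/brick1/ddv server2:/data/brick2/ddv server3:/data/brick3/ddv \
server4:/data/brick4/ddv server5:/data/brick5/ddv server6:/data/brick6/ddv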

January 29, 2015

https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/
https://wiki.centos.org/SpecialInterestGroup/Storage/gluster-Quickstart

GlusterFS is a scalable distributed filesystem that aggregates the disk resources of multiple server nodes into a single global namespace.

Distributed filesystem nodes

glusterfs-01 138.197.217.220 10.138.18.152
glusterfs-02 157.245.169.92 10.138.146.225
glusterfs-03 165.227.21.222 10.138.178.108

Configure the hosts file on all nodes

[root@glusterfs-01 ~]# vi /etc/hosts
10.138.18.152 glusterfs-01
10.138.146.225 glusterfs-02
10.138.178.108 glusterfs-03

Check the current disks and partitions

[root@glusterfs-01 ~]# fdisk -l

Disk /dev/vda: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000b6061

   Device Boot      Start         End      Blocks   Id  System
/dev/vda1   *        2048   125829086    62913519+  83  Linux

Disk /dev/vdb: 0 MB, 466944 bytes, 912 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

[root@glusterfs-01 ~]#

Create the partitions

[root@glusterfs-01 ~]# fdisk /dev/sda
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x99c4ee31.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): e
Partition number (1-4, default 1): 
First sector (2048-209715199, default 2048): 
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-209715199, default 209715199): 
Using default value 209715199
Partition 1 of type Extended and of size 100 GiB is set

Command (m for help): n
Partition type:
   p   primary (0 primary, 1 extended, 3 free)
   l   logical (numbered from 5)
Select (default p): l
Adding logical partition 5
First sector (4096-209715199, default 4096): 
Using default value 4096
Last sector, +sectors or +size{K,M,G} (4096-209715199, default 209715199): 
Using default value 209715199
Partition 5 of type Linux and of size 100 GiB is set

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@glusterfs-01 ~]#

Check the current disks and partitions

[root@glusterfs-01 ~]# fdisk -l

Disk /dev/vda: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000b6061

   Device Boot      Start         End      Blocks   Id  System
/dev/vda1   *        2048   125829086    62913519+  83  Linux

Disk /dev/vdb: 0 MB, 466944 bytes, 912 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0xbb370b51

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1            2048   209715199   104856576    5  Extended
/dev/sda5            4096   209715199   104855552   83  Linux
[root@glusterfs-01 ~]#

Format and mount the data disk on all nodes

# mkfs.xfs -i size=512 /dev/sda5
# mkdir -p /data/brick1
# echo '/dev/sda5 /data/brick1 xfs defaults 1 2' >> /etc/fstab
# mount -a && mount

[root@glusterfs-01 ~]# mkfs.xfs -i size=512 /dev/sda5
meta-data=/dev/sda5              isize=512    agcount=4, agsize=6553472 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=26213888, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=12799, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@glusterfs-01 ~]# mkdir -p /data/brick1
[root@glusterfs-01 ~]# echo '/dev/sda5 /data/brick1 xfs defaults 1 2' >> /etc/fstab
[root@glusterfs-01 ~]# mount -a && mount
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime,seclabel)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
devtmpfs on /dev type devtmpfs (rw,nosuid,seclabel,size=917804k,nr_inodes=229451,mode=755)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev,seclabel)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,seclabel,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,seclabel,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,freezer)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,cpuset)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,perf_event)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,pids)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,net_prio,net_cls)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,cpuacct,cpu)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,memory)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,blkio)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,devices)
configfs on /sys/kernel/config type configfs (rw,relatime)
/dev/vda1 on / type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
rpc_pipefs on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
selinuxfs on /sys/fs/selinux type selinuxfs (rw,relatime)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime,seclabel)
mqueue on /dev/mqueue type mqueue (rw,relatime,seclabel)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=32,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=13335)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,seclabel,size=188220k,mode=700)
/dev/sda5 on /data/brick1 type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
[root@glusterfs-01 ~]#

Install the GlusterFS packages on all nodes

[root@glusterfs-01 ~]# yum -y install centos-release-gluster
[root@glusterfs-01 ~]# yum -y install glusterfs-server

[root@glusterfs-02 ~]# yum -y install centos-release-gluster
[root@glusterfs-02 ~]# yum -y install glusterfs-server

[root@glusterfs-03 ~]# yum -y install centos-release-gluster
[root@glusterfs-03 ~]# yum -y install glusterfs-server

Enable and start the glusterfsd system service on all nodes

[root@glusterfs-01 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-01 ~]# systemctl start glusterfsd
[root@glusterfs-01 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:28:17 UTC; 8s ago
  Process: 10737 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 10737 (code=exited, status=0/SUCCESS)

May 26 07:28:17 glusterfs-01 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:28:17 glusterfs-01 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-01 ~]# 

[root@glusterfs-02 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-02 ~]# systemctl start glusterfsd
[root@glusterfs-02 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:29:21 UTC; 11s ago
  Process: 18817 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 18817 (code=exited, status=0/SUCCESS)

May 26 07:29:20 glusterfs-02 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:29:21 glusterfs-02 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-02 ~]# 

[root@glusterfs-03 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-03 ~]# systemctl start glusterfsd
[root@glusterfs-03 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:30:27 UTC; 7s ago
  Process: 18444 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 18444 (code=exited, status=0/SUCCESS)

May 26 07:30:27 glusterfs-03 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:30:27 glusterfs-03 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-03 ~]#

Check the listening ports

[root@glusterfs-01 ~]# netstat -lntuop
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name     Timer
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1195/master          off (0.00/0/0)
tcp        0      0 0.0.0.0:24007           0.0.0.0:*               LISTEN      1047/glusterd        off (0.00/0/0)
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/systemd            off (0.00/0/0)
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1247/sshd            off (0.00/0/0)
tcp6       0      0 ::1:25                  :::*                    LISTEN      1195/master          off (0.00/0/0)
tcp6       0      0 :::111                  :::*                    LISTEN      1/systemd            off (0.00/0/0)
tcp6       0      0 :::22                   :::*                    LISTEN      1247/sshd            off (0.00/0/0)
udp        0      0 0.0.0.0:111             0.0.0.0:*                           1/systemd            off (0.00/0/0)
udp        0      0 127.0.0.1:323           0.0.0.0:*                           647/chronyd          off (0.00/0/0)
udp        0      0 0.0.0.0:802             0.0.0.0:*                           629/rpcbind          off (0.00/0/0)
udp6       0      0 :::111                  :::*                                1/systemd            off (0.00/0/0)
udp6       0      0 ::1:323                 :::*                                647/chronyd          off (0.00/0/0)
udp6       0      0 :::802                  :::*                                629/rpcbind          off (0.00/0/0)
[root@glusterfs-01 ~]#
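
The listener on TCP 24007 belongs to glusterd, the management daemon; the glusterfsd unit enabled above is only a stop-helper whose ExecStart is /bin/true, as its status output shows. Brick processes show up on ports from 49152 upward once a volume has been started, which is why the volume status output below lists the bricks on 49152. A quick re-check after starting a volume (a sketch):

netstat -lntp | grep -E 'glusterd|glusterfsd'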

Check the version

[root@glusterfs-01 ~]# glusterfs -V
glusterfs 7.5
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc. <https://www.gluster.org/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
[root@glusterfs-01 ~]#

Add the nodes to the trusted storage pool

[root@glusterfs-01 ~]# gluster peer probe glusterfs-02
peer probe: success. 
[root@glusterfs-01 ~]# gluster peer probe glusterfs-03
peer probe: success. 
[root@glusterfs-01 ~]#

Check the peer status

[root@glusterfs-01 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-02
Uuid: 9375a552-1cce-414c-8850-997800dd1f6e
State: Peer in Cluster (Connected)

Hostname: glusterfs-03
Uuid: c490e4ee-03f7-4b83-9456-6cccd101020f
State: Peer in Cluster (Connected)
[root@glusterfs-01 ~]#

[root@glusterfs-02 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-01
Uuid: 605bacf2-abb4-4083-be2b-0d17c843bc68
State: Peer in Cluster (Connected)

Hostname: glusterfs-03
Uuid: c490e4ee-03f7-4b83-9456-6cccd101020f
State: Peer in Cluster (Connected)
[root@glusterfs-02 ~]#

[root@glusterfs-03 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-01
Uuid: 605bacf2-abb4-4083-be2b-0d17c843bc68
State: Peer in Cluster (Connected)

Hostname: glusterfs-02
Uuid: 9375a552-1cce-414c-8850-997800dd1f6e
State: Peer in Cluster (Connected)
[root@glusterfs-03 ~]#

Create a volume (three replicas)

[root@glusterfs-01 ~]# mkdir -p /data/brick1/gv0
[root@glusterfs-02 ~]# mkdir -p /data/brick1/gv0
[root@glusterfs-03 ~]# mkdir -p /data/brick1/gv0

[root@glusterfs-01 ~]# gluster volume create gv0 replica 3 glusterfs-01:/data/brick1/gv0 glusterfs-02:/data/brick1/gv0 glusterfs-03:/data/brick1/gv0
volume create: gv0: success: please start the volume to access data
[root@glusterfs-01 ~]#

[root@glusterfs-01 ~]# gluster volume start gv0
volume start: gv0: success
[root@glusterfs-01 ~]#

Check the volume info

[root@glusterfs-01 ~]# gluster volume info
 
Volume Name: gv0
Type: Replicate
Volume ID: aaa143ff-c7db-4b12-9d2f-4199c2cf76c9
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: glusterfs-01:/data/brick1/gv0
Brick2: glusterfs-02:/data/brick1/gv0
Brick3: glusterfs-03:/data/brick1/gv0
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@glusterfs-01 ~]#

Check the volume status

[root@glusterfs-01 ~]# gluster volume status
Status of volume: gv0
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick glusterfs-01:/data/brick1/gv0         49152     0          Y       1580 
Brick glusterfs-02:/data/brick1/gv0         49152     0          Y       10275
Brick glusterfs-03:/data/brick1/gv0         49152     0          Y       10248
Self-heal Daemon on localhost               N/A       N/A        Y       1601 
Self-heal Daemon on glusterfs-03            N/A       N/A        Y       10269
Self-heal Daemon on glusterfs-02            N/A       N/A        Y       10296
 
Task Status of Volume gv0
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@glusterfs-01 ~]#

Mount the newly created three-replica filesystem through any node

In a GlusterFS cluster, the server named in the mount command is only used to fetch the volume configuration; afterwards the client talks directly to the servers listed in the volfile (which need not even include the server used for mounting).
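
Because of this, the named server matters only while fetching the volume configuration. To avoid depending on a single volfile server at mount time, mount.glusterfs also accepts a backup-volfile-servers option listing additional servers to try (a sketch, not run here):

mount -t glusterfs -o backup-volfile-servers=glusterfs-01:glusterfs-02 glusterfs-03:/gv0 /mnt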

[root@glusterfs-01 ~]# mount -t glusterfs glusterfs-03:/gv0 /mnt
[root@glusterfs-01 ~]# mount
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
devtmpfs on /dev type devtmpfs (rw,nosuid,size=917804k,nr_inodes=229451,mode=755)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_prio,net_cls)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpuacct,cpu)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
configfs on /sys/kernel/config type configfs (rw,relatime)
/dev/vda1 on / type xfs (rw,relatime,attr2,inode64,noquota)
rpc_pipefs on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=12616)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime)
mqueue on /dev/mqueue type mqueue (rw,relatime)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
/dev/sda5 on /data/brick1 type xfs (rw,relatime,attr2,inode64,noquota)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=188220k,mode=700)
glusterfs-03:/gv0 on /mnt type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@glusterfs-01 ~]#

Write 20 files

[root@glusterfs-01 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@glusterfs-01 ~]#

Confirm the number of files written

[root@glusterfs-01 ~]# ls -lA /mnt/copy* | wc -l
20
[root@glusterfs-01 ~]#

View the written files at the local brick mount point on each node

[root@glusterfs-01 ~]# ls /data/brick1/gv0/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@glusterfs-01 ~]#

[root@glusterfs-02 ~]# ls /data/brick1/gv0/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@glusterfs-02 ~]#

[root@glusterfs-03 ~]# ls /data/brick1/gv0/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@glusterfs-03 ~]#