May 29, 2020
 

Take node server6 offline

[root@server6 ~]# init 0

Check the volume information

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Disperse
Volume ID: fd3fdef5-a1c5-41c6-83f7-a9df9e3ccbb3
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (4 + 2) = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
[root@server1 ~]#

Check the volume status: server6 and its brick are no longer listed

[root@server1 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       10276
Brick server5:/mnt/volume_sfo2_05/brick5    49152     0          Y       10274
Self-heal Daemon on localhost               N/A       N/A        Y       10401
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server5                 N/A       N/A        Y       10295
Self-heal Daemon on server2                 N/A       N/A        Y       10294
Self-heal Daemon on server4                 N/A       N/A        Y       10297
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server1 ~]#

Check the peer status: server6 is shown as disconnected

[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: d331a6e5-b533-42a6-bd78-b07f33edbb0f
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server1 ~]#

Write files from the client

[root@server7 ~]# for i in `seq -w 21 40`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-05 copy-test-09 copy-test-13 copy-test-17 copy-test-21 copy-test-25 copy-test-29 copy-test-33 copy-test-37
copy-test-02 copy-test-06 copy-test-10 copy-test-14 copy-test-18 copy-test-22 copy-test-26 copy-test-30 copy-test-34 copy-test-38
copy-test-03 copy-test-07 copy-test-11 copy-test-15 copy-test-19 copy-test-23 copy-test-27 copy-test-31 copy-test-35 copy-test-39
copy-test-04 copy-test-08 copy-test-12 copy-test-16 copy-test-20 copy-test-24 copy-test-28 copy-test-32 copy-test-36 copy-test-40
[root@server7 ~]#

Take node server5 offline

[root@server5 ~]# init 0

Check the peer status

[root@server2 ~]# gluster peer status
Number of Peers: 5

Hostname: server1
Uuid: a97fa9a8-e97f-421e-b92c-07ef39a488cd
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Disconnected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server2 ~]#

Check the volume status

[root@server2 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       10276
Self-heal Daemon on localhost               N/A       N/A        Y       10294
Self-heal Daemon on server1                 N/A       N/A        Y       10401
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server4                 N/A       N/A        Y       10297
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server2 ~]#

Write files from the client

[root@server7 ~]# for i in `seq -w 41 60`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-06 copy-test-11 copy-test-16 copy-test-21 copy-test-26 copy-test-31 copy-test-36 copy-test-41 copy-test-46 copy-test-51 copy-test-56
copy-test-02 copy-test-07 copy-test-12 copy-test-17 copy-test-22 copy-test-27 copy-test-32 copy-test-37 copy-test-42 copy-test-47 copy-test-52 copy-test-57
copy-test-03 copy-test-08 copy-test-13 copy-test-18 copy-test-23 copy-test-28 copy-test-33 copy-test-38 copy-test-43 copy-test-48 copy-test-53 copy-test-58
copy-test-04 copy-test-09 copy-test-14 copy-test-19 copy-test-24 copy-test-29 copy-test-34 copy-test-39 copy-test-44 copy-test-49 copy-test-54 copy-test-59
copy-test-05 copy-test-10 copy-test-15 copy-test-20 copy-test-25 copy-test-30 copy-test-35 copy-test-40 copy-test-45 copy-test-50 copy-test-55 copy-test-60
[root@server7 ~]#

Take node server4 offline

[root@server4 ~]# init 0

Check the peer status

[root@server2 ~]# gluster peer status
Number of Peers: 5

Hostname: server1
Uuid: a97fa9a8-e97f-421e-b92c-07ef39a488cd
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Disconnected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Disconnected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server2 ~]#

Check the volume status

[root@server2 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Self-heal Daemon on localhost               N/A       N/A        Y       10294
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server1                 N/A       N/A        Y       10401
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server2 ~]#

Check the mounted directory on the client node (no longer accessible, since more than two bricks are down)

[root@server7 ~]# ls /mnt 
ls: cannot access /mnt: Transport endpoint is not connected
[root@server7 ~]#
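
A possible follow-up, not captured above: once the powered-off nodes are booted again the bricks rejoin the volume, and any pending self-heal work can be inspected from a surviving node, for example:

[root@server1 ~]# gluster volume status data-volume
[root@server1 ~]# gluster volume heal data-volume info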
May 28, 2020
 

Configure a dispersed volume with six 100 GB bricks that tolerates up to two failed bricks

206.189.167.54 server1
64.227.54.71 server2
64.227.54.85 server3
206.189.171.152 server4
64.227.54.28 server5
64.227.54.35 server6
206.189.171.164 server7

Add the nodes to the trusted storage pool

[root@server1 ~]# gluster peer probe server2
peer probe: success. 
[root@server1 ~]# gluster peer probe server3
peer probe: success. 
[root@server1 ~]# gluster peer probe server4
peer probe: success. 
[root@server1 ~]# gluster peer probe server5
peer probe: success. 
[root@server1 ~]# gluster peer probe server6
peer probe: success. 
[root@server1 ~]#

Check the peer status

[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: d331a6e5-b533-42a6-bd78-b07f33edbb0f
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Connected)
[root@server1 ~]#

Create the brick directories on the nodes

[root@server1 ~]# mkdir -p /mnt/volume_sfo2_01/brick1
[root@server2 ~]# mkdir -p /mnt/volume_sfo2_02/brick2
[root@server3 ~]# mkdir -p /mnt/volume_sfo2_03/brick3
[root@server4 ~]# mkdir -p /mnt/volume_sfo2_04/brick4
[root@server5 ~]# mkdir -p /mnt/volume_sfo2_05/brick5
[root@server6 ~]# mkdir -p /mnt/volume_sfo2_06/brick6

If the redundancy parameter is omitted when creating the volume, the system computes the optimal value and prompts for confirmation

[root@server1 ~]# gluster volume create data-volume disperse 6 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
The optimal redundancy for this configuration is 2. Do you want to create the volume with this value ? (y/n) n

Usage:
volume create <NEW-VOLNAME> [stripe <COUNT>] [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> <TA-BRICK>... [force]

[root@server1 ~]#

Create the volume, specifying both the disperse and redundancy parameters

gluster volume create data-volume disperse 6 redundancy 2 transport tcp \
server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6

[root@server1 ~]# gluster volume create data-volume disperse 6 redundancy 2 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
volume create: data-volume: success: please start the volume to access data
[root@server1 ~]#

Start the volume and check its info and status

[root@server1 ~]# gluster volume start data-volume
volume start: data-volume: success
[root@server1 ~]#

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Disperse
Volume ID: fd3fdef5-a1c5-41c6-83f7-a9df9e3ccbb3
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (4 + 2) = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
[root@server1 ~]#

Install the required GlusterFS components on the client

[root@server7 ~]# yum -y install centos-release-gluster
[root@server7 ~]# yum -y install glusterfs glusterfs-fuse glusterfs-rdma

Mount the data-volume volume and check disk usage (usable capacity is 400 GB)

[root@server7 ~]# mount -t glusterfs server6:/data-volume /mnt/
[root@server7 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 60G 1020M 59G 2% /
devtmpfs 897M 0 897M 0% /dev
tmpfs 920M 0 920M 0% /dev/shm
tmpfs 920M 17M 903M 2% /run
tmpfs 920M 0 920M 0% /sys/fs/cgroup
tmpfs 184M 0 184M 0% /run/user/0
server6:/data-volume 400G 4.2G 396G 2% /mnt
[root@server7 ~]#

Write test files from the client

[root@server7 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@server7 ~]#

Check how the written files are distributed across the bricks on the server nodes

[root@server1 ~]# ls /mnt/volume_sfo2_01/brick1/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_01
tmpfs           184M     0  184M   0% /run/user/0
[root@server1 ~]# 

[root@server2 ~]# ls /mnt/volume_sfo2_02/brick2/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server2 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_02
tmpfs           184M     0  184M   0% /run/user/0
[root@server2 ~]# 

[root@server3 ~]# ls /mnt/volume_sfo2_03/brick3/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server3 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_03
tmpfs           184M     0  184M   0% /run/user/0
[root@server3 ~]# 

[root@server4 ~]# ls /mnt/volume_sfo2_04/brick4/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server4 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_04
tmpfs           184M     0  184M   0% /run/user/0
[root@server4 ~]# 

[root@server5 ~]# ls /mnt/volume_sfo2_05/brick5/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server5 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  975M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_05
tmpfs           184M     0  184M   0% /run/user/0
[root@server5 ~]#  

[root@server6 ~]# ls /mnt/volume_sfo2_06/brick6/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server6 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_06
tmpfs           184M     0  184M   0% /run/user/0
[root@server6 ~]#
May 27, 2020
 

Servers

DigitalOcean/2Core/2G/60G+100G
165.227.27.221 server1
159.89.152.41 server2
159.89.151.236 server3
167.172.118.183 server4
167.172.126.43 server5
64.225.47.139 server6

Client

DigitalOcean/2Core/2G/60G
64.225.47.123 server7

Check the available disks

[root@server1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_01
tmpfs           184M     0  184M   0% /run/user/0
[root@server1 ~]# 

[root@server2 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_02
tmpfs           184M     0  184M   0% /run/user/0
[root@server2 ~]# 

[root@server3 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_03
tmpfs           184M     0  184M   0% /run/user/0
[root@server3 ~]# 

[root@server4 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_04
tmpfs           184M     0  184M   0% /run/user/0
[root@server4 ~]# 

[root@server5 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_05
tmpfs           184M     0  184M   0% /run/user/0
[root@server5 ~]# 

[root@server6 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  973M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_06
tmpfs           184M     0  184M   0% /run/user/0
[root@server6 ~]#

Install and start the GlusterFS service on the server nodes

sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config;
yum -y install centos-release-gluster;
yum -y install glusterfs-server;
systemctl enable glusterd;
systemctl start glusterd;
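
Before probing peers it is worth confirming that the management daemon is actually running on every node; a quick check (not part of the original notes) could be:

systemctl status glusterd
gluster --version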

Add the nodes to the trusted storage pool

Once the trusted storage pool is established and the nodes can communicate with each other, only nodes that are already members of the pool can probe new nodes into it; a new node cannot itself probe nodes that already belong to the pool.
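
A minimal illustration of this rule (hypothetical: server8 is a new node running glusterd that is not yet in the pool):

[root@server8 ~]# gluster peer probe server1    # rejected: a new node cannot probe itself into an existing pool
[root@server1 ~]# gluster peer probe server8    # accepted: an existing member probes the new node in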

[root@server1 ~]# gluster peer probe server2
peer probe: success. 
[root@server1 ~]# gluster peer probe server3
peer probe: success. 
[root@server1 ~]# gluster peer probe server4
peer probe: success. 
[root@server1 ~]# gluster peer probe server5
peer probe: success. 
[root@server1 ~]# gluster peer probe server6
peer probe: success. 
[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: 6231013f-07cc-4701-93b3-34d4c623a890
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: aa808d87-4e7c-4ecd-bcf0-13ea03f844a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: d153d847-ad46-4c85-8336-f8e553d5aab6
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a90c2969-67eb-4792-b5ce-6b4b3d782675
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: 3ed5adc9-d3f7-40eb-8bbd-45f0882f55cd
State: Peer in Cluster (Connected)
[root@server1 ~]#

Create the brick directories on the nodes

[root@server1 ~]# mkdir -p /mnt/volume_sfo2_01/brick1
[root@server2 ~]# mkdir -p /mnt/volume_sfo2_02/brick2
[root@server3 ~]# mkdir -p /mnt/volume_sfo2_03/brick3
[root@server4 ~]# mkdir -p /mnt/volume_sfo2_04/brick4
[root@server5 ~]# mkdir -p /mnt/volume_sfo2_05/brick5
[root@server6 ~]# mkdir -p /mnt/volume_sfo2_06/brick6

Create a distributed replicated volume across six nodes with replica count 3

gluster volume create data-volume replica 3 transport tcp \
server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6

[root@server1 ~]# gluster volume create data-volume replica 3 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
volume create: data-volume: success: please start the volume to access data
[root@server1 ~]#

Check the volume information

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Distributed-Replicate
Volume ID: 2a2103ab-17e4-47b5-9d4c-96e460ac419c
Status: Created
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@server1 ~]# 

Start the volume and check its info and status

[root@server1 ~]# gluster volume start data-volume
volume start: data-volume: success
[root@server1 ~]# gluster volume info
 
Volume Name: data-volume
Type: Distributed-Replicate
Volume ID: 2a2103ab-17e4-47b5-9d4c-96e460ac419c
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@server1 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       9805 
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       9843 
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       9690 
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       9734 
Brick server5:/mnt/volume_sfo2_05/brick5    49152     0          Y       10285
Brick server6:/mnt/volume_sfo2_06/brick6    49152     0          Y       10470
Self-heal Daemon on localhost               N/A       N/A        Y       9826 
Self-heal Daemon on server5                 N/A       N/A        Y       10306
Self-heal Daemon on server2                 N/A       N/A        Y       9864 
Self-heal Daemon on server6                 N/A       N/A        Y       10491
Self-heal Daemon on server3                 N/A       N/A        Y       9711 
Self-heal Daemon on server4                 N/A       N/A        Y       9755 
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server1 ~]#

Install the required GlusterFS components on the client

[root@server7 ~]# yum -y install centos-release-gluster
[root@server7 ~]# yum -y install glusterfs glusterfs-fuse glusterfs-rdma

Mount the data-volume volume and check disk usage (usable capacity is 200 GB)

[root@server7 ~]# mount -t glusterfs server6:/data-volume /mnt/
[root@server7 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G 1003M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
tmpfs                 184M     0  184M   0% /run/user/0
server6:/data-volume  200G  2.1G  198G   2% /mnt
[root@server7 ~]# 

[root@server7 ~]# mount |grep server6
server6:/data-volume on /mnt type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@server7 ~]#

Check the communication status with the server nodes

Write test files from the client

[root@server7 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@server7 ~]#

Check how the written files are distributed across the bricks on the server nodes

[root@server1 ~]# ls /mnt/volume_sfo2_01/brick1/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server1 ~]#

[root@server2 ~]# ls /mnt/volume_sfo2_02/brick2/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server2 ~]#

[root@server3 ~]# ls /mnt/volume_sfo2_03/brick3/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server3 ~]#

[root@server4 ~]# ls /mnt/volume_sfo2_04/brick4/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server4 ~]#

[root@server5 ~]# ls /mnt/volume_sfo2_05/brick5/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server5 ~]#

[root@server6 ~]# ls /mnt/volume_sfo2_06/brick6/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server6 ~]#
May 26, 2020
 

A distributed volume distributes files across the bricks of the volume. It scales well but provides no data redundancy; redundancy has to be supplied by the servers' own hardware or software.

The command format for creating a distributed volume is:

# gluster volume create <NEW-VOLNAME> [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...
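
For example, a sketch of a two-brick distributed volume (the volume name dist-volume and the brick paths are hypothetical, not part of this setup):

# gluster volume create dist-volume transport tcp \
  server1:/bricks/dist1 server2:/bricks/dist2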

A replicated volume keeps copies of each file on multiple bricks of the volume. When creating a replicated volume, the number of bricks should equal the replica count, and to guard against server and disk failures each brick should be placed on a separate server. Replicated volumes provide high availability and high reliability of data.

The command format for creating a replicated volume is:

# gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A distributed replicated volume combines the distributed and replicated volume types. When creating one, the number of bricks must be at least an integer multiple of the specified replica count. Unless the force option is given, GlusterFS by default allows only one brick of a replica set to be placed on any one server node. Distributed replicated volumes improve file read performance.

The command format for creating a distributed replicated volume is:

# gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A dispersed volume is based on erasure coding: files are encoded, striped, and spread across multiple bricks of the volume, with a configurable amount of redundancy. Dispersed volumes improve storage utilization, at some cost in performance. The redundancy value of a dispersed volume is the number of bricks that may fail without interrupting read and write operations on the volume.

The redundancy value must be greater than 0, and the total number of bricks must be greater than twice the redundancy value, which means a dispersed volume requires at least three bricks. If no redundancy value is given at creation time, the system computes the optimal value and prompts for confirmation.

The usable capacity of a dispersed volume is calculated as:

<Usable size> = <Brick size> * (#Bricks - Redundancy)
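
For the volume built in the May 28 entry above, this works out to 100 GB * (6 - 2) = 400 GB, which matches the 400G size reported by df -h on the client.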

The command format for creating a dispersed volume is:

# gluster volume create <NEW-VOLNAME> [disperse [<COUNT>]] [redundancy <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A distributed dispersed volume is the dispersed equivalent of a distributed replicated volume; the difference is that data is distributed across several dispersed subvolumes rather than replica sets.
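
As a sketch (hypothetical volume name and brick paths): supplying a brick count that is a multiple of the disperse count creates a distributed dispersed volume, e.g. six bricks with disperse 3 and redundancy 1 gives two dispersed subvolumes, 2 x (2 + 1) = 6:

# gluster volume create dist-disperse-volume disperse 3 redundancy 1 transport tcp \
  server1:/bricks/b1 server2:/bricks/b2 server3:/bricks/b3 \
  server4:/bricks/b4 server5:/bricks/b5 server6:/bricks/b6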

May 13, 2020
 

Exercise 1 code and notes

# -*- coding: utf-8 -*-
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print 'Yay! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
# The # character marks a comment
# Declare UTF-8 encoding so non-ASCII characters are not garbled

Exercise 2 code and notes

# -*- coding: utf-8 -*-
# A comment, this is so you can read your program later.
# Anything after the # is ignored by python.

print "I could have code like this." # and the comment after is ignored

# You can also use a comment to "disable" or comment out a piece of code:
# print "This won't run."

print "This will run."
# A comment can describe a line of code in natural language, or temporarily disable that line
# The comment character is called an octothorpe or pound character
# A # inside quotes is just an ordinary character within the string

Exercise 3 code and notes

# -*- coding: utf-8 -*-
print "I will now count my chickens:"

print "Hens", 25 + 30 / 6
print "Roosters", 100 - 25 * 3 % 4

print "Now I will count the eggs:"

print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6

print "is it true that 3 + 2 < 5 - 7?"

print 3 + 2 < 5 - 7

print "What is 3 + 2?", 3 + 2
print "What is 5 - 7?", 5 - 7

print "Oh, that's why it's False."

print "How about some more."

print "Is it greater?", 5 > -2
print "Is it greater or equal?", 5 >= -2
print "is it less or equal?", 5 <= -2
# The percent sign % gives the remainder: 75 divided by 4 is 18 with remainder 3
# Operator precedence: parentheses, exponents, multiplication, division, addition, subtraction
# 1 / 4 is integer division in Python 2, so the fractional part is discarded
May 9, 2020
 

Apply the Calico network manifest

[root@k8s-01 ~]# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
[root@k8s-01 ~]#

Check the status of the Calico-related Pods

[root@k8s-01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
calico-kube-controllers-7d4d547dd6-b6rvr   1/1     Running   0          45m   10.244.165.194   k8s-03   <none>           <none>
calico-node-dccgc                          1/1     Running   0          45m   64.225.118.77    k8s-03   <none>           <none>
calico-node-l2lcp                          1/1     Running   0          45m   157.245.178.77   k8s-02   <none>           <none>
calico-node-zwj8n                          1/1     Running   0          45m   64.225.39.115    k8s-01   <none>           <none>
coredns-5644d7b6d9-tgw7c                   1/1     Running   0          49m   10.244.165.195   k8s-03   <none>           <none>
coredns-5644d7b6d9-tljw2                   1/1     Running   0          49m   10.244.165.193   k8s-03   <none>           <none>
etcd-k8s-01                                1/1     Running   0          48m   64.225.39.115    k8s-01   <none>           <none>
kube-apiserver-k8s-01                      1/1     Running   0          48m   64.225.39.115    k8s-01   <none>           <none>
kube-controller-manager-k8s-01             1/1     Running   0          48m   64.225.39.115    k8s-01   <none>           <none>
kube-proxy-7s8pn                           1/1     Running   0          49m   64.225.39.115    k8s-01   <none>           <none>
kube-proxy-9kxxr                           1/1     Running   0          49m   64.225.118.77    k8s-03   <none>           <none>
kube-proxy-r7w4z                           1/1     Running   0          49m   157.245.178.77   k8s-02   <none>           <none>
kube-scheduler-k8s-01                      1/1     Running   0          48m   64.225.39.115    k8s-01   <none>           <none>
[root@k8s-01 ~]#
May 7, 2020
 

Prepare Deployment manifests for three versions (an Apache httpd Server version upgrade)

Add the revisionHistoryLimit field to control how many historical kubectl apply revisions are retained

[root@k8s-01 ~]# vi httpd-deployment.v1.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  revisionHistoryLimit: 10
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.39
        ports:
        - containerPort: 80

[root@k8s-01 ~]# vi httpd-deployment.v2.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  revisionHistoryLimit: 10
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.41
        ports:
        - containerPort: 80

[root@k8s-01 ~]# vi httpd-deployment.v3.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  revisionHistoryLimit: 10
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.43
        ports:
        - containerPort: 80

Apply the three manifests in turn, recording each operation as a revision

Deploy version 2.4.39

[root@k8s-01 ~]# kubectl apply -f httpd-deployment.v1.yaml --record
deployment.apps/httpd created
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bbc9b449d-p7828   1/1     Running   0          30s   10.244.2.4   k8s-03   <none>           <none>
httpd-5bbc9b449d-twmv9   1/1     Running   0          30s   10.244.1.3   k8s-02   <none>           <none>
httpd-5bbc9b449d-zj4zn   1/1     Running   0          30s   10.244.1.2   k8s-02   <none>           <none>
[root@k8s-01 ~]#

Check the Deployment's image version (2.4.39)

[root@k8s-01 ~]# kubectl get deployments.apps -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           58s   httpd        httpd:2.4.39   run=httpd
[root@k8s-01 ~]#

Update to version 2.4.41

[root@k8s-01 ~]# kubectl apply -f httpd-deployment.v2.yaml --record
deployment.apps/httpd configured
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS              RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bb8cdb99c-45tjk   0/1     ContainerCreating   0          8s    <none>       k8s-02   <none>           <none>
httpd-5bb8cdb99c-nz4n6   1/1     Running             0          17s   10.244.2.5   k8s-03   <none>           <none>
httpd-5bbc9b449d-p7828   1/1     Running             0          94s   10.244.2.4   k8s-03   <none>           <none>
httpd-5bbc9b449d-zj4zn   1/1     Running             0          94s   10.244.1.2   k8s-02   <none>           <none>
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS              RESTARTS   AGE    IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bb8cdb99c-45tjk   1/1     Running             0          24s    10.244.1.4   k8s-02   <none>           <none>
httpd-5bb8cdb99c-kmqxb   0/1     ContainerCreating   0          1s     <none>       k8s-02   <none>           <none>
httpd-5bb8cdb99c-nz4n6   1/1     Running             0          33s    10.244.2.5   k8s-03   <none>           <none>
httpd-5bbc9b449d-p7828   1/1     Running             0          110s   10.244.2.4   k8s-03   <none>           <none>
httpd-5bbc9b449d-zj4zn   1/1     Terminating         0          110s   10.244.1.2   k8s-02   <none>           <none>
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bb8cdb99c-45tjk   1/1     Running   0          44s   10.244.1.4   k8s-02   <none>           <none>
httpd-5bb8cdb99c-kmqxb   1/1     Running   0          21s   10.244.1.5   k8s-02   <none>           <none>
httpd-5bb8cdb99c-nz4n6   1/1     Running   0          53s   10.244.2.5   k8s-03   <none>           <none>
[root@k8s-01 ~]#

Check the Deployment's image version (2.4.41)

[root@k8s-01 ~]# kubectl get deployments.apps -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE     CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           3m13s   httpd        httpd:2.4.41   run=httpd
[root@k8s-01 ~]#

Update to version 2.4.43

[root@k8s-01 ~]# kubectl apply -f httpd-deployment.v3.yaml --record
deployment.apps/httpd configured
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE     IP           NODE     NOMINATED NODE   READINESS GATES
httpd-7c68f97dc5-8vwxx   1/1     Running   0          3m26s   10.244.2.6   k8s-03   <none>           <none>
httpd-7c68f97dc5-fn6ql   1/1     Running   0          2m55s   10.244.1.7   k8s-02   <none>           <none>
httpd-7c68f97dc5-s7mwv   1/1     Running   0          3m17s   10.244.1.6   k8s-02   <none>           <none>
[root@k8s-01 ~]#

Check the Deployment's image version (2.4.43)

[root@k8s-01 ~]# kubectl get deployments.apps -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           5m    httpd        httpd:2.4.43   run=httpd
[root@k8s-01 ~]#

View the Deployment's kubectl apply revision history

[root@k8s-01 ~]# kubectl rollout history deployment httpd 
deployment.apps/httpd 
REVISION  CHANGE-CAUSE
1         kubectl apply --filename=httpd-deployment.v1.yaml --record=true
2         kubectl apply --filename=httpd-deployment.v2.yaml --record=true
3         kubectl apply --filename=httpd-deployment.v3.yaml --record=true

[root@k8s-01 ~]#
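
To inspect the pod template recorded for a particular revision (an extra step, not in the original session):

[root@k8s-01 ~]# kubectl rollout history deployment httpd --revision=2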

Roll back to a specified revision (here the previous one, revision 2)

[root@k8s-01 ~]# kubectl rollout undo deployment httpd --to-revision=2
deployment.apps/httpd rolled back
[root@k8s-01 ~]#
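
Optionally, the progress of the rollback can be watched until the new ReplicaSet is fully rolled out:

[root@k8s-01 ~]# kubectl rollout status deployment httpd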

View the revision history again

[root@k8s-01 ~]# kubectl rollout history deployment httpd 
deployment.apps/httpd 
REVISION  CHANGE-CAUSE
1         kubectl apply --filename=httpd-deployment.v1.yaml --record=true
3         kubectl apply --filename=httpd-deployment.v3.yaml --record=true
4         kubectl apply --filename=httpd-deployment.v2.yaml --record=true

[root@k8s-01 ~]#

Check the current Deployment image version

[root@k8s-01 ~]# kubectl get deployments.apps -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           9m    httpd        httpd:2.4.41   run=httpd
[root@k8s-01 ~]#
May 6, 2020
 

A rolling update maintains service continuity by using an update strategy to control how many replicas are replaced at a time.
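
The batch size per step is governed by the Deployment's rollingUpdate strategy fields maxSurge and maxUnavailable (both default to 25%, as the describe output below shows). As a hedged example, they could be tightened on the httpd Deployment once it exists, e.g.:

kubectl patch deployment httpd -p '{"spec":{"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}'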

Prepare a manifest that uses the httpd:2.4.41 image

[root@k8s-01 ~]# vi httpd-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.41
        ports:
        - containerPort: 80

Apply the manifest and list the Deployment, ReplicaSet, and Pods

[root@k8s-01 ~]# kubectl apply -f httpd-deployment.yaml
deployment.apps/httpd created
[root@k8s-01 ~]#
[root@k8s-01 ~]# kubectl get deployments.apps httpd -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           20s   httpd        httpd:2.4.41   run=httpd
[root@k8s-01 ~]#
[root@k8s-01 ~]# kubectl get replicasets.apps -o wide
NAME               DESIRED   CURRENT   READY   AGE   CONTAINERS   IMAGES         SELECTOR
httpd-5bb8cdb99c   3         3         3       36s   httpd        httpd:2.4.41   pod-template-hash=5bb8cdb99c,run=httpd
[root@k8s-01 ~]#
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bb8cdb99c-454mz   1/1     Running   0          51s   10.244.2.4   k8s-03   <none>           <none>
httpd-5bb8cdb99c-qlzbh   1/1     Running   0          51s   10.244.1.5   k8s-02   <none>           <none>
httpd-5bb8cdb99c-rpt59   1/1     Running   0          51s   10.244.1.6   k8s-02   <none>           <none>
[root@k8s-01 ~]#

Modify the manifest to use the httpd:2.4.43 image

apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.43
        ports:
        - containerPort: 80

Apply the manifest and list the Deployment and ReplicaSets

[root@k8s-01 ~]# kubectl apply -f httpd-deployment.yaml
deployment.apps/httpd configured
[root@k8s-01 ~]# kubectl get deployments.apps httpd -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE    CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           3m2s   httpd        httpd:2.4.43   run=httpd
[root@k8s-01 ~]# kubectl get replicasets.apps -o wide
NAME               DESIRED   CURRENT   READY   AGE     CONTAINERS   IMAGES         SELECTOR
httpd-5bb8cdb99c   0         0         0       3m11s   httpd        httpd:2.4.41   pod-template-hash=5bb8cdb99c,run=httpd
httpd-7c68f97dc5   3         3         3       24s     httpd        httpd:2.4.43   pod-template-hash=7c68f97dc5,run=httpd
[root@k8s-01 ~]#

View the rolling update details (only one Pod running the old image is replaced at a time)

[root@k8s-01 ~]# kubectl describe deployments.apps httpd
Name:                   httpd
Namespace:              default
CreationTimestamp:      Wed, 06 May 2020 09:20:14 +0000
Labels:                 <none>
Annotations:            deployment.kubernetes.io/revision: 2
                        kubectl.kubernetes.io/last-applied-configuration:
                          {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"name":"httpd","namespace":"default"},"spec":{"replicas":3,"selec...
Selector:               run=httpd
Replicas:               3 desired | 3 updated | 3 total | 3 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  run=httpd
  Containers:
   httpd:
    Image:        httpd:2.4.43
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   httpd-7c68f97dc5 (3/3 replicas created)
Events:
  Type    Reason             Age    From                   Message
  ----    ------             ----   ----                   -------
  Normal  ScalingReplicaSet  4m27s  deployment-controller  Scaled up replica set httpd-5bb8cdb99c to 3
  Normal  ScalingReplicaSet  100s   deployment-controller  Scaled up replica set httpd-7c68f97dc5 to 1
  Normal  ScalingReplicaSet  93s    deployment-controller  Scaled down replica set httpd-5bb8cdb99c to 2
  Normal  ScalingReplicaSet  93s    deployment-controller  Scaled up replica set httpd-7c68f97dc5 to 2
  Normal  ScalingReplicaSet  85s    deployment-controller  Scaled down replica set httpd-5bb8cdb99c to 1
  Normal  ScalingReplicaSet  85s    deployment-controller  Scaled up replica set httpd-7c68f97dc5 to 3
  Normal  ScalingReplicaSet  84s    deployment-controller  Scaled down replica set httpd-5bb8cdb99c to 0
[root@k8s-01 ~]#
May 6, 2020
 

List the services in the cluster (type is ClusterIP)

[root@k8s-01 ~]# kubectl get service -o wide
NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE   SELECTOR
httpd-service   ClusterIP   10.109.145.140   <none>        8080/TCP   78m   run=httpd
kubernetes      ClusterIP   10.96.0.1        <none>        443/TCP    85m   <none>
[root@k8s-01 ~]#

Modify the Service manifest to use the NodePort type and apply it

[root@k8s-01 ~]# vi httpd-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: httpd-service
spec:
  type: NodePort
  selector:
    run: httpd
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80
[root@k8s-01 ~]# kubectl apply -f httpd-service.yaml
service/httpd-service configured
[root@k8s-01 ~]#

List the services in the cluster (type is now NodePort)

[root@k8s-01 ~]# kubectl get service -o wide
NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE   SELECTOR
httpd-service   NodePort    10.109.145.140   <none>        8080:30093/TCP   81m   run=httpd
kubernetes      ClusterIP   10.96.0.1        <none>        443/TCP          88m   <none>
[root@k8s-01 ~]#

Access the in-cluster service via node IP and port (load-balanced packet forwarding is implemented with iptables)

[root@k8s-01 ~]# curl 167.99.108.90:30093
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]# curl 206.189.165.254:30093
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]# curl 167.99.108.90:30093
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]#
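
The forwarding itself is done by NAT rules that kube-proxy programs; a rough way to see the rules for this NodePort (chain names vary by kube-proxy version):

[root@k8s-01 ~]# iptables-save -t nat | grep 30093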

Pin the NodePort to a fixed port number (by default a random port in the 30000-32767 range is assigned)

[root@k8s-01 ~]# vi httpd-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: httpd-service
spec:
  type: NodePort
  selector:
    run: httpd
  ports:
  - protocol: TCP
    nodePort: 31234
    port: 8080
    targetPort: 80
[root@k8s-01 ~]# kubectl apply -f httpd-service.yaml
service/httpd-service configured
[root@k8s-01 ~]#

List the services in the cluster

[root@k8s-01 ~]# kubectl  get services -o wide
NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE    SELECTOR
httpd-service   NodePort    10.109.145.140   <none>        8080:31234/TCP   93m    run=httpd
kubernetes      ClusterIP   10.96.0.1        <none>        443/TCP          100m   <none>
[root@k8s-01 ~]#

Port field descriptions (see the example after the list)

nodePort: the port the node listens on
port: the port the Service's ClusterIP listens on
targetPort: the port the Pod (container) listens on
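
A hedged way to exercise all three from a cluster node, using the values above (the Pod IP is an example value taken from kubectl get pods -o wide and changes as Pods are rescheduled):

[root@k8s-01 ~]# curl 10.109.145.140:8080     # port: the Service's ClusterIP
[root@k8s-01 ~]# curl 167.99.108.90:31234     # nodePort: any node's IP
[root@k8s-01 ~]# curl 10.244.1.4:80           # targetPort: a Pod IP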