June 10, 2020
 

Install the Apache and Subversion services

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# yum install httpd subversion mod_dav_svn mariadb-server mariadb apr-util-mysql

Installed:
  apr-util-mysql.x86_64 0:1.5.2-6.el7                httpd.x86_64 0:2.4.6-93.el7.centos                 
  mariadb.x86_64 1:5.5.65-1.el7                      mariadb-server.x86_64 1:5.5.65-1.el7               
  mod_dav_svn.x86_64 0:1.7.14-14.el7                 subversion.x86_64 0:1.7.14-14.el7                  

Dependency Installed:
  apr.x86_64 0:1.4.8-5.el7                            apr-util.x86_64 0:1.5.2-6.el7                     
  centos-logos.noarch 0:70.0.6-3.el7.centos           gnutls.x86_64 0:3.3.29-9.el7_6                    
  httpd-tools.x86_64 0:2.4.6-93.el7.centos            libaio.x86_64 0:0.3.109-13.el7                    
  libmodman.x86_64 0:2.0.1-8.el7                      libproxy.x86_64 0:0.4.11-11.el7                   
  mailcap.noarch 0:2.1.41-2.el7                       neon.x86_64 0:0.30.0-4.el7                        
  nettle.x86_64 0:2.7.1-8.el7                         pakchois.x86_64 0:0.4-10.el7                      
  perl.x86_64 4:5.16.3-295.el7                        perl-Carp.noarch 0:1.26-244.el7                   
  perl-Compress-Raw-Bzip2.x86_64 0:2.061-3.el7        perl-Compress-Raw-Zlib.x86_64 1:2.061-4.el7       
  perl-DBD-MySQL.x86_64 0:4.023-6.el7                 perl-DBI.x86_64 0:1.627-4.el7                     
  perl-Data-Dumper.x86_64 0:2.145-3.el7               perl-Encode.x86_64 0:2.51-7.el7                   
  perl-Exporter.noarch 0:5.68-3.el7                   perl-File-Path.noarch 0:2.09-2.el7                
  perl-File-Temp.noarch 0:0.23.01-3.el7               perl-Filter.x86_64 0:1.49-3.el7                   
  perl-Getopt-Long.noarch 0:2.40-3.el7                perl-HTTP-Tiny.noarch 0:0.033-3.el7               
  perl-IO-Compress.noarch 0:2.061-2.el7               perl-Net-Daemon.noarch 0:0.48-5.el7               
  perl-PathTools.x86_64 0:3.40-5.el7                  perl-PlRPC.noarch 0:0.2020-14.el7                 
  perl-Pod-Escapes.noarch 1:1.04-295.el7              perl-Pod-Perldoc.noarch 0:3.20-4.el7              
  perl-Pod-Simple.noarch 1:3.28-4.el7                 perl-Pod-Usage.noarch 0:1.63-3.el7                
  perl-Scalar-List-Utils.x86_64 0:1.27-248.el7        perl-Socket.x86_64 0:2.010-5.el7                  
  perl-Storable.x86_64 0:2.45-3.el7                   perl-Text-ParseWords.noarch 0:3.29-4.el7          
  perl-Time-HiRes.x86_64 4:1.9725-3.el7               perl-Time-Local.noarch 0:1.2300-2.el7             
  perl-constant.noarch 0:1.27-2.el7                   perl-libs.x86_64 4:5.16.3-295.el7                 
  perl-macros.x86_64 4:5.16.3-295.el7                 perl-parent.noarch 1:0.225-244.el7                
  perl-podlators.noarch 0:2.5.1-3.el7                 perl-threads.x86_64 0:1.87-4.el7                  
  perl-threads-shared.x86_64 0:1.43-6.el7             subversion-libs.x86_64 0:1.7.14-14.el7            
  trousers.x86_64 0:0.3.14-2.el7                     

Dependency Updated:
  mariadb-libs.x86_64 1:5.5.65-1.el7 

View the DBD MySQL driver module information

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# yum info apr-util-mysql
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirror.keystealth.org
 * extras: repos-lax.psychz.net
 * updates: mirrors.xtom.com
Installed Packages
Name        : apr-util-mysql
Arch        : x86_64
Version     : 1.5.2
Release     : 6.el7
Size        : 24 k
Repo        : installed
From repo   : base
Summary     : APR utility library MySQL DBD driver
URL         : http://apr.apache.org/
License     : ASL 2.0
Description : This package provides the MySQL driver for the apr-util DBD
            : (database abstraction) interface.

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# rpm -lq apr-util-mysql
/usr/lib64/apr-util-1/apr_dbd_mysql-1.so
/usr/lib64/apr-util-1/apr_dbd_mysql.so
[root@centos-s-1vcpu-1gb-sfo3-01 ~]#

Configure the MariaDB (MySQL) service and create the database and table

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# systemctl enable mariadb
Created symlink from /etc/systemd/system/multi-user.target.wants/mariadb.service to /usr/lib/systemd/system/mariadb.service.
[root@centos-s-1vcpu-1gb-sfo3-01 ~]# systemctl start mariadb
[root@centos-s-1vcpu-1gb-sfo3-01 ~]#

Create the database

MariaDB [(none)]> create database subversion;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> grant SELECT, INSERT, UPDATE, DELETE on subversion.* to apache@localhost;
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> set password for apache@localhost=password('apachepwd');
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]>

Create the table

MariaDB [(none)]> use subversion;
Database changed
MariaDB [subversion]> create table authn (
    -> username varchar(255) not null,
    -> password varchar(255),
    -> status varchar(255),
    -> primary key (username)
    -> );
Query OK, 0 rows affected (0.01 sec)

MariaDB [subversion]>

Insert test data
Generate the password hash (a specific hashing function can be chosen)

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# htpasswd -nb user1 123456
user1:$apr1$hyGT4jgm$xCWktYtKdOZ.y59Zo.t7C1

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# 

MariaDB [subversion]> INSERT INTO `authn` (`username`, `password`, `status`) 
    -> VALUES('user1', '$apr1$hyGT4jgm$xCWktYtKdOZ.y59Zo.t7C1', 'ok');
Query OK, 1 row affected (0.00 sec)

MariaDB [subversion]>

View the table data

MariaDB [subversion]> select * from authn;
+----------+---------------------------------------+--------+
| username | password                              | status |
+----------+---------------------------------------+--------+
| user1    | $apr1$hyGT4jgm$xCWktYtKdOZ.y59Zo.t7C1 | ok     |
+----------+---------------------------------------+--------+
1 row in set (0.00 sec)

MariaDB [subversion]>

Password hashing function reference

https://dev.mysql.com/doc/refman/5.6/en/encryption-functions.html#function_password
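
mod_authn_dbd validates the stored hash using the same formats htpasswd can produce (APR MD5, SHA-1, crypt), so an alternative hash can be generated and stored the same way. A minimal sketch with a hypothetical second user (user2 and its password are made up for illustration):

# generate a SHA-1 hash instead of the default APR MD5
htpasswd -nbs user2 654321
# store everything after "user2:" in the password column, e.g.
# INSERT INTO authn (username, password, status) VALUES ('user2', '<hash-from-htpasswd>', 'ok');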

Create the repository

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# mkdir /var/www/svn
[root@centos-s-1vcpu-1gb-sfo3-01 ~]# cd /var/www/svn/
[root@centos-s-1vcpu-1gb-sfo3-01 svn]# svnadmin create test
[root@centos-s-1vcpu-1gb-sfo3-01 svn]#

Configure the Apache environment
List the relevant installed modules

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# ls /etc/httpd/modules/ |grep dbd
mod_authn_dbd.so
mod_authz_dbd.so
mod_dbd.so
[root@centos-s-1vcpu-1gb-sfo3-01 ~]# ls /etc/httpd/modules/ |grep socache
mod_authn_socache.so
mod_cache_socache.so
mod_socache_dbm.so
mod_socache_memcache.so
mod_socache_shmcb.so
[root@centos-s-1vcpu-1gb-sfo3-01 ~]#

mod_authn_dbd configuration references

http://httpd.apache.org/docs/2.4/mod/mod_authn_dbd.html
http://httpd.apache.org/docs/2.4/mod/mod_dbd.html
http://httpd.apache.org/docs/2.4/mod/mod_authn_socache.html

Set the server name

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# vi /etc/httpd/conf/httpd.conf
ServerName 64.227.106.245

Add a new configuration file

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# vi /etc/httpd/conf.d/repository.conf
# mod_dbd configuration
# UPDATED to include authentication caching
DBDriver mysql
DBDParams "host=localhost port=3306 dbname=subversion user=apache pass=apachepwd"

DBDMin  4
DBDKeep 8
DBDMax  20
DBDExptime 300

<Location /repos>
  DAV svn
  SVNParentPath /var/www/svn

  # mod_authn_core and mod_auth_basic configuration
  # for mod_authn_dbd
  AuthType Basic
  AuthName "Subversion repository"
  
  # To cache credentials, put socache ahead of dbd here
  AuthBasicProvider socache dbd

  # Also required for caching: tell the cache to cache dbd lookups!
  AuthnCacheProvideFor dbd
  AuthnCacheContext my-server
  
  SVNPathAuthz off

  # Authorization: Authenticated users only
  Require valid-user
  
  # mod_authn_dbd SQL query to authenticate a user
  AuthDBDUserPWQuery "SELECT password FROM authn WHERE username = %s"
</Location>
[root@centos-s-1vcpu-1gb-sfo3-01 ~]# apachectl -t
Syntax OK
[root@centos-s-1vcpu-1gb-sfo3-01 ~]#

Start the Apache service

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# systemctl enable httpd
Created symlink from /etc/systemd/system/multi-user.target.wants/httpd.service to /usr/lib/systemd/system/httpd.service.
[root@centos-s-1vcpu-1gb-sfo3-01 ~]# systemctl start httpd
[root@centos-s-1vcpu-1gb-sfo3-01 ~]#

Check the listening ports

[root@centos-s-1vcpu-1gb-sfo3-01 ~]# netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      969/master          
tcp        0      0 0.0.0.0:3306            0.0.0.0:*               LISTEN      1586/mysqld         
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/systemd           
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1018/sshd           
tcp6       0      0 ::1:25                  :::*                    LISTEN      969/master          
tcp6       0      0 :::111                  :::*                    LISTEN      1/systemd           
tcp6       0      0 :::80                   :::*                    LISTEN      12198/httpd         
tcp6       0      0 :::22                   :::*                    LISTEN      1018/sshd           
[root@centos-s-1vcpu-1gb-sfo3-01 ~]#

Access the repository in a browser to verify login authentication

http://64.227.106.245/repos/test
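
The same credentials can also be exercised from an svn client; a minimal sketch:

svn checkout http://64.227.106.245/repos/test --username user1
# enter the password generated above when prompted; a successful checkout of
# revision 0 confirms that Apache authenticated the user against the authn table
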
May 29, 2020
 

Take node server6 offline

[root@server6 ~]# init 0

View the volume information

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Disperse
Volume ID: fd3fdef5-a1c5-41c6-83f7-a9df9e3ccbb3
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (4 + 2) = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
[root@server1 ~]#

The volume status shows that server6 and its brick are no longer listed

[root@server1 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       10276
Brick server5:/mnt/volume_sfo2_05/brick5    49152     0          Y       10274
Self-heal Daemon on localhost               N/A       N/A        Y       10401
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server5                 N/A       N/A        Y       10295
Self-heal Daemon on server2                 N/A       N/A        Y       10294
Self-heal Daemon on server4                 N/A       N/A        Y       10297
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server1 ~]#

The peer status shows that server6 is disconnected

[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: d331a6e5-b533-42a6-bd78-b07f33edbb0f
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server1 ~]#

Write files

[root@server7 ~]# for i in `seq -w 21 40`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-05 copy-test-09 copy-test-13 copy-test-17 copy-test-21 copy-test-25 copy-test-29 copy-test-33 copy-test-37
copy-test-02 copy-test-06 copy-test-10 copy-test-14 copy-test-18 copy-test-22 copy-test-26 copy-test-30 copy-test-34 copy-test-38
copy-test-03 copy-test-07 copy-test-11 copy-test-15 copy-test-19 copy-test-23 copy-test-27 copy-test-31 copy-test-35 copy-test-39
copy-test-04 copy-test-08 copy-test-12 copy-test-16 copy-test-20 copy-test-24 copy-test-28 copy-test-32 copy-test-36 copy-test-40
[root@server7 ~]#

Take node server5 offline

[root@server5 ~]# init 0

View the peer status

[root@server2 ~]# gluster peer status
Number of Peers: 5

Hostname: server1
Uuid: a97fa9a8-e97f-421e-b92c-07ef39a488cd
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Disconnected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server2 ~]#

View the volume status

[root@server2 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       10276
Self-heal Daemon on localhost               N/A       N/A        Y       10294
Self-heal Daemon on server1                 N/A       N/A        Y       10401
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server4                 N/A       N/A        Y       10297
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server2 ~]#

Write files

[root@server7 ~]# for i in `seq -w 41 60`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-06 copy-test-11 copy-test-16 copy-test-21 copy-test-26 copy-test-31 copy-test-36 copy-test-41 copy-test-46 copy-test-51 copy-test-56
copy-test-02 copy-test-07 copy-test-12 copy-test-17 copy-test-22 copy-test-27 copy-test-32 copy-test-37 copy-test-42 copy-test-47 copy-test-52 copy-test-57
copy-test-03 copy-test-08 copy-test-13 copy-test-18 copy-test-23 copy-test-28 copy-test-33 copy-test-38 copy-test-43 copy-test-48 copy-test-53 copy-test-58
copy-test-04 copy-test-09 copy-test-14 copy-test-19 copy-test-24 copy-test-29 copy-test-34 copy-test-39 copy-test-44 copy-test-49 copy-test-54 copy-test-59
copy-test-05 copy-test-10 copy-test-15 copy-test-20 copy-test-25 copy-test-30 copy-test-35 copy-test-40 copy-test-45 copy-test-50 copy-test-55 copy-test-60
[root@server7 ~]#

Take node server4 offline

[root@server4 ~]# init 0

View the peer status

[root@server2 ~]# gluster peer status
Number of Peers: 5

Hostname: server1
Uuid: a97fa9a8-e97f-421e-b92c-07ef39a488cd
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Disconnected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Disconnected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Disconnected)
[root@server2 ~]#

View the volume status

[root@server2 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       10380
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       10273
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       10269
Self-heal Daemon on localhost               N/A       N/A        Y       10294
Self-heal Daemon on server3                 N/A       N/A        Y       10290
Self-heal Daemon on server1                 N/A       N/A        Y       10401
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server2 ~]#

List the mounted directory on the client node (no longer available)

[root@server7 ~]# ls /mnt 
ls: cannot access /mnt: Transport endpoint is not connected
[root@server7 ~]#
May 28, 2020
 

Configure a disperse volume of six 100GB bricks that tolerates up to two failed bricks

206.189.167.54 server1
64.227.54.71 server2
64.227.54.85 server3
206.189.171.152 server4
64.227.54.28 server5
64.227.54.35 server6
206.189.171.164 server7
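
These name-to-IP mappings are presumably kept in /etc/hosts on every node, as in the quickstart post further below; a minimal sketch:

cat >> /etc/hosts <<'EOF'
206.189.167.54 server1
64.227.54.71 server2
64.227.54.85 server3
206.189.171.152 server4
64.227.54.28 server5
64.227.54.35 server6
206.189.171.164 server7
EOF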

Add the nodes to the trusted storage pool

[root@server1 ~]# gluster peer probe server2
peer probe: success. 
[root@server1 ~]# gluster peer probe server3
peer probe: success. 
[root@server1 ~]# gluster peer probe server4
peer probe: success. 
[root@server1 ~]# gluster peer probe server5
peer probe: success. 
[root@server1 ~]# gluster peer probe server6
peer probe: success. 
[root@server1 ~]#

View the peer status

[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: d331a6e5-b533-42a6-bd78-b07f33edbb0f
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: c925e178-a154-4e00-b678-a0b9a30187a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: 278a51f2-e399-4182-8f37-9c47e35205d3
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a0be5978-e05b-46bc-83b7-c34ae212cf21
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: a505fa8b-72f3-47a1-af1c-19ca01c1245a
State: Peer in Cluster (Connected)
[root@server1 ~]#

Create the brick directories on the nodes

[root@server1 ~]# mkdir -p /mnt/volume_sfo2_01/brick1
[root@server2 ~]# mkdir -p /mnt/volume_sfo2_02/brick2
[root@server3 ~]# mkdir -p /mnt/volume_sfo2_03/brick3
[root@server4 ~]# mkdir -p /mnt/volume_sfo2_04/brick4
[root@server5 ~]# mkdir -p /mnt/volume_sfo2_05/brick5
[root@server6 ~]# mkdir -p /mnt/volume_sfo2_06/brick6

If the redundancy parameter is omitted when creating the volume, the system computes the optimal value and prompts for confirmation

[root@server1 ~]# gluster volume create data-volume disperse 6 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
The optimal redundancy for this configuration is 2. Do you want to create the volume with this value ? (y/n) n

Usage:
volume create <NEW-VOLNAME> [stripe <COUNT>] [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> <TA-BRICK>... [force]

[root@server1 ~]#

Create the volume with both the disperse and redundancy parameters specified

gluster volume create data-volume disperse 6 redundancy 2 transport tcp \
server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6

[root@server1 ~]# gluster volume create data-volume disperse 6 redundancy 2 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
volume create: data-volume: success: please start the volume to access data
[root@server1 ~]#

Start the volume and view its information and status

[root@server1 ~]# gluster volume start data-volume
volume start: data-volume: success
[root@server1 ~]#

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Disperse
Volume ID: fd3fdef5-a1c5-41c6-83f7-a9df9e3ccbb3
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x (4 + 2) = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
[root@server1 ~]#

Install the required GlusterFS components on the client

[root@server7 ~]# yum -y install centos-release-gluster
[root@server7 ~]# yum -y install glusterfs glusterfs-fuse glusterfs-rdma

Mount the data-volume volume and view the disk information (actual usable capacity: 400GB)

[root@server7 ~]# mount -t glusterfs server6:/data-volume /mnt/
[root@server7 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 60G 1020M 59G 2% /
devtmpfs 897M 0 897M 0% /dev
tmpfs 920M 0 920M 0% /dev/shm
tmpfs 920M 17M 903M 2% /run
tmpfs 920M 0 920M 0% /sys/fs/cgroup
tmpfs 184M 0 184M 0% /run/user/0
server6:/data-volume 400G 4.2G 396G 2% /mnt
[root@server7 ~]#

Write files from the client

[root@server7 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@server7 ~]#

Check how the written files are distributed across the server nodes

[root@server1 ~]# ls /mnt/volume_sfo2_01/brick1/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_01
tmpfs           184M     0  184M   0% /run/user/0
[root@server1 ~]# 

[root@server2 ~]# ls /mnt/volume_sfo2_02/brick2/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server2 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_02
tmpfs           184M     0  184M   0% /run/user/0
[root@server2 ~]# 

[root@server3 ~]# ls /mnt/volume_sfo2_03/brick3/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server3 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_03
tmpfs           184M     0  184M   0% /run/user/0
[root@server3 ~]# 

[root@server4 ~]# ls /mnt/volume_sfo2_04/brick4/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server4 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_04
tmpfs           184M     0  184M   0% /run/user/0
[root@server4 ~]# 

[root@server5 ~]# ls /mnt/volume_sfo2_05/brick5/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server5 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  975M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_05
tmpfs           184M     0  184M   0% /run/user/0
[root@server5 ~]#  

[root@server6 ~]# ls /mnt/volume_sfo2_06/brick6/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server6 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_06
tmpfs           184M     0  184M   0% /run/user/0
[root@server6 ~]#
May 27, 2020
 

Servers

DigitalOcean/2Core/2G/60G+100G
165.227.27.221 server1
159.89.152.41 server2
159.89.151.236 server3
167.172.118.183 server4
167.172.126.43 server5
64.225.47.139 server6

Client

DigitalOcean/2Core/2G/60G
64.225.47.123 server7

View the available disk information

[root@server1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_01
tmpfs           184M     0  184M   0% /run/user/0
[root@server1 ~]# 

[root@server2 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_02
tmpfs           184M     0  184M   0% /run/user/0
[root@server2 ~]# 

[root@server3 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_03
tmpfs           184M     0  184M   0% /run/user/0
[root@server3 ~]# 

[root@server4 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  901M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_04
tmpfs           184M     0  184M   0% /run/user/0
[root@server4 ~]# 

[root@server5 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  974M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_05
tmpfs           184M     0  184M   0% /run/user/0
[root@server5 ~]# 

[root@server6 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vda1        60G  973M   60G   2% /
devtmpfs        897M     0  897M   0% /dev
tmpfs           920M     0  920M   0% /dev/shm
tmpfs           920M   17M  903M   2% /run
tmpfs           920M     0  920M   0% /sys/fs/cgroup
/dev/sda        100G   33M  100G   1% /mnt/volume_sfo2_06
tmpfs           184M     0  184M   0% /run/user/0
[root@server6 ~]#

Install and start the GlusterFS service on the server nodes

sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config;
yum -y install centos-release-gluster;
yum -y install glusterfs-server;
systemctl enable glusterfsd;
systemctl start glusterfsd;

Add the nodes to the trusted storage pool

Once the trusted storage pool is established and the nodes can reach one another, only nodes that are already trusted members can probe new nodes into the pool; a new node cannot probe its way into an existing trusted storage pool.

[root@server1 ~]# gluster peer probe server2
peer probe: success. 
[root@server1 ~]# gluster peer probe server3
peer probe: success. 
[root@server1 ~]# gluster peer probe server4
peer probe: success. 
[root@server1 ~]# gluster peer probe server5
peer probe: success. 
[root@server1 ~]# gluster peer probe server6
peer probe: success. 
[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: 6231013f-07cc-4701-93b3-34d4c623a890
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: aa808d87-4e7c-4ecd-bcf0-13ea03f844a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: d153d847-ad46-4c85-8336-f8e553d5aab6
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a90c2969-67eb-4792-b5ce-6b4b3d782675
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: 3ed5adc9-d3f7-40eb-8bbd-45f0882f55cd
State: Peer in Cluster (Connected)
[root@server1 ~]#
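
A common follow-up from the upstream quickstart (an optional step, not shown in the original notes) is to probe server1 back from any other member so that it too is tracked by hostname rather than by IP:

# run on server2 (or any member other than server1)
gluster peer probe server1
gluster peer status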

Create the brick directories on the nodes

[root@server1 ~]# mkdir -p /mnt/volume_sfo2_01/brick1
[root@server2 ~]# mkdir -p /mnt/volume_sfo2_02/brick2
[root@server3 ~]# mkdir -p /mnt/volume_sfo2_03/brick3
[root@server4 ~]# mkdir -p /mnt/volume_sfo2_04/brick4
[root@server5 ~]# mkdir -p /mnt/volume_sfo2_05/brick5
[root@server6 ~]# mkdir -p /mnt/volume_sfo2_06/brick6

Create a 6-node, 3-replica distributed-replicated volume

gluster volume create data-volume replica 3 transport tcp \
server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6

[root@server1 ~]# gluster volume create data-volume replica 3 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
volume create: data-volume: success: please start the volume to access data
[root@server1 ~]#

View the volume information

[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Distributed-Replicate
Volume ID: 2a2103ab-17e4-47b5-9d4c-96e460ac419c
Status: Created
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@server1 ~]# 

Start the volume and view its information and status

[root@server1 ~]# gluster volume start data-volume
volume start: data-volume: success
[root@server1 ~]# gluster volume info
 
Volume Name: data-volume
Type: Distributed-Replicate
Volume ID: 2a2103ab-17e4-47b5-9d4c-96e460ac419c
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@server1 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       9805 
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       9843 
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       9690 
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       9734 
Brick server5:/mnt/volume_sfo2_05/brick5    49152     0          Y       10285
Brick server6:/mnt/volume_sfo2_06/brick6    49152     0          Y       10470
Self-heal Daemon on localhost               N/A       N/A        Y       9826 
Self-heal Daemon on server5                 N/A       N/A        Y       10306
Self-heal Daemon on server2                 N/A       N/A        Y       9864 
Self-heal Daemon on server6                 N/A       N/A        Y       10491
Self-heal Daemon on server3                 N/A       N/A        Y       9711 
Self-heal Daemon on server4                 N/A       N/A        Y       9755 
 
Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@server1 ~]#

Install the required GlusterFS components on the client

[root@server7 ~]# yum -y install centos-release-gluster
[root@server7 ~]# yum -y install glusterfs glusterfs-fuse glusterfs-rdma

Mount the data-volume volume and view the disk information (actual usable capacity: 200GB)

[root@server7 ~]# mount -t glusterfs server6:/data-volume /mnt/
[root@server7 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G 1003M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
tmpfs                 184M     0  184M   0% /run/user/0
server6:/data-volume  200G  2.1G  198G   2% /mnt
[root@server7 ~]# 

[root@server7 ~]# mount |grep server6
server6:/data-volume on /mnt type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@server7 ~]#

Check the communication status with the server nodes
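
The output for this step is not reproduced here; a minimal sketch of one way to check it from the client, using netstat as elsewhere in these notes (established connections from the glusterfs FUSE client to the brick ports, typically 4915x, on the servers should appear):

netstat -ntp | grep glusterfs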

Write files from the client

[root@server7 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@server7 ~]#

Check how the written files are distributed across the server nodes

[root@server1 ~]# ls /mnt/volume_sfo2_01/brick1/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server1 ~]#

[root@server2 ~]# ls /mnt/volume_sfo2_02/brick2/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server2 ~]#

[root@server3 ~]# ls /mnt/volume_sfo2_03/brick3/
copy-test-04 copy-test-05 copy-test-09 copy-test-15 copy-test-17 copy-test-18 copy-test-20
[root@server3 ~]#

[root@server4 ~]# ls /mnt/volume_sfo2_04/brick4/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server4 ~]#

[root@server5 ~]# ls /mnt/volume_sfo2_05/brick5/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server5 ~]#

[root@server6 ~]# ls /mnt/volume_sfo2_06/brick6/
copy-test-01 copy-test-03 copy-test-07 copy-test-10 copy-test-12 copy-test-14 copy-test-19
copy-test-02 copy-test-06 copy-test-08 copy-test-11 copy-test-13 copy-test-16
[root@server6 ~]#
May 26, 2020
 

A distributed volume spreads files across the bricks of the volume. Distributed volumes scale well, but they provide no data redundancy of their own; redundancy has to be provided by the servers' hardware or software.

The command format for creating a distributed volume is:

# gluster volume create <NEW-VOLNAME> [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A replicated volume keeps a copy of each file on multiple bricks of the volume. When creating a replicated volume, the number of bricks should equal the replica count, and to guard against server and disk failures each brick should live on a separate server. Replicated volumes provide high availability and high reliability.

The command format for creating a replicated volume is:

# gluster volume create <NEW-VOLNAME> [replica <count>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A distributed-replicated volume combines distribution and replication. When creating one, the number of bricks must be a multiple of the specified replica count. Unless the force option is given, GlusterFS by default allows only one brick of a replica set per server node. Distributed-replicated volumes improve file read performance.

The command format for creating a distributed-replicated volume is:

# gluster volume create <NEW-VOLNAME> [replica <count>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A disperse volume is based on erasure coding: files are encoded, striped across the bricks of the volume, and stored with a configurable amount of redundancy. Disperse volumes improve disk utilization at some cost in performance. The redundancy value of a disperse volume is the number of bricks that may fail without interrupting reads and writes on the volume.

The redundancy value must be greater than 0, and the total number of bricks must be greater than twice the redundancy value, so a disperse volume needs at least 3 bricks. If no redundancy value is given when the volume is created, the system computes one automatically and asks for confirmation.

The usable capacity of a disperse volume is calculated as:

<Usable size> = <Brick size> * (#Bricks - Redundancy)
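
Applied to the six-brick, redundancy-2 volume created above:

<Usable size> = 100GB * (6 - 2) = 400GB

which matches the 400G capacity reported by df on the client.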

The command format for creating a disperse volume is:

# gluster volume create <NEW-VOLNAME> [disperse [<count>]] [redundancy <count>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>...

A distributed-disperse volume is the disperse equivalent of a distributed-replicated volume: data is distributed across disperse sub-volumes instead of replica sets.
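
For example, supplying more bricks than the disperse count produces a distributed-disperse volume; a sketch with a hypothetical volume name and brick paths (six bricks with disperse 3 / redundancy 1 give two distributed sub-volumes of (2 + 1)):

# results in a "2 x (2 + 1) = 6" distributed-disperse layout
gluster volume create dist-disp-vol disperse 3 redundancy 1 transport tcp \
  server1:/bricks/b1 server2:/bricks/b2 server3:/bricks/b3 \
  server4:/bricks/b4 server5:/bricks/b5 server6:/bricks/b6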

January 29, 2015
 

https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/
https://wiki.centos.org/SpecialInterestGroup/Storage/gluster-Quickstart

GlusterFS is a scalable distributed file system that aggregates the disk resources of multiple server nodes into a single global namespace.

Distributed file system nodes

glusterfs-01 138.197.217.220 10.138.18.152
glusterfs-02 157.245.169.92 10.138.146.225
glusterfs-03 165.227.21.222 10.138.178.108

Configure the hosts file on all nodes

[root@glusterfs-01 ~]# vi /etc/hosts
10.138.18.152 glusterfs-01
10.138.146.225 glusterfs-02
10.138.178.108 glusterfs-03

View the currently available disks and partitions

[root@glusterfs-01 ~]# fdisk -l

Disk /dev/vda: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000b6061

   Device Boot      Start         End      Blocks   Id  System
/dev/vda1   *        2048   125829086    62913519+  83  Linux

Disk /dev/vdb: 0 MB, 466944 bytes, 912 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

[root@glusterfs-01 ~]#

Create a partition

[root@glusterfs-01 ~]# fdisk /dev/sda
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x99c4ee31.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): e
Partition number (1-4, default 1): 
First sector (2048-209715199, default 2048): 
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-209715199, default 209715199): 
Using default value 209715199
Partition 1 of type Extended and of size 100 GiB is set

Command (m for help): n
Partition type:
   p   primary (0 primary, 1 extended, 3 free)
   l   logical (numbered from 5)
Select (default p): l
Adding logical partition 5
First sector (4096-209715199, default 4096): 
Using default value 4096
Last sector, +sectors or +size{K,M,G} (4096-209715199, default 209715199): 
Using default value 209715199
Partition 5 of type Linux and of size 100 GiB is set

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@glusterfs-01 ~]#

View the currently available disks and partitions again

[root@glusterfs-01 ~]# fdisk -l

Disk /dev/vda: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000b6061

   Device Boot      Start         End      Blocks   Id  System
/dev/vda1   *        2048   125829086    62913519+  83  Linux

Disk /dev/vdb: 0 MB, 466944 bytes, 912 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0xbb370b51

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1            2048   209715199   104856576    5  Extended
/dev/sda5            4096   209715199   104855552   83  Linux
[root@glusterfs-01 ~]#

Format and mount the data disk on all nodes

# mkfs.xfs -i size=512 /dev/sda5
# mkdir -p /data/brick1
# echo '/dev/sda5 /data/brick1 xfs defaults 1 2' >> /etc/fstab
# mount -a && mount

[root@glusterfs-01 ~]# mkfs.xfs -i size=512 /dev/sda5
meta-data=/dev/sda5              isize=512    agcount=4, agsize=6553472 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=26213888, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=12799, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@glusterfs-01 ~]# mkdir -p /data/brick1
[root@glusterfs-01 ~]# echo '/dev/sda5 /data/brick1 xfs defaults 1 2' >> /etc/fstab
[root@glusterfs-01 ~]# mount -a && mount
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime,seclabel)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
devtmpfs on /dev type devtmpfs (rw,nosuid,seclabel,size=917804k,nr_inodes=229451,mode=755)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev,seclabel)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,seclabel,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,seclabel,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,freezer)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,cpuset)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,perf_event)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,pids)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,net_prio,net_cls)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,cpuacct,cpu)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,memory)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,blkio)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,devices)
configfs on /sys/kernel/config type configfs (rw,relatime)
/dev/vda1 on / type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
rpc_pipefs on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
selinuxfs on /sys/fs/selinux type selinuxfs (rw,relatime)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime,seclabel)
mqueue on /dev/mqueue type mqueue (rw,relatime,seclabel)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=32,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=13335)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,seclabel,size=188220k,mode=700)
/dev/sda5 on /data/brick1 type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
[root@glusterfs-01 ~]#

Install the GlusterFS software on all nodes

[root@glusterfs-01 ~]# yum -y install centos-release-gluster
[root@glusterfs-01 ~]# yum -y install glusterfs-server

[root@glusterfs-02 ~]# yum -y install centos-release-gluster
[root@glusterfs-02 ~]# yum -y install glusterfs-server

[root@glusterfs-03 ~]# yum -y install centos-release-gluster
[root@glusterfs-03 ~]# yum -y install glusterfs-server

Enable and start the glusterfsd system service on all nodes

[root@glusterfs-01 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-01 ~]# systemctl start glusterfsd
[root@glusterfs-01 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:28:17 UTC; 8s ago
  Process: 10737 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 10737 (code=exited, status=0/SUCCESS)

May 26 07:28:17 glusterfs-01 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:28:17 glusterfs-01 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-01 ~]# 

[root@glusterfs-02 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-02 ~]# systemctl start glusterfsd
[root@glusterfs-02 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:29:21 UTC; 11s ago
  Process: 18817 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 18817 (code=exited, status=0/SUCCESS)

May 26 07:29:20 glusterfs-02 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:29:21 glusterfs-02 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-02 ~]# 

[root@glusterfs-03 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-03 ~]# systemctl start glusterfsd
[root@glusterfs-03 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:30:27 UTC; 7s ago
  Process: 18444 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 18444 (code=exited, status=0/SUCCESS)

May 26 07:30:27 glusterfs-03 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:30:27 glusterfs-03 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-03 ~]#

Check the listening ports

[root@glusterfs-01 ~]# netstat -lntuop
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name     Timer
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1195/master          off (0.00/0/0)
tcp        0      0 0.0.0.0:24007           0.0.0.0:*               LISTEN      1047/glusterd        off (0.00/0/0)
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/systemd            off (0.00/0/0)
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1247/sshd            off (0.00/0/0)
tcp6       0      0 ::1:25                  :::*                    LISTEN      1195/master          off (0.00/0/0)
tcp6       0      0 :::111                  :::*                    LISTEN      1/systemd            off (0.00/0/0)
tcp6       0      0 :::22                   :::*                    LISTEN      1247/sshd            off (0.00/0/0)
udp        0      0 0.0.0.0:111             0.0.0.0:*                           1/systemd            off (0.00/0/0)
udp        0      0 127.0.0.1:323           0.0.0.0:*                           647/chronyd          off (0.00/0/0)
udp        0      0 0.0.0.0:802             0.0.0.0:*                           629/rpcbind          off (0.00/0/0)
udp6       0      0 :::111                  :::*                                1/systemd            off (0.00/0/0)
udp6       0      0 ::1:323                 :::*                                647/chronyd          off (0.00/0/0)
udp6       0      0 :::802                  :::*                                629/rpcbind          off (0.00/0/0)
[root@glusterfs-01 ~]#

View the version information

[root@glusterfs-01 ~]# glusterfs -V
glusterfs 7.5
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc. <https://www.gluster.org/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
[root@glusterfs-01 ~]#

Add the nodes to the trusted storage pool

[root@glusterfs-01 ~]# gluster peer probe glusterfs-02
peer probe: success. 
[root@glusterfs-01 ~]# gluster peer probe glusterfs-03
peer probe: success. 
[root@glusterfs-01 ~]#

View the peer status

[root@glusterfs-01 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-02
Uuid: 9375a552-1cce-414c-8850-997800dd1f6e
State: Peer in Cluster (Connected)

Hostname: glusterfs-03
Uuid: c490e4ee-03f7-4b83-9456-6cccd101020f
State: Peer in Cluster (Connected)
[root@glusterfs-01 ~]#

[root@glusterfs-02 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-01
Uuid: 605bacf2-abb4-4083-be2b-0d17c843bc68
State: Peer in Cluster (Connected)

Hostname: glusterfs-03
Uuid: c490e4ee-03f7-4b83-9456-6cccd101020f
State: Peer in Cluster (Connected)
[root@glusterfs-02 ~]#

[root@glusterfs-03 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-01
Uuid: 605bacf2-abb4-4083-be2b-0d17c843bc68
State: Peer in Cluster (Connected)

Hostname: glusterfs-02
Uuid: 9375a552-1cce-414c-8850-997800dd1f6e
State: Peer in Cluster (Connected)
[root@glusterfs-03 ~]#

Create a volume (three replicas)

[root@glusterfs-01 ~]# mkdir -p /data/brick1/gv0
[root@glusterfs-02 ~]# mkdir -p /data/brick1/gv0
[root@glusterfs-03 ~]# mkdir -p /data/brick1/gv0

[root@glusterfs-01 ~]# gluster volume create gv0 replica 3 glusterfs-01:/data/brick1/gv0 glusterfs-02:/data/brick1/gv0 glusterfs-03:/data/brick1/gv0
volume create: gv0: success: please start the volume to access data
[root@glusterfs-01 ~]#

[root@glusterfs-01 ~]# gluster volume start gv0
volume start: gv0: success
[root@glusterfs-01 ~]#

View the volume information

[root@glusterfs-01 ~]# gluster volume info
 
Volume Name: gv0
Type: Replicate
Volume ID: aaa143ff-c7db-4b12-9d2f-4199c2cf76c9
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: glusterfs-01:/data/brick1/gv0
Brick2: glusterfs-02:/data/brick1/gv0
Brick3: glusterfs-03:/data/brick1/gv0
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@glusterfs-01 ~]#

View the volume status

[root@glusterfs-01 ~]# gluster volume status
Status of volume: gv0
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick glusterfs-01:/data/brick1/gv0         49152     0          Y       1580 
Brick glusterfs-02:/data/brick1/gv0         49152     0          Y       10275
Brick glusterfs-03:/data/brick1/gv0         49152     0          Y       10248
Self-heal Daemon on localhost               N/A       N/A        Y       1601 
Self-heal Daemon on glusterfs-03            N/A       N/A        Y       10269
Self-heal Daemon on glusterfs-02            N/A       N/A        Y       10296
 
Task Status of Volume gv0
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@glusterfs-01 ~]#

Mount the newly created three-replica file system through any node

In a GlusterFS cluster file system, the server named in the mount command is only used to fetch the volume's configuration. After that, the client talks directly to the servers listed in the volume file (which need not even include the server used for the mount).

[root@glusterfs-01 ~]# mount -t glusterfs glusterfs-03:/gv0 /mnt
[root@glusterfs-01 ~]# mount
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
devtmpfs on /dev type devtmpfs (rw,nosuid,size=917804k,nr_inodes=229451,mode=755)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_prio,net_cls)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpuacct,cpu)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
configfs on /sys/kernel/config type configfs (rw,relatime)
/dev/vda1 on / type xfs (rw,relatime,attr2,inode64,noquota)
rpc_pipefs on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=12616)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime)
mqueue on /dev/mqueue type mqueue (rw,relatime)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
/dev/sda5 on /data/brick1 type xfs (rw,relatime,attr2,inode64,noquota)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=188220k,mode=700)
glusterfs-03:/gv0 on /mnt type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@glusterfs-01 ~]#
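
Since the server named in the mount command is only used to fetch the volume configuration, additional volfile servers can be listed at mount time so the mount still succeeds when that server is down; a sketch assuming the backup-volfile-servers option of mount.glusterfs:

mount -t glusterfs -o backup-volfile-servers=glusterfs-01:glusterfs-02 glusterfs-03:/gv0 /mnt
# /etc/fstab equivalent:
# glusterfs-03:/gv0  /mnt  glusterfs  defaults,_netdev,backup-volfile-servers=glusterfs-01:glusterfs-02  0 0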

Write 20 files

[root@glusterfs-01 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@glusterfs-01 ~]#

Confirm the number of files written

[root@glusterfs-01 ~]# ls -lA /mnt/copy* | wc -l
20
[root@glusterfs-01 ~]#

View the written files in each node's local brick directory

[root@glusterfs-01 ~]# ls /data/brick1/gv0/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@glusterfs-01 ~]#

[root@glusterfs-02 ~]# ls /data/brick1/gv0/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@glusterfs-02 ~]#

[root@glusterfs-03 ~]# ls /data/brick1/gv0/
copy-test-01 copy-test-03 copy-test-05 copy-test-07 copy-test-09 copy-test-11 copy-test-13 copy-test-15 copy-test-17 copy-test-19
copy-test-02 copy-test-04 copy-test-06 copy-test-08 copy-test-10 copy-test-12 copy-test-14 copy-test-16 copy-test-18 copy-test-20
[root@glusterfs-03 ~]#
October 15, 2014
 

1. Configure the iSCSI server (target)

[root@localhost ~]# yum install scsi-target-utils
[root@localhost ~]# service tgtd start
 Starting SCSI target daemon: [ OK ]
[root@localhost ~]# chkconfig tgtd on
[root@localhost ~]#
[root@localhost ~]# mkdir /vdisk
[root@localhost ~]# dd if=/dev/zero of=/vdisk/vdisk.img bs=1M count=500
 500+0 records in
 500+0 records out
 524288000 bytes (524 MB) copied, 2.24159 s, 234 MB/s
[root@localhost ~]#

Edit the configuration file

[root@localhost ~]# vi /etc/tgt/targets.conf
 <target iqn.2014-10.com.example.target1>
 backing-store /vdisk/vdisk.img
 write-cache on
 </target>

Restart the service

[root@localhost ~]# service tgtd restart
 Stopping SCSI target daemon: [ OK ]
 Starting SCSI target daemon: [ OK ]
[root@localhost ~]#

Verify

[root@localhost ~]# tgt-admin --show target1
 Target 1: iqn.2014-10.com.example.target1
 System information:
 Driver: iscsi
 State: ready
 I_T nexus information:
 I_T nexus: 2
 Initiator: iqn.1994-05.com.redhat:f8c8fd7aac5c
 Connection: 0
 IP Address: 192.168.254.129
 I_T nexus: 4
 Initiator: iqn.1994-05.com.redhat:67f04a9b2426
 Connection: 0
 IP Address: 192.168.254.128
 LUN information:
 LUN: 0
 Type: controller
 SCSI ID: IET 00010000
 SCSI SN: beaf10
 Size: 0 MB, Block size: 1
 Online: Yes
 Removable media: No
 Prevent removal: No
 Readonly: No
 Backing store type: null
 Backing store path: None
 Backing store flags:
 LUN: 1
 Type: disk
 SCSI ID: IET 00010001
 SCSI SN: beaf11
 Size: 524 MB, Block size: 512
 Online: Yes
 Removable media: No
 Prevent removal: No
 Readonly: No
 Backing store type: rdwr
 Backing store path: /vdisk/vdisk.img
 Backing store flags:
 Account information:
 ACL information:
 ALL
 [root@localhost ~]#

2. Configure the iSCSI client (initiator)

Install (already installed by default on the system)
[root@localhost ~]# yum install iscsi-initiator-utils
Package iscsi-initiator-utils-6.2.0.873-10.el6.x86_64 already installed and latest version

Discover targets

[root@localhost ~]# iscsiadm -m discovery -t sendtargets -p 192.168.254.130:3260
 Starting iscsid: [ OK ]
 192.168.254.130:3260,1 iqn.2014-10.com.example.target1
[root@localhost ~]#

Log in

[root@localhost ~]# iscsiadm -m node -T iqn.2014-10.com.example.target1 --login
 Logging in to [iface: default, target: iqn.2014-10.com.example.target1, portal: 192.168.254.130,3260] (multiple)
 Login to [iface: default, target: iqn.2014-10.com.example.target1, portal: 192.168.254.130,3260] successful.
[root@localhost ~]#

View the newly added device

[root@localhost ~]# fdisk -l
 Disk /dev/sdb: 524 MB, 524288000 bytes
 17 heads, 59 sectors/track, 1020 cylinders
 Units = cylinders of 1003 * 512 = 513536 bytes
 Sector size (logical/physical): 512 bytes / 512 bytes
 I/O size (minimum/optimal): 512 bytes / 512 bytes
 Disk identifier: 0x00000000

Check the services

[root@localhost ~]# service iscsid status
 iscsid (pid 1300) is running...
[root@localhost ~]# chkconfig --list iscsid
 iscsid 0:off 1:off 2:off 3:on 4:on 5:on 6:off
[root@localhost ~]#

3. Configure multipath

[root@node1 ~]# rpm -q device-mapper-multipath
 device-mapper-multipath-0.4.9-72.el6.x86_64
[root@node1 ~]# service multipathd status
 multipathd is stopped
[root@node1 ~]# vi /etc/multipath.conf
[root@node1 ~]# service multipathd status
 multipathd is stopped
[root@node1 ~]# service multipathd start
 Starting multipathd daemon: [ OK ]
[root@node1 ~]# multipath -ll
 mpatha (1IET 00010001) dm-2 IET,VIRTUAL-DISK
 size=500M features='0' hwhandler='0' wp=rw
 `-+- policy='round-robin 0' prio=1 status=active
 `- 3:0:0:1 sdb 8:16 active ready running
[root@node1 ~]#
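
The contents of /etc/multipath.conf edited above are not shown; a minimal sketch that is usually enough to get the friendly mpathX names seen in the multipath -ll output (an assumption, not the exact file used here):

# /etc/multipath.conf
defaults {
    user_friendly_names yes
}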

4. Install the cluster suite

[root@node1 ~]# yum install ricci
[root@node1 ~]# passwd ricci
[root@node1 ~]# chkconfig ricci on
[root@node1 ~]# service ricci start
[root@console ~]# yum install luci
[root@console ~]# chkconfig luci on

Create the cluster, add the nodes, and check the services

[root@node2 ~]# service cman status
 cluster is running.
 [root@node2 ~]# service clvmd status
 clvmd (pid 3353) is running...
 Clustered Volume Groups: (none)
 Active clustered Logical Volumes: (none)
 [root@node2 ~]# service rgmanager status
 rgmanager (pid 3399) is running...
 [root@node2 ~]# service gfs2 status
 GFS2: no entries found in /etc/fstab
 [root@node2 ~]# service modclusterd status
 modclusterd (pid 3051) is running...
 [root@node2 ~]# clustat
 Cluster Status for htcluster @ Wed Oct 15 23:38:26 2014
 Member Status: Quorate
Member Name ID Status
 ------ ---- ---- ------
 node1 1 Online
 node2 2 Online, Local
[root@node2 ~]#

5. Format the file system

[root@node1 ~]# yum install gfs2-utils

[root@node1 ~]# mkfs.gfs2 -p lock_dlm -t htcluster:datastore -j 2 /dev/dm-2
This will destroy any data on /dev/dm-2.
It appears to contain: data

Are you sure you want to proceed? [y/n] y

Device: /dev/dm-2
Blocksize: 4096
Device Size 0.49 GB (128000 blocks)
Filesystem Size: 0.49 GB (127997 blocks)
Journals: 2
Resource Groups: 2
Locking Protocol: "lock_dlm"
Lock Table: "htcluster:datastore"
UUID: ea8bba63-df8c-9139-4e2e-e03a70f64891

[root@node1 ~]#
Mount directly (cluster not started)
[root@node1 ~]# mount /dev/dm-2 /mnt/
gfs_controld join connect error: Connection refused
error mounting lockproto lock_dlm
[root@node1 ~]#

6. After starting the cluster, add the fstab entry and mount manually

Method 1
[root@node2 ~]# vi /etc/fstab
/dev/dm-2 /mnt gfs2 _netdev 0 0
[root@node2 ~]# mount /dev/dm-2 /mnt/
[root@node2 ~]# service gfs2 status
Configured GFS2 mountpoints:
/mnt
Active GFS2 mountpoints:
/mnt
[root@node2 ~]#

Method 2
[root@node1 ~]# vi /etc/fstab
[root@node1 ~]# service gfs2 status
GFS2: service is not running
[root@node1 ~]# service gfs2 start
Mounting GFS2 filesystem (/mnt): [ OK ]
[root@node1 ~]# service gfs2 status
Configured GFS2 mountpoints:
/mnt
Active GFS2 mountpoints:
/mnt
[root@node1 ~]#

View the current service runlevels
[root@node2 ~]# chkconfig --list cman
cman 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list rgmanager
rgmanager 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list clvmd
clvmd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list modclusterd
modclusterd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]# chkconfig --list gfs2
gfs2 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@node2 ~]#

Verify
[root@node2 ~]# less /var/log/messages
[root@node2 ~]# touch /mnt/frm-n2

[root@node1 ~]# touch /mnt/frm-n1
[root@node1 ~]# ls /mnt/
frm-n1 frm-n2
[root@node1 ~]#
[root@node2 ~]# mount
/dev/mapper/VolGroup-lv_root on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
none on /sys/kernel/config type configfs (rw)
/dev/mapper/mpatha on /mnt type gfs2 (rw,relatime,hostdata=jid=0)
[root@node2 ~]#
Manually rebooted node2 and started node1 through luci; both worked.

Leaving and rejoining the cluster: effect on the configuration file
After a node leaves, its cluster services are stopped and the original configuration file is unchanged.
[root@node1 ~]# clustat
Could not connect to CMAN: No such file or directory
[root@node1 ~]# service cman status
corosync is stopped
[root@node1 ~]#

Add a resource and confirm the configuration file

[root@node2 ~]# cat /etc/cluster/cluster.conf
 <?xml version="1.0"?>
 <cluster config_version="3" name="htcluster">
 <clusternodes>
 <clusternode name="node1" nodeid="1"/>
 <clusternode name="node2" nodeid="2"/>
 </clusternodes>
 <cman expected_votes="1" two_node="1"/>
 <rm>
 <resources>
 <clusterfs device="/dev/dm-2" force_unmount="1" fsid="26886" fstype="gfs2" mountpoint="/mnt" name="htgfs2" options="_netdev" self_fence="1"/>
 </resources>
 </rm>
 </cluster>
 [root@node2 ~]#

Issues
luci may have compatibility problems with IE.
If both nodes are shut down at the same time, the first node to boot times out while starting clvmd and only continues once the second node has also reached the clvmd startup step.