Installing GlusterFS on CentOS 8

1. Environment: three machines in total, data1 to data3

  1. Set node information
[root@data1 /]# export NODE_NAMES=(data1 data2 data3)
[root@data1 /]# export NODE_IPS=(192.168.31.7 192.168.31.8 192.168.31.9)
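
The loops below run commands over SSH by IP, and the peers are later probed by hostname, so passwordless SSH from data1 and hostname resolution on every node are assumed. A minimal sketch of that prerequisite (not shown in the original post) could look like this:

# add the hostnames on every node (repeat on data2 and data3)
cat >> /etc/hosts <<EOF
192.168.31.7 data1
192.168.31.8 data2
192.168.31.9 data3
EOF

# generate a key on data1 and push it to all nodes for passwordless SSH
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for NODE_IP in ${NODE_IPS[@]}; do ssh-copy-id root@${NODE_IP}; done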

2. Install the GlusterFS repository

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "yum install centos-release-gluster -y"
>done
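
To confirm the repository landed on every node, a quick check (not in the original post) could be:

for NODE_IP in ${NODE_IPS[@]}
do
  ssh root@${NODE_IP} "yum repolist | grep -i gluster"
done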

3. Install the GlusterFS components

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel"
>done

Possible problems:

Error: 
 Problem 1: cannot install the best candidate for the job
  - nothing provides python3-pyxattr needed by glusterfs-server-7.6-1.el8.x86_64
 Problem 2: package glusterfs-geo-replication-7.6-1.el8.x86_64 requires glusterfs-server = 7.6-1.el8, but none of the providers can be installed
  - cannot install the best candidate for the job
  - nothing provides python3-pyxattr needed by glusterfs-server-7.6-1.el8.x86_64
(try to add '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages)

Solution:

  • Resolve the python3-pyxattr dependency
[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "dnf --enablerepo=PowerTools install python3-pyxattr"
>done
  • Retry the installation with --nobest
[root@data1 ~]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel --nobest"
>done
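
Once the install succeeds, the package versions can be spot-checked on every node (a sanity check, not from the original post):

for NODE_IP in ${NODE_IPS[@]}
do
  ssh root@${NODE_IP} "rpm -q glusterfs-server glusterfs-fuse"
done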

4. Change the GlusterFS working directory and manage the service with systemd

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  echo ${NODE_IP}
>  ssh root@${NODE_IP} "mkdir -p /opt/glusterfs"
>  ssh root@${NODE_IP} "sed -i 's/var\/lib/opt/g' /etc/glusterfs/glusterd.vol"
>  ssh root@${NODE_IP} "systemctl start glusterd.service"
>  ssh root@${NODE_IP} "systemctl enable glusterd.service"
>  ssh root@${NODE_IP} "systemctl status glusterd.service | grep active"
>done
192.168.31.7
   Active: active (running) since Mon 2020-07-06 03:15:13 UTC; 2h 35min ago
192.168.31.8
   Active: active (running) since Mon 2020-07-06 05:50:51 UTC; 583ms ago
192.168.31.9
   Active: active (running) since Mon 2020-07-06 05:50:54 UTC; 598ms ago
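
To confirm the sed edit took effect everywhere, the working-directory line in glusterd.vol can be checked (assuming the stock config file, which names the option working-directory):

for NODE_IP in ${NODE_IPS[@]}
do
  ssh root@${NODE_IP} "grep working-directory /etc/glusterfs/glusterd.vol"
done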

5. Open the GlusterFS management port

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "iptables -I INPUT -p tcp --dport 24007 -j ACCEPT"
>done
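
CentOS 8 enables firewalld by default, so a rule inserted directly with iptables may not survive a firewalld reload or a reboot. If firewalld is managing the firewall, a persistent alternative could look like the sketch below; the 49152-49160 brick port range is an assumption based on the per-brick ports (49152, 49153, ...) that gluster volume status reports later.

for NODE_IP in ${NODE_IPS[@]}
do
  ssh root@${NODE_IP} "firewall-cmd --permanent --add-port=24007-24008/tcp"
  ssh root@${NODE_IP} "firewall-cmd --permanent --add-port=49152-49160/tcp"
  ssh root@${NODE_IP} "firewall-cmd --reload"
done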

6. Add the peer nodes

[root@data1 /]# gluster peer probe data2
peer probe: success. 
[root@data1 /]# gluster peer probe data3
peer probe: success. 
[root@data1 opt]# gluster peer status  # check peer status
Number of Peers: 2

Hostname: data2
Uuid: d38c773f-01d3-4dcf-87af-b09c62717849
State: Peer in Cluster (Connected)

Hostname: data3
Uuid: 472fa46f-837f-4d5c-a68f-d8cd35c822ea
State: Peer in Cluster (Connected)

If the following error occurs:

[root@data1 /]# gluster peer probe data2
peer probe: failed: Probe returned with Transport endpoint is not connected

This is usually because the glusterd service on the target node is not running or port 24007 is not open.
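
A quick way to check both conditions on the peer that failed (data2 is used here as an example):

ssh root@data2 "systemctl is-active glusterd"   # should print "active"
ssh root@data2 "ss -lntp | grep 24007"          # glusterd should be listening on 24007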

7. Create a distributed replicated volume (replica 3 with arbiter 1) across 6 bricks

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}; 
>do  
>  echo ${NODE_IP}
>  ssh root@${NODE_IP} "mkdir -p /data3/gfs_data/vol01"
>  ssh root@${NODE_IP} "mkdir -p /data4/gfs_data/vol01" 
>done
192.168.31.7
192.168.31.8
192.168.31.9
[root@data1 /]# 
[root@data1 /]# gluster volume create vol01 replica 3 arbiter 1 transport tcp \
>data1:/data3/gfs_data/vol01 \
>data1:/data4/gfs_data/vol01 \
>data2:/data3/gfs_data/vol01 \
>data2:/data4/gfs_data/vol01 \
>data3:/data3/gfs_data/vol01 \
>data3:/data4/gfs_data/vol01 force
volume create: vol01: success: please start the volume to access data
[root@data1 /]# 
[root@data1 /]# gluster volume start vol01
volume start: vol01: success
[root@data1 /]# 
[root@data1 opt]# gluster volume info
 
Volume Name: vol01
Type: Distributed-Replicate
Volume ID: c34302b9-bf08-467c-8118-019a53a2e321
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x (2 + 1) = 6
Transport-type: tcp
Bricks:
Brick1: data1:/data3/gfs_data/vol01
Brick2: data1:/data4/gfs_data/vol01
Brick3: data2:/data3/gfs_data/vol01 (arbiter)
Brick4: data2:/data4/gfs_data/vol01
Brick5: data3:/data3/gfs_data/vol01
Brick6: data3:/data4/gfs_data/vol01 (arbiter)
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@data1 /]# 
[root@data1 vol01]# gluster volume status vol01
Status of volume: vol01
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick data1:/data3/gfs_data/vol01           49152     0          Y       78025
Brick data1:/data4/gfs_data/vol01           49153     0          Y       78045
Brick data2:/data3/gfs_data/vol01           49152     0          Y       34425
Brick data2:/data4/gfs_data/vol01           49153     0          Y       34445
Brick data3:/data3/gfs_data/vol01           49152     0          Y       15601
Brick data3:/data4/gfs_data/vol01           49153     0          Y       15621
Self-heal Daemon on localhost               N/A       N/A        Y       78066
Self-heal Daemon on data3                   N/A       N/A        Y       15642
Self-heal Daemon on data2                   N/A       N/A        Y       34466
 
Task Status of Volume vol01
------------------------------------------------------------------------------
There are no active volume tasks
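
Once the volume is started, a client that has glusterfs-fuse installed can mount it over FUSE. This is only a usage sketch, not part of the original walkthrough; the /mnt/vol01 mount point is an arbitrary choice:

mkdir -p /mnt/vol01
mount -t glusterfs data1:/vol01 /mnt/vol01
df -h /mnt/vol01   # the volume should show up as data1:/vol01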

If the following error occurs when creating the volume:

volume create: vol01: failed: parent directory /data3/gfs_data is already part of a volume

This means the gfs_data path was already used as a brick by a previous volume; the old GlusterFS metadata on it needs to be removed:

[root@data1 gfs_data]# ls -a
.  ..  .glusterfs
[root@data1 gfs_data]# rm -rf .glusterfs/
[root@data1 gfs_data]# setfattr -x trusted.glusterfs.volume-id /data3/gfs_data/
[root@data1 gfs_data]# setfattr -x trusted.gfid /data3/gfs_data/  # if the old volume was never created successfully, this attribute will not be found
setfattr: /data3/gfs_data/: No such attribute

Disable the consistent-time (ctime) feature

gluster volume set vol01 ctime off
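
To confirm the option took effect, the value can be read back with gluster volume get (a quick check, not in the original post):

gluster volume get vol01 all | grep ctime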

Source: blog.csdn.net/JosephThatwho/article/details/107152048