CentOS8 instala GlusterFS

1. Un total de tres máquinas: data1 ~ data3

  1. Establecer información de nodo
[root@data1 /]# NODE_NAMES=(data1 data2 data3)    # nota: bash no puede exportar arrays; la asignación simple es suficiente para estos bucles
[root@data1 /]# NODE_IPS=(192.168.31.7 192.168.31.8 192.168.31.9)

2. Instale la fuente de gluster

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "yum install centos-release-gluster -y"
>done

3. Instale los componentes de glusterfs

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel"
>done

Posibles problemas:

Error: 
 Problem 1: cannot install the best candidate for the job
  - nothing provides python3-pyxattr needed by glusterfs-server-7.6-1.el8.x86_64
 Problem 2: package glusterfs-geo-replication-7.6-1.el8.x86_64 requires glusterfs-server = 7.6-1.el8, but none of the providers can be installed
  - cannot install the best candidate for the job
  - nothing provides python3-pyxattr needed by glusterfs-server-7.6-1.el8.x86_64
(try to add '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages)

Solución:

  • Resolver la dependencia de pyxattr
[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "dnf --enablerepo=PowerTools install -y python3-pyxattr"
>done
  • Agregar --nobest
[root@data1 ~]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel --nobest"
>done

4. Modifique el directorio de trabajo de gluster y configure el alojamiento systemctl

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "mkdir -p /opt/glusterd"    # debe crearse en cada nodo vía ssh y coincidir con la ruta que produce el sed siguiente (/var/lib/glusterd -> /opt/glusterd)
>  ssh root@${NODE_IP} "sed -i 's/var\/lib/opt/g' /etc/glusterfs/glusterd.vol"
>  ssh root@${NODE_IP} "systemctl start glusterd.service"
>  ssh root@${NODE_IP} "systemctl enable glusterd.service"
>  ssh root@${NODE_IP} "systemctl status glusterd.service | grep active"
>done
192.168.31.7
   Active: active (running) since Mon 2020-07-06 03:15:13 UTC; 2h 35min ago
192.168.31.8
   Active: active (running) since Mon 2020-07-06 05:50:51 UTC; 583ms ago
192.168.31.9
   Active: active (running) since Mon 2020-07-06 05:50:54 UTC; 598ms ago

5. Abra el puerto del gluster

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}
>do
>  ssh root@${NODE_IP} "iptables -I INPUT -p tcp --dport 24007 -j ACCEPT"
>done

6. Agregar nodo

[root@data1 /]# gluster peer probe data2
peer probe: success. 
[root@data1 /]# gluster peer probe data3
peer probe: success. 
[root@data1 opt]# gluster peer status  # ver el estado de los nodos
Number of Peers: 2

Hostname: data2
Uuid: d38c773f-01d3-4dcf-87af-b09c62717849
State: Peer in Cluster (Connected)

Hostname: data3
Uuid: 472fa46f-837f-4d5c-a68f-d8cd35c822ea
State: Peer in Cluster (Connected)

Si ocurre el siguiente error

[root@data1 /]# gluster peer probe data2
peer probe: failed: Probe returned with Transport endpoint is not connected

Puede deberse a que el servicio de gluster en el nodo conectado no se inicia o el puerto no está abierto

7. Cree un volumen con tres copias distribuidas en 6 directorios

[root@data1 /]# for NODE_IP in ${NODE_IPS[@]}; 
>do  
>  echo ${NODE_IP}
>  ssh root@${NODE_IP} "mkdir -p /data3/gfs_data/vol01"
>  ssh root@${NODE_IP} "mkdir -p /data4/gfs_data/vol01" 
>done
192.168.31.7
192.168.31.8
192.168.31.9
[root@data1 /]# 
[root@data1 /]# gluster volume create vol01 replica 3 arbiter 1 transport tcp \
>data1:/data3/gfs_data/vol01 \
>data1:/data4/gfs_data/vol01 \
>data2:/data3/gfs_data/vol01 \
>data2:/data4/gfs_data/vol01 \
>data3:/data3/gfs_data/vol01 \
>data3:/data4/gfs_data/vol01 force
volume create: vol01: success: please start the volume to access data
[root@data1 /]# 
[root@data1 /]# gluster volume start vol01
volume start: vol01: success
[root@data1 /]# 
[root@data1 opt]# gluster volume info
 
Volume Name: vol01
Type: Distributed-Replicate
Volume ID: c34302b9-bf08-467c-8118-019a53a2e321
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x (2 + 1) = 6
Transport-type: tcp
Bricks:
Brick1: data1:/data3/gfs_data/vol01
Brick2: data1:/data4/gfs_data/vol01
Brick3: data2:/data3/gfs_data/vol01 (arbiter)
Brick4: data2:/data4/gfs_data/vol01
Brick5: data3:/data3/gfs_data/vol01
Brick6: data3:/data4/gfs_data/vol01 (arbiter)
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@data1 /]# 
[root@data1 vol01]# gluster volume status vol01
Status of volume: vol01
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick data1:/data3/gfs_data/vol01           49152     0          Y       78025
Brick data1:/data4/gfs_data/vol01           49153     0          Y       78045
Brick data2:/data3/gfs_data/vol01           49152     0          Y       34425
Brick data2:/data4/gfs_data/vol01           49153     0          Y       34445
Brick data3:/data3/gfs_data/vol01           49152     0          Y       15601
Brick data3:/data4/gfs_data/vol01           49153     0          Y       15621
Self-heal Daemon on localhost               N/A       N/A        Y       78066
Self-heal Daemon on data3                   N/A       N/A        Y       15642
Self-heal Daemon on data2                   N/A       N/A        Y       34466
 
Task Status of Volume vol01
------------------------------------------------------------------------------
There are no active volume tasks

Si ocurre el siguiente error:

volume create: vol01: failed: parent directory /data3/gfs_data is already part of a volume

Explique que el volumen se ha montado en la ruta gfs_data anteriormente y que el archivo de configuración anterior debe eliminarse

[root@data1 gfs_data]# ls -a
.  ..  .glusterfs
[root@data1 gfs_data]# rm -rf .glusterfs/
[root@data1 gfs_data]# setfattr -x trusted.glusterfs.volume-id /data3/gfs_data/
[root@data1 gfs_data]# setfattr -x trusted.gfid /data3/gfs_data/  # si la creación del volumen anterior falló, este atributo no existirá
setfattr: /data3/gfs_data/: No such attribute

Desactivar la característica ctime (marcas de tiempo de cambio consistentes entre réplicas; no es la sincronización horaria del sistema)

gluster volume set vol01 ctime off

Supongo que te gusta

Origin blog.csdn.net/JosephThatwho/article/details/107152048
Recomendado
Clasificación