MFS High Availability (pcs cluster management and automatic power-cycle restart with fence)


server1: master;  server4: backup master (high availability)

server2 and server3: chunk servers

physical host: client

See the previous article for the detailed basic setup.

server1

yum install moosefs-cgi-3.0.103-1.rhsystemd.x86_64.rpm moosefs-cgiserv-3.0.103-1.rhsystemd.x86_64.rpm moosefs-cli-3.0.103-1.rhsystemd.x86_64.rpm moosefs-master-3.0.103-1.rhsystemd.x86_64.rpm -y


server2 and server3

 yum install -y moosefs-chunkserver-3.0.103-1.rhsystemd.x86_64.rpm

server4

yum install -y moosefs-master-3.0.103-1.rhsystemd.x86_64.rpm

[root@server1 mfs]# du -sh /var/lib/mfs
3.6M    /var/lib/mfs

server1 and server4

yum install -y  pacemaker corosync  pcs

Both nodes need the HighAvailability and ResilientStorage repos in their yum configuration (yum.repo):

[HighAvailability]
name=HighAvailability
baseurl=http://172.25.11.250/rhel7.3/addons/HighAvailability
gpgcheck=0

[ResilientStorage]
name=ResilientStorage
baseurl=http://172.25.11.250/rhel7.3/addons/ResilientStorage
gpgcheck=0
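
To make sure both new repos are actually picked up on each node, a quick check (the repo ids should show up if the baseurl is reachable):

yum clean all
yum repolist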

Set up passwordless SSH from server1 to server4 (and to server1 itself):

ssh-copy-id server1
ssh-copy-id server4
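
ssh-copy-id needs an existing key pair; if server1 does not have one yet, a minimal sketch (root account and an unprotected key, acceptable only in a lab):

ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa    # generate the key pair once
ssh server4 hostname                            # should now work without a password prompt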

systemctl isolate multi-user.target    # leave the graphical interface (equivalent to init 3)

systemctl isolate graphical.target     # return to the graphical interface (equivalent to init 5)

Start and enable pcsd:

[root@server1 mfs]# systemctl start pcsd
[root@server1 mfs]# systemctl enable pcsd

passwd hacluster
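
The hacluster password has to be identical on server1 and server4, since pcs cluster auth uses it below. A non-interactive variant; "westos" is only a placeholder value:

echo westos | passwd --stdin hacluster    # run on both nodes with the same password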

On server1:

pcs  cluster auth  server1  server4

[root@server1 mfs]# pcs  cluster auth  server1  server4
Username: hacluster
Password:
server4: Authorized
server1: Authorized

pcs cluster setup   --name mycluster server1  server4

pcs  cluster start   server1   server4

[root@server1 mfs]# pcs status  corosync

Membership information
----------------------
    Nodeid      Votes Name
         1          1 server1 (local)
         2          1 server4

journalctl  | grep -i error

pcs property set stonith-enabled=false
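
To confirm the property change before validating (pcs 0.9 syntax as shipped with RHEL 7.3):

pcs property show stonith-enabled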

crm_verify -L -V       # validate the active/standby cluster configuration

pcs  cluster start  server1   server4

pcs  cluster start  --all

[root@server1 mfs]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: server1 (version 1.1.15-11.el7-e174ec8) - partition with quorum
Last updated: Fri Apr  5 14:16:28 2019        Last change: Fri Apr  5 14:15:31 2019 by root via cibadmin on server1

2 nodes and 0 resources configured

Online: [ server1 server4 ]

No resources
Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/enabled

[root@server1 mfs]# corosync-cfgtool  -s
Printing ring status.
Local node ID 1
RING ID 0
    id    = 172.25.11.1
    status    = ring 0 active with no faults

[root@server4 3.0.103]# corosync-cfgtool  -s
Printing ring status.
Local node ID 2
RING ID 0
    id    = 172.25.11.4
    status    = ring 0 active with no faults

[root@server1 mfs]# cat /etc/corosync/corosync.conf
totem {
    version: 2
    secauth: off
    cluster_name: mycluster
    transport: udpu
}

nodelist {
    node {
        ring0_addr: server1
        nodeid: 1
    }

    node {
        ring0_addr: server4
        nodeid: 2
    }
}

client

[root@foundation11 mfs]# umount  /mnt/mfs/mfsmeta
[root@foundation11 mfs]# umount  /mnt/mfs

server1-4

systemctl stop  moosefs-chunkserver

systemctl stop  moosefs-master

Change /etc/hosts on every node and on the client so that mfsmaster resolves to the virtual IP:

172.25.11.100 mfsmaster
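
Since each machine resolves mfsmaster through /etc/hosts, the entry has to be updated everywhere. One hedged way to do it from the client, assuming root SSH access to the nodes and that an mfsmaster line already exists in each /etc/hosts:

for h in server1 server2 server3 server4; do
    ssh root@$h 'sed -i "s/.*mfsmaster$/172.25.11.100 mfsmaster/" /etc/hosts'
done
sed -i "s/.*mfsmaster$/172.25.11.100 mfsmaster/" /etc/hosts    # the client itself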

######

server3 (the node exporting /dev/vdb as the iSCSI target; note the 172.25.11.3 address used for discovery below)

yum install -y targetcli

systemctl start target

targetcli

/backstores/block create my_disk1 /dev/vdb

/iscsi create iqn.2019-04.com.example:server3

/iscsi/iqn.2019-04.com.example:server3/tpg1/luns create /backstores/block/my_disk1
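
The lines above create the backstore, the target, and the LUN. For the initiators to be able to log in, an ACL matching their initiator name is normally needed as well (demo mode is off by default); the client IQN below is an assumed value, not taken from the original setup:

/iscsi/iqn.2019-04.com.example:server3/tpg1/acls create iqn.2019-04.com.example:client
saveconfig
exit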

server1  

yum install -y iscsi-*

cat /etc/iscsi/initiatorname.iscsi    # check (or edit) the initiator name; it must match the target's ACL

iscsiadm -m node -o delete            # clear any stale node records from earlier attempts

systemctl restart iscsid

iscsiadm -m discovery -t st -p 172.25.11.3

iscsiadm -m node -l                   # log in; the exported disk appears as /dev/sdb
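
If the ACL sketched above is used, /etc/iscsi/initiatorname.iscsi on server1 (and later on server4) has to carry the matching name before iscsid is restarted; the value below is the assumed one from that sketch:

InitiatorName=iqn.2019-04.com.example:client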

server1

mkfs.xfs  /dev/sdb 

mount /dev/sdb  /mnt

cd /var/lib/mfs/

cp -p * /mnt/

chown mfs.mfs  /mnt

systemctl start moosefs-master

After confirming the master still starts, stop it again and release the device:
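
Concretely (the umount is implied rather than spelled out in the original, but server4 needs the device next):

systemctl stop moosefs-master
umount /mnt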

server4 

yum install iscsi-* -y

cat /etc/iscsi/initiatorname.iscsi

iscsiadm -m discovery -t st -p 172.25.11.3

iscsiadm -m node -l

mount  /dev/sdb  /var/lib/mfs

systemctl start moosefs-master      # confirm the master starts from the copied metadata, then stop it again
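
As on server1, the implied cleanup before pacemaker takes over (the Filesystem resource will do the mounting from now on):

systemctl stop moosefs-master
umount /var/lib/mfs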

server1

Create the cluster resources; the Filesystem resource takes care of mounting the iSCSI device on whichever node runs the master:

pcs resource  create vip ocf:heartbeat:IPaddr2 ip=172.25.11.100 cidr_netmask=32 op monitor interval=30s

pcs resource create mfsdata  ocf:heartbeat:Filesystem device=/dev/sdb directory=/var/lib/mfs fstype=xfs op monitor interval=30s

pcs resource  create mfsd  systemd:moosefs-master op monitor interval=1min

pcs resource group  add mfsgroup  vip mfsdata  mfsd
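
A quick way to verify the group and the start order of its members (pcs 0.9 syntax):

pcs resource show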

pcs cluster stop server1    # stop the active node; the resource group migrates to server4

server2 and server3

systemctl  start moosefs-chunkserver

client

mfsmount

cd /mnt/mfs 
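
A simple sanity check from the client that the mount is writable and the data actually reaches the chunkservers (the file name is arbitrary):

dd if=/dev/zero of=/mnt/mfs/failover_test bs=1M count=100
mfsfileinfo /mnt/mfs/failover_test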

crm_mon

Stack: corosync
Current DC: server1 (version 1.1.15-11.el7-e174ec8) - partition with quorum
Last updated: Fri Apr  5 16:55:45 2019          Last change: Fri Apr  5 16:55:42 2019 by root via cibadmin on server1

2 nodes and 3 resources configured

Online: [ server1 server4 ]

Active resources:

 Resource Group: mfsgroup
     vip        (ocf::heartbeat:IPaddr2):    Started server1
     mfsd    (systemd:moosefs-master):    Started server1
     mfsdata    (ocf::heartbeat:Filesystem):    Started server4

Fencing deals with the case where a kernel error (the kind triggered by echo c > /proc/sysrq-trigger) leaves a node hanging, unable to power itself off: the fence device power-cycles it automatically.

client (physical host), server1 and server4

yum list fence_virtd

yum install -y fence-virtd fence-virt    # fence-virtd with its libvirt backend runs on the physical host; fence-virt provides the fence_xvm agent used by the cluster nodes

client (physical host)

cd /etc/cluster/

dd if=/dev/urandom of=fence_xvm.key bs=128 count=1

fence_virtd -c

When fence_virtd -c asks for the network interface, do not just press Enter through every prompt: if the default is not br0, enter br0 explicitly.

systemctl start fence_virtd

server1 and server4

mkdir /etc/cluster

scp fence_xvm.key server4:/etc/cluster    # run from /etc/cluster on the physical host; the key must end up in /etc/cluster on both nodes
 

pcs stonith create  vmfence  fence_xvm pcmk_host_map="server1:server1;server4:server4"  op monitor interval=1min

pcs property set stonith-enabled=true 

pcmk_host_map maps "cluster node name:libvirt domain name"; here the two happen to be identical, as the virsh list below shows.
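
To double-check the parameters of the fence resource (pcs 0.9 syntax):

pcs stonith show vmfence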

[root@foundation11 mfs]# virsh list
 Id    Name                           State
----------------------------------------------------
 17    server1                        running
 22    server3                        running
 27    server4                        running

crm_verify -L -V    # check the configuration for errors
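
Before actually fencing a node, multicast connectivity and the shared key can be verified from a cluster node; if everything is in place fence_virtd answers with the list of domains (a check not in the original write-up):

fence_xvm -o list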

fence_xvm -H server4

server4 is power-cycled automatically. Once it has rebooted, bring it back into the cluster:

systemctl status pcsd

pcs cluster start server4 

crm_mon

Stack: corosync
Current DC: server1 (version 1.1.15-11.el7-e174ec8) - partition with quorum
Last updated: Fri Apr  5 17:19:15 2019          Last change: Fri Apr  5 17:18:58 2019 by root via cibadmin on server1

2 nodes and 4 resources configured

Online: [ server1 server4 ]

Active resources:

 Resource Group: mfsgroup
     vip        (ocf::heartbeat:IPaddr2):    Started server1
     mfsd    (systemd:moosefs-master):    Started server1
     mfsdata    (ocf::heartbeat:Filesystem):    Started server4
     vmfence    (stonith:fence_xvm):    Started server4

To test the whole setup, crash the kernel on server4:

echo c > /proc/sysrq-trigger

The node is fenced and reboots automatically.

 cat /etc/fence_virt.conf

fence_virtd {
    listener = "multicast";
    backend = "libvirt";
    module_path = "/usr/lib64/fence-virt";
}

listeners {
    multicast {
        key_file = "/etc/cluster/fence_xvm.key";
        address = "225.0.0.12";
        interface = "br0";
        family = "ipv4";
        port = "1229";
    }

}

backends {
    libvirt {
        uri = "qemu:///system";
    }

}
