RHCS + nginx: high-availability load balancing and adding a fence mechanism

1. High availability: application/service failover. Critical applications and services fail over within an n-node server cluster.
2. Load balancing: IP load balancing. Incoming IP network requests are distributed across a group of servers.

Lab environment

iptables and SELinux disabled on the nodes
fence host (physical machine): 172.25.69.250
Primary node: server1, 172.25.69.1
Secondary node: server2, 172.25.69.2
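A minimal sketch for turning iptables and SELinux off on one node (repeat on both nodes; not part of the original transcript):
[root@server1 ~]# /etc/init.d/iptables stop
[root@server1 ~]# chkconfig iptables off
[root@server1 ~]# setenforce 0
[root@server1 ~]# vim /etc/selinux/config   // set SELINUX=disabled so the change survives a reboot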

Basic RHCS environment setup

server1:
[root@server1 ~]# vim /etc/hosts   // add name resolution for server1 (172.25.69.1) and server2 (172.25.69.2)
[root@server1 ~]# scp -r /usr/local/nginx/ server2:/usr/local/
[root@server1 ~]# scp /etc/yum.repos.d/rhel-source.repo server2:/etc/yum.repos.d/   // distribute the yum repository configuration
[root@server1 ~]# cat /etc/yum.repos.d/rhel-source.repo 
[rhel-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.69.250/rhel6.5
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
[HighAvailability]
name=HighAvailability
baseurl=http://172.25.69.250/rhel6.5/HighAvailability
gpgcheck=0
[LoadBalancer]
name=LoadBalancer
baseurl=http://172.25.69.250/rhel6.5/LoadBalancer
gpgcheck=0
[ResilientStorage]
name=ResilientStorage
baseurl=http://172.25.69.250/rhel6.5/ResilientStorage
gpgcheck=0
[ScalableFileSystem]
name=ScalableFileSystem
baseurl=http://172.25.69.250/rhel6.5/ScalableFileSystem
gpgcheck=0
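After the repo file has been copied to server2, the yum metadata can be refreshed and the repositories checked (an extra verification step, not in the original session):
[root@server1 ~]# yum clean all
[root@server1 ~]# yum repolist   // should list rhel-source, HighAvailability, LoadBalancer, ResilientStorage and ScalableFileSystem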
[root@server1 ~]# yum install -y luci ricci
[root@server1 ~]# chkconfig luci on
[root@server1 ~]# chkconfig ricci on
[root@server1 ~]# passwd ricci
[root@server1 ~]# /etc/init.d/ricci start
[root@server1 ~]# /etc/init.d/luci start
[root@server1 ~]# netstat -antlp | grep ricci
tcp        0      0 :::11111                    :::*                        LISTEN      1322/ricci          
[root@server1 ~]# nginx
[root@server1 ~]# netstat -antlp | grep nginx
tcp        0      0 0.0.0.0:80                  0.0.0.0:*                   LISTEN      1328/nginx    

server2:
[root@server2 ~]# ln -s /usr/local/nginx/sbin/nginx /usr/local/sbin/   // put the nginx binary on the PATH
[root@server2 ~]# useradd -u 500 nginx   // same nginx UID as on server1
[root@server2 ~]# nginx
[root@server2 ~]# netstat -antlp | grep nginx
tcp        0      0 0.0.0.0:80                  0.0.0.0:*                   LISTEN      1091/nginx          
[root@server2 ~]# yum install -y ricci
[root@server2 ~]# /etc/init.d/ricci start
[root@server2 ~]# netstat -antlp | grep ricci
tcp        0      0 :::11111                    :::*                        LISTEN      1291/ricci          
[root@server2 ~]# chkconfig ricci on
[root@server2 ~]# passwd ricci

Physical machine:
[root@foundation69 ~]# vim /etc/hosts // add local name resolution
172.25.69.1 server1
172.25.69.2 server2
Browser:

Open https://server1:8084. luci serves its web interface on port 8084 and talks to the ricci agent listening on port 11111 on each cluster node.
Click Advanced, then Confirm the security exception, and log in with server1's root password.
Click Create to create a new cluster (Add registers an existing one). The cluster name should not be numeric and may be at most 15 characters; the node password asked for here is the ricci user's password.
Notes:
1) If Use the Same Password for All Nodes is checked, every cluster node uses the same password, namely the password just set for the ricci user.
2) Download Packages: automatically download the required packages.
3) Use Locally Installed Packages: use the packages already installed locally.
4) Reboot Nodes Before Joining Cluster: the nodes are rebooted after the cluster is created, so the cluster tools (luci and ricci) must already be enabled to start on boot.
5) Enable Shared Storage Support: enable clustered shared storage.

rgmanager: resource manager; brings up the virtual IP and runs the service scripts
modclusterd: status monitoring
clvmd: clustered LVM for shared storage
ccs: cluster configuration system
fence: handles network and node failures and protects the shared storage so that a split-brain situation cannot occur
Note: NetworkManager must not be running during this procedure.
If a node does not join the cluster properly, start the cman, rgmanager, ricci, modclusterd and clvmd services by hand, as sketched below.
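A minimal sketch for starting those services manually on an affected node (cman first, rgmanager last):
[root@server1 ~]# for svc in cman ricci modclusterd clvmd rgmanager; do /etc/init.d/$svc start; done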

Physical machine:
[root@foundation69 ~]# yum install fence-virtd-multicast fence-virtd fence-virtd-libvirt -y
[root@foundation69 ~]# fence_virtd -c  // write a new fence configuration interactively
Module search path [/usr/lib64/fence-virt]: /usr/lib64/fence-virt // default module path
Listener module [multicast]: multicast
Multicast IP Address [225.0.0.12]: 225.0.0.12
Using ipv4 as family.   // the default
Multicast IP Port [1229]: 1229
Interface [virbr0]: br0
// br0 is used here because the bridge that connects the virtual machines to the host is br0
Key File [/etc/cluster/fence_xvm.key]: /etc/cluster/fence_xvm.key
Backend module [libvirt]: libvirt
[root@foundation69 ~]# cat /etc/fence_virt.conf  // inspect the generated configuration
backends {
    libvirt {
        uri = "qemu:///system";
    }

}

listeners {
    multicast {
        port = "1229";
        family = "ipv4";
        interface = "br0";
        address = "225.0.0.12";
        key_file = "/etc/cluster/fence_xvm.key";
    }

}

fence_virtd {
    module_path = "/usr/lib64/fence-virt";
    backend = "libvirt";
    listener = "multicast";
}

[root@foundation69 ~]# mkdir /etc/cluster
[root@foundation69 ~]# dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1
[root@foundation69 ~]# file /etc/cluster/fence_xvm.key // check the file type
/etc/cluster/fence_xvm.key: data
[root@foundation69 ~]# systemctl restart fence_virtd
[root@foundation69 ~]# systemctl enable fence_virtd
[root@foundation69 ~]# netstat -anulp | grep fence_virtd
udp        0      0 0.0.0.0:1229            0.0.0.0:*                           8657/fence_virtd    
[root@foundation69 Desktop]# scp /etc/cluster/fence_xvm.key server1:/etc/cluster/
[root@foundation69 Desktop]# scp /etc/cluster/fence_xvm.key server2:/etc/cluster/
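With the key on both nodes and fence_virtd running on the host, the multicast path can be verified from a cluster node; fence_xvm -o list should print the libvirt domains (an assumed check, not part of the original transcript):
[root@server1 ~]# fence_xvm -o list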

In luci, go to Fence Devices and add a fence_xvm (multicast) device named vmfence. After it has been created successfully:

[root@server1 ~]# cat /etc/cluster/cluster.conf // check the cluster configuration
<?xml version="1.0"?>
<cluster config_version="2" name="ying">
    <clusternodes>
        <clusternode name="server1" nodeid="1"/>
        <clusternode name="server2" nodeid="2"/>
    </clusternodes>
    <cman expected_votes="1" two_node="1"/>
    <fencedevices>
        <fencedevice agent="fence_xvm" name="vmfence"/>
    </fencedevices>
</cluster>
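Cluster membership can also be confirmed from the command line on either node (an extra check, not shown in the original screenshots):
[root@server1 ~]# cman_tool status   // quorum information and the cluster name
[root@server1 ~]# cman_tool nodes    // both nodes should show status M (member)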

On the Nodes tab, click server1 and add a fence method (fence1) with a fence instance that uses the vmfence device; the Domain field holds the UUID of the server1 virtual machine. Look up the UUID on the physical machine (see the sketch below), then configure server2 the same way with a method named fence2.
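The UUID that goes into the Domain field can be read on the physical machine with virsh; each node's fence instance should use the UUID of its own virtual machine (assuming the libvirt domain names are server1 and server2):
[root@foundation69 ~]# virsh domuuid server1
[root@foundation69 ~]# virsh domuuid server2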

[root@server1 ~]# cat /etc/cluster/cluster.conf // check the cluster configuration again
<?xml version="1.0"?>
<cluster config_version="6" name="ying">
    <clusternodes>
        <clusternode name="server1" nodeid="1">
            <fence>
                <method name="fence1">
                    <device domain="1db9e489-f33a-425e-949e-a5d53b7bd0b5" name="vmfence"/>
                </method>
            </fence>
        </clusternode>
        <clusternode name="server2" nodeid="2">
            <fence>
                <method name="fence2">
                    <device domain="1db9e489-f33a-425e-949e-a5d53b7bd0b5" name="vmfence"/>
                </method>
            </fence>
        </clusternode>
    </clusternodes>
    <cman expected_votes="1" two_node="1"/>
    <fencedevices>
        <fencedevice agent="fence_xvm" name="vmfence"/>
    </fencedevices>
</cluster>

Create a failover domain containing server1 and server2. The numbers 1 and 2 are node priorities; the lower the number, the higher the priority.
No Failback: when a failed node comes back online, the service is not switched back to it.
Add the shared resources:
VIP: choose an IP address that does not conflict with any other host; by default the address is checked every 10 seconds.
Monitor Link: monitor the link state.
Nginx: a Script resource that points at /etc/init.d/nginx.
Create the /etc/init.d/nginx script on both server1 and server2:

[root@server1 ~]# vim /etc/init.d/nginx
#!/bin/bash
# nginx Startup script for the Nginx HTTP Server
# it is v.0.0.2 version.
# chkconfig: - 85 15
# description: Nginx is a high-performance web and proxy server.
#              It has a lot of features, but it's not for everyone.
# processname: nginx
# pidfile: /var/run/nginx.pid
# config: /usr/local/nginx/conf/nginx.conf
nginxd=/usr/local/nginx/sbin/nginx
nginx_config=/usr/local/nginx/conf/nginx.conf
nginx_pid=/var/run/nginx.pid
RETVAL=0
prog="nginx"
# Source function library.
. /etc/rc.d/init.d/functions
# Source networking configuration.
. /etc/sysconfig/network
# Check that networking is up.
[ "${NETWORKING}" = "no" ] && exit 0
[ -x $nginxd ] || exit 0
# Start nginx daemons functions.
start() {
if [ -e $nginx_pid ];then
   echo "nginx already running...."
   exit 1
fi
   echo -n $"Starting $prog: "
   daemon $nginxd -c ${nginx_config}
   RETVAL=$?
   echo
   [ $RETVAL = 0 ] && touch /var/lock/subsys/nginx
   return $RETVAL
}
# Stop nginx daemons functions.
stop() {
        echo -n $"Stopping $prog: "
        killproc $nginxd
        RETVAL=$?
        echo
        [ $RETVAL = 0 ] && rm -f /var/lock/subsys/nginx /var/run/nginx.pid
}
# reload nginx service functions.
reload() {
    echo -n $"Reloading $prog: "
    #kill -HUP `cat ${nginx_pid}`
    killproc $nginxd -HUP
    RETVAL=$?
    echo
}
# See how we were called.
case "$1" in
start)
        start
        ;;
stop)
        stop
        ;;
reload)
        reload
        ;;
restart)
        stop
        start
        ;;
status)
        status $prog
        RETVAL=$?
        ;;

*)
        echo $"Usage: $prog {start|stop|restart|reload|status|help}"
        exit 1
esac
exit $RETVAL
[root@server1 ~]# chmod a+x /etc/init.d/nginx
[root@server1 ~]# /etc/init.d/nginx status
nginx is stopped
[root@server1 ~]# /etc/init.d/nginx start
Starting nginx:                                            [  OK  ]
[root@server1 ~]# /etc/init.d/nginx reload
Reloading nginx:                                           [  OK  ]
[root@server1 ~]# /etc/init.d/nginx stop

server2 is set up the same way as server1:
[root@server2 ~]# /etc/init.d/nginx status
nginx is stopped

Add the service group:
Run Exclusive: run the service exclusively (it can only run on server1 and server2).
Note: the order in which the resources are added is the order in which they are started when the cluster brings the service up, so add the VIP first and then nginx, choosing Add Resource for each one.
After everything has been added, the service is shown as running; the resulting configuration is sketched below.
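For reference, once the failover domain, VIP and nginx script have been added, the rm section of /etc/cluster/cluster.conf looks roughly like the sketch below; the failover-domain name webfail and the VIP 172.25.69.100 are placeholders, and the exact attributes depend on the options chosen in luci:
[root@server1 ~]# cat /etc/cluster/cluster.conf
...
    <rm>
        <failoverdomains>
            <failoverdomain name="webfail" ordered="1" restricted="1">
                <failoverdomainnode name="server1" priority="1"/>
                <failoverdomainnode name="server2" priority="2"/>
            </failoverdomain>
        </failoverdomains>
        <resources>
            <ip address="172.25.69.100" monitor_link="on"/>
            <script file="/etc/init.d/nginx" name="nginx"/>
        </resources>
        <service domain="webfail" exclusive="1" name="nginx" recovery="relocate">
            <ip ref="172.25.69.100"/>
            <script ref="nginx"/>
        </service>
    </rm>
</cluster>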

High availability

server1:
[root@server1 ~]# cat /usr/local/nginx/html/ying.html  // nginx test page content
<h1>www.ying.org-server1</h1>

server2:
[root@server2 ~]# cat /usr/local/nginx/html/ying.html
<h1>www.ying.org-server2</h1>


[root@server1 ~]# clustat
Cluster Status for ying @ Wed Aug  1 22:23:00 2018
Member Status: Quorate

 Member Name                            ID   Status
 ------ ----                            ---- ------
 server1                                    1 Online, Local, rgmanager
 server2                                    2 Online, rgmanager

 Service Name                  Owner (Last)                  State         
 ------- ----                  ----- ------                  -----         
 service:nginx                 server1                       started    
If nginx is stopped on server1, the browser shows server2's page:
[root@server1 ~]# /etc/init.d/nginx stop
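From the physical machine the failover can be watched by polling the VIP in a loop (172.25.69.100 is the placeholder VIP from the sketch above; substitute the real address):
[root@foundation69 ~]# while true; do curl -s http://172.25.69.100/ying.html; sleep 1; done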


server1:
[root@server1 ~]# clustat
Cluster Status for ying @ Wed Aug  1 23:26:44 2018
Member Status: Quorate

 Member Name                            ID   Status
 ------ ----                            ---- ------
 server1                                    1 Online, Local, rgmanager
 server2                                    2 Online, rgmanager

 Service Name                  Owner (Last)                  State         
 ------- ----                  ----- ------                  -----         
 service:nginx                 server2                       started 

server2:
[root@server2 ~]# clustat
Cluster Status for ying @ Thu Aug  2 04:16:35 2018
Member Status: Quorate

 Member Name                             ID   Status
 ------ ----                             ---- ------
 server1                                     1 Online, rgmanager
 server2                                     2 Online, Local, rgmanager

 Service Name                   Owner (Last)                   State         
 ------- ----                   ----- ------                   -----         
 service:nginx                  server1                        started       
[root@server2 ~]# clusvcadm -d nginx              // disable the service
Local machine disabling service:nginx...Success
[root@server2 ~]# clusvcadm -e nginx              // enable it again
Local machine trying to enable service:nginx...Success
service:nginx is now running on server1
[root@server2 ~]# clusvcadm -r nginx -m server2   // relocate the service to server2
Trying to relocate service:nginx to server2...Success
service:nginx is now running on server2
[root@server2 ~]# clustat
Cluster Status for ying @ Thu Aug  2 04:19:39 2018
Member Status: Quorate

 Member Name                             ID   Status
 ------ ----                             ---- ------
 server1                                     1 Online, rgmanager
 server2                                     2 Online, Local, rgmanager

 Service Name                   Owner (Last)                   State         
 ------- ----                   ----- ------                   -----         
 service:nginx                  server2                        started       

Load balancing

server3:
[root@server3 ~]# /etc/init.d/httpd start
[root@server3 ~]# cat /var/www/html/index
www.westos.org-server3

server4:
[root@server4 ~]# /etc/init.d/httpd start
[root@server4 ~]# cat /var/www/html/index
www.westos.org
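The balancing itself is done by nginx on whichever node currently holds the VIP; a rough sketch of the relevant part of /usr/local/nginx/conf/nginx.conf (the upstream name westos and server4's address 172.25.69.4 are assumptions):
[root@server1 ~]# vim /usr/local/nginx/conf/nginx.conf
http {
        upstream westos {
                server 172.25.69.3:80;
                server 172.25.69.4:80;
        }
        server {
                listen 80;
                server_name www.westos.org;
                location / {
                        proxy_pass http://westos;
                }
        }
}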

Browser test: requests are balanced between server3 and server4.

Fence test:
[root@server1 ~]# clustat
Cluster Status for ying @ Thu Aug  2 06:16:00 2018
Member Status: Quorate

 Member Name                           ID   Status
 ------ ----                           ---- ------
 server1                                  1 Online, Local, rgmanager
 server2                                  2 Online, rgmanager


 Service Name                 Owner (Last)                 State         
 ------- ----                 ----- ------                 -----         
 service:nginx                server2                      started       
[root@server1 ~]# fence_node server2  // server2 is rebooted automatically and the service moves to server1
fence server2 success
[root@server1 ~]# clustat
Cluster Status for ying @ Thu Aug  2 06:46:53 2018
Member Status: Quorate

 Member Name                           ID   Status
 ------ ----                           ---- ------
 server1                                  1 Online, Local, rgmanager
 server2                                  2 Online, rgmanager


 Service Name                 Owner (Last)                 State         
 ------- ----                 ----- ------                 -----         
 service:nginx                server1                     started   


[root@server2 ~]# clustat
Cluster Status for ying @ Thu Aug  2 06:34:09 2018
Member Status: Quorate

 Member Name                           ID   Status
 ------ ----                           ---- ------
 server1                                   1 Online, rgmanager
 server2                                   2 Online, Local, rgmanager

 Service Name                 Owner (Last)                 State         
 ------- ----                 ----- ------                 -----         
 service:nginx                server2                      started  
[root@server2 ~]# echo c > /proc/sysrq-trigger // crash the kernel; the service fails over to server1

RHCS: VIP + shared storage

server3:
[root@server3 ~]# yum install -y scsi-*
[root@server3 ~]# vim /etc/tgt/targets.conf
<target iqn.2018-08.com.example:server.target1>
    backing-store /dev/vdb
    initiator-address 172.25.69.1
    initiator-address 172.25.69.2
</target>
[root@server3 ~]# /etc/init.d/tgtd start
[root@server3 ~]# chkconfig tgtd on  // must be enabled at boot; the fence_virtd service on the physical machine must also be enabled at boot
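The exported LUN can be double-checked on server3 before continuing (an extra verification; tgt-admin is part of scsi-target-utils, installed above):
[root@server3 ~]# tgt-admin -s   // should show target1 with /dev/vdb as LUN 1 and the two initiator addresses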

server1:
[root@server1 ~]# yum install iscsi-*
[root@server1 ~]# iscsiadm -m discovery -t st -p 172.25.69.3   // discover the target exported by server3
[root@server1 ~]# iscsiadm -m node -l                          // log in to the target
[root@server1 ~]# fdisk -l
Disk /dev/sda: 21.5 GB, 21474836480 bytes
[root@server1 ~]# /etc/init.d/clvmd status
[root@server1 ~]# lvs
  LV      VG       Attr       LSize   Pool Origin Data%  Move Log Cpy%Sync Convert
  lv_root VolGroup -wi-ao----  18.54g                                             
  lv_swap VolGroup -wi-ao---- 992.00m                                             
[root@server1 ~]# pvcreate /dev/sda
[root@server1 ~]# vgcreate clustervg /dev/sda
[root@server1 ~]# lvcreate -L 4G -n demo clustervg
[root@server1 ~]# mkfs.ext4 /dev/clustervg/demo

server2:
[root@server2 ~]# yum install iscsi-* -y
[root@server2 ~]# iscsiadm -m discovery -t st -p 172.25.69.3
[root@server2 ~]# iscsiadm -m node -l
[root@server2 ~]# fdisk -l
Disk /dev/sda: 21.5 GB, 21474836480 bytes
[root@server2 ~]# pvs
  PV         VG        Fmt  Attr PSize  PFree 
  /dev/sda   clustervg lvm2 a--  20.00g 20.00g
  /dev/vda2  VolGroup  lvm2 a--  19.51g     0 
[root@server2 ~]# vgs
  VG        #PV #LV #SN Attr   VSize  VFree 
  VolGroup    1   2   0 wz--n- 19.51g     0 
  clustervg   1   1   0 wz--nc 20.00g 16.00g
[root@server2 ~]# lvs
  LV      VG        Attr       LSize   Pool Origin Data%  Move Log Cpy%Sync Convert
  lv_root VolGroup  -wi-ao----  18.54g                                             
  lv_swap VolGroup  -wi-ao---- 992.00m                                             
  demo    clustervg -wi-a-----   4.00g                                             

server1:
[root@server1 ~]# clusvcadm -d nginx   // disable the nginx service first

Then delete the nginx service group in the luci web interface.

server1:
[root@server1 ~]# yum install mysql mysql-server -y
[root@server1 ~]# cd /var/lib/mysql
[root@server1 mysql]# ls
[root@server1 mysql]# mount /dev/clustervg/demo /var/lib/mysql/
[root@server1 mysql]# df
/dev/mapper/clustervg-demo     4128448  139256   3779480   4% /var/lib/mysql
[root@server1 mysql]# ll -d /var/lib/mysql/
drwxr-xr-x 3 root root 4096 Aug  2 07:41 /var/lib/mysql/
[root@server1 mysql]# chown mysql.mysql /var/lib/mysql/
[root@server1 mysql]# cd
[root@server1 ~]# /etc/init.d/mysqld start
[root@server1 ~]# cd -
[root@server1 mysql]# ls
ibdata1  ib_logfile0  ib_logfile1  lost+found  mysql  mysql.sock  test
[root@server1 mysql]# /etc/init.d/mysqld stop
[root@server1 mysql]# cd
[root@server1 ~]# umount /var/lib/mysql

server2:
[root@server2 ~]# yum install mysql-server -y
[root@server2 ~]# mount /dev/clustervg/demo /var/lib/mysql/
[root@server2 ~]# df
/dev/mapper/clustervg-demo     4128448  160724   3758012   5% /var/lib/mysql
[root@server2 ~]# cd /var/lib/mysql
[root@server2 mysql]# ls
ibdata1  ib_logfile0  ib_logfile1  lost+found  mysql  test
[root@server2 mysql]# /etc/init.d/mysqld start
[root@server2 mysql]# ls
ibdata1  ib_logfile0  ib_logfile1  lost+found  mysql  mysql.sock  test
[root@server2 mysql]# cd
[root@server2 ~]# /etc/init.d/mysqld stop

In luci, create a new service group named mysql: add the VIP, a Filesystem resource for /dev/clustervg/demo mounted on /var/lib/mysql, and the mysqld init script as its resources.

[root@server1 ~]# df
/dev/mapper/clustervg-demo     4128448  160724   3758012   5% /var/lib/mysql
[root@server1 ~]# clustat  // the mysql service is running on server1
Cluster Status for ying @ Thu Aug  2 09:03:20 2018
Member Status: Quorate

 Member Name                            ID   Status
 ------ ----                            ---- ------
 server1                                    1 Online, Local, rgmanager
 server2                                    2 Online, rgmanager

 Service Name                  Owner (Last)                  State         
 ------- ----                  ----- ------                  -----         
 service:mysql                 server1                       started     
[root@server1 ~]# mysql  // the database can be opened on server1
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 2
Server version: 5.1.71 Source distribution

Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show databases;
+---------------------+
| Database            |
+---------------------+
| information_schema  |
| #mysql50#lost+found |
| mysql               |
| test                |
+---------------------+
4 rows in set (0.00 sec)

mysql> exit
Bye

RHCS: data synchronization with GFS2

server1:
[root@server1 ~]# clusvcadm -d mysql  // disable the service

In the browser (luci):
// remove the Filesystem entry from the mysql service under Service Groups
// delete the Filesystem entry under Resources
// the gfs2 filesystem will be added from the command line instead

server1:
[root@server1 ~]# mount /dev/clustervg/demo /var/lib/mysql/
[root@server1 ~]# df
/dev/mapper/clustervg-demo     4128448  160724   3758012   5% /var/lib/mysql
[root@server1 ~]# vgs
  VG        #PV #LV #SN Attr   VSize  VFree 
  VolGroup    1   2   0 wz--n- 19.51g     0 
  clustervg   1   1   0 wz--nc 20.00g 16.00g
[root@server1 ~]# lvextend -L +4G /dev/clustervg/demo  // extend the LV by 4G
[root@server1 ~]# resize2fs /dev/clustervg/demo        // grow the ext4 filesystem to match
[root@server1 ~]# df
/dev/mapper/clustervg-demo     8256952  161744   7675832   3% /var/lib/mysql
[root@server1 ~]# df -h
/dev/mapper/clustervg-demo    7.9G  158M  7.4G   3% /var/lib/mysql
[root@server1 ~]# cd /var/lib/mysql
[root@server1 mysql]# ls
ibdata1  ib_logfile0  ib_logfile1  lost+found  mysql  test

## Test whether the data is synchronized
[root@server2 ~]# mount /dev/clustervg/demo /var/lib/mysql  // mount the LV on server2 as well
[root@server2 ~]# cd /var/lib/mysql


[root@server1 mysql]# cp /etc/passwd .  // copy a new file into the directory on server1
[root@server1 mysql]# ls
ibdata1  ib_logfile0  ib_logfile1  lost+found  mysql  passwd  test


[root@server2 mysql]# ls  // the new file does not show up on server2
ibdata1  ib_logfile0  ib_logfile1  lost+found  mysql  test
[root@server2 ~]# umount /var/lib/mysql  // unmount and remount
[root@server2 ~]# mount /dev/clustervg/demo /var/lib/mysql
[root@server2 ~]# cd /var/lib/mysql
[root@server2 mysql]# ls  // only now does the copied file appear, so ext4 does not keep the two mounts in sync
ibdata1  ib_logfile0  ib_logfile1  lost+found  mysql  passwd  test
[root@server2 mysql]# cd
[root@server2 ~]# umount /var/lib/mysql

## Fixing data synchronization
server1:
[root@server1 ~]# umount /var/lib/mysql
[root@server1 ~]# lvremove /dev/clustervg/demo
[root@server1 ~]# lvcreate -L 4G -n demo clustervg
[root@server1 ~]# mkfs.gfs2 -h // list the available options
[root@server1 ~]# mkfs.gfs2 -j 3 -p lock_dlm -t ying:mygfs2 /dev/clustervg/demo    // -j 3: three journals, -p lock_dlm: DLM locking, -t clustername:fsname; format the LV
[root@server1 ~]# mount /dev/clustervg/demo /var/lib/mysql
[root@server1 ~]# cd /var/lib/mysql
[root@server1 mysql]# ls
[root@server1 mysql]# chown mysql.mysql .
[root@server1 mysql]# ll
total 0
[root@server1 mysql]# ll -d .
drwxr-xr-x 2 mysql mysql 3864 Aug  2 09:21 .
[root@server1 mysql]# /etc/init.d/mysqld start
[root@server1 mysql]# ls
ibdata1  ib_logfile0  ib_logfile1  mysql  mysql.sock  test
[root@server1 mysql]# /etc/init.d/mysqld stop
[root@server1 mysql]# cd
[root@server1 ~]# umount /var/lib/mysql
[root@server1 ~]# vim /etc/fstab    // _netdev: mount only after the network (and the cluster) is up
/dev/clustervg/demo     /var/lib/mysql          gfs2    _netdev         0 0
[root@server1 ~]# mount -a
[root@server1 ~]# df
/dev/mapper/clustervg-demo     4193856  418884   3774972  10% /var/lib/mysql


server2:
[root@server2 ~]# vim /etc/fstab
/dev/clustervg/demo     /var/lib/mysql          gfs2    _netdev         0 0
[root@server2 ~]# mount -a


server1:
[root@server1 ~]# clusvcadm -e mysql  // enable the service again
[root@server1 ~]# clustat
Cluster Status for ying @ Thu Aug  2 09:29:28 2018
Member Status: Quorate

 Member Name                            ID   Status
 ------ ----                            ---- ------
 server1                                    1 Online, Local, rgmanager
 server2                                    2 Online, rgmanager

 Service Name                  Owner (Last)                  State         
 ------- ----                  ----- ------                  -----         
 service:mysql                 server1                       started       
[root@server1 ~]# clusvcadm -r mysql -m server2  // relocate the service to server2
[root@server1 ~]# clustat
Cluster Status for ying @ Thu Aug  2 09:51:47 2018
Member Status: Quorate

 Member Name                           ID   Status
 ------ ----                           ---- ------
 server1                                   1 Online, Local, rgmanager
 server2                                   2 Online, rgmanager

 Service Name                 Owner (Last)                 State         
 ------- ----                 ----- ------                 -----         
 service:mysql                server2                      started   
[root@server1 mysql]# touch haha  // create a new file haha to test synchronization
[root@server1 mysql]# ls
haha  ibdata1  ib_logfile0  ib_logfile1  mysql  mysql.sock  test



[root@server2 ~]# cd /var/lib/mysql
[root@server2 mysql]# ls  // the file haha is visible, so the data is now synchronized
haha  ibdata1  ib_logfile0  ib_logfile1  mysql  mysql.sock  test


Reposted from blog.csdn.net/Ying_smile/article/details/81974114