1. Four virtual machines are used in this experiment; master-slave replication has already been configured in advance on three of them.
192.168.9.8 mha mha-manager,mha-node
192.168.9.9 master mha-node,mysql
192.168.9.10 slave1 mha-node,mysql
192.168.9.11 slave2 mha-node,mysql
2. Configure name resolution on all nodes
[root@mha ~]# vim /etc/hosts
Add the following entries:
192.168.9.8 mha
192.168.9.9 master
192.168.9.10 slave1
192.168.9.11 slave2
[root@mha ~]# yum -y install perl-DBD-MySQL
[root@mha tools]# rpm -ivh mha4mysql-node-0.56-0.el6.noarch.rpm
3. Deploy on three mysql databases
The three configuration files are identical except for server-id, which must be unique on each server (e.g. master = 1, slave1 = 2, slave2 = 3).
vim /etc/my.cnf
[client]
port = 3306
socket = /tmp/mysql.sock
[mysqld]
server-id = 2
port = 3306
log-bin=/usr/local/mysql/data/bin-log
basedir = /usr/local/mysql
datadir = /usr/local/mysql/data
relay_log_purge=0
[root@master ~]# mysql -uroot -p123123
mysql> grant all privileges on *.* to mha@'192.168.9.%' identified by 'mha';
mysql> select user,host from mysql.user;
4. Create software soft links on all nodes (run these commands on every node)
[root@mha tools]# ln -s /usr/local/mysql/bin/mysqlbinlog /usr/bin/mysqlbinlog
[root@master ~]# ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql
5. Configure mha management node
[root@mha tools]# yum -y localinstall perl-*
[root@mha tools]# rpm -ivh mha4mysql-manager-0.56-0.el6.noarch.rpm
6.mha edit configuration file
[root@mha tools]# mkdir -p /etc/mha
[root@mha tools]# mkdir -p /var/log/mha/app1
[root@mha tools]# mkdir -p /var/data/binlog
[root@mha tools]# vim /etc/mha/app1.cnf
[server default]
manager_log=/var/log/mha/app1/manager
manager_workdir=/var/log/mha/app1
master_binlog_dir=/usr/local/mysql/data/
password=mha
ping_interval=2
repl_password=123123
repl_user=repl
ssh_user=root
user=mha
[server1]
hostname=192.168.9.9
port=3306
[server2]
candidate_master=1
check_repl_delay=0
hostname=192.168.9.10
port=3306
[server3]
hostname=192.168.9.11
port=3306
7. Create the replication authorization on the two slave databases
[root@slave1 ~]# mysql -uroot -p123123    (repeat on slave2)
mysql> grant replication slave on *.* to 'repl'@'192.168.9.%' identified by '123123';
8. Deploy ssh mutual trust on all nodes (repeat the key generation and ssh-copy-id steps below on every node)
[root@mha ~]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa >/dev/null 2>&1
[root@mha ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub [email protected]
[root@mha ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub [email protected]
[root@mha ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub [email protected]
[root@mha ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub [email protected]
[root@mha ~]# vim /etc/ssh/sshd_config
43 PubkeyAuthentication yes
44 RSAAuthentication yes
47 AuthorizedKeysFile .ssh/authorized_keys
[root@mha ~]# systemctl restart sshd
9. Start the test
[root@mha ~]# masterha_check_ssh --conf=/etc/mha/app1.cnf
[root@mha ~]# masterha_check_repl --conf=/etc/mha/app1.cnf
10. Start MHA
[root@mha ~]# nohup masterha_manager --conf=/etc/mha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/mha/app1/manager.log 2>&1 &
[root@mha ~]# masterha_check_status --conf=/etc/mha/app1.cnf
11. Test failover: stop the master database
[root@master ~]# /etc/init.d/mysqld stop
Log in to slave library 1
[root@slave1 ~]# mysql -uroot -p123123
mysql> show slave status\G
Log in to slave library 2
[root@slave2 ~]# mysql -uroot -p123123
mysql> show slave status\G
(testing successfully)
After restoring the former master, reconfigure it as a slave pointing to the new master:
On the new master (192.168.9.10), re-create the replication authorization (the grant from step 7).
On the restored former master (192.168.9.9), run CHANGE MASTER TO pointing at 192.168.9.10, start the slave, and verify with show slave status\G.