版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/sr_1114/article/details/80200383
Keepalived实现高可用和负载均衡
keepalived是一个类似于layer3, 4 & 7交换机制的软件,也就是我们平时说的第3层、第4层和第7层交换。Keepalived是自动完成,不需人工干涉。Keepalived的作用是检测服务器的状态,如果有一台web服务器宕机,或工作出现故障,Keepalived将检测到,并将有故障的服务器从系统中剔除,同时使用其他服务器代替该服务器的工作,当服务器工作正常后Keepalived自动将服务器加入到服务器群中,这些工作全部自动完成,不需要人工干涉,需要人工做的只是修复故障的服务器。
本篇文章主要讲述了用keepalived-1.4.3实现高可用和负载均衡。
操作系统:Linux redhat6.5
高可用结点:server1(master) server4(backup)
负载均衡节点:server2 server3
源码包:keepalived-1.4.3.tar.gz
1.keepalived源码安装,从网络上下载源码包。(注意:下文记录顺序有误,实际应先执行 tar zxf 解压源码包并安装依赖,再执行 configure/make/make install。)
[root@server1 ~]# cd keepalived-1.4.3
[root@server1 keepalived-1.4.3]# ./configure --prefix=/usr/local/keepalived --with-init=SYSV
Use IPVS Framework : Yes
Use VRRP Framework : Yes
[root@server1 keepalived-1.4.3]# make
[root@server1 keepalived-1.4.3]# make install
[root@server1 ~]# tar zxf keepalived-1.4.3.tar.gz
[root@server1 ~]# yum install openssl-devel libnl3-devel ipset-devel iptables-devel libnfnetlink-devel gcc
No package libnl3-devel available. 可以不要
No package ipset-devel available. 可以不要
No package libnfnetlink-devel available.
[root@server1 ~]# yum install libnfnetlink-devel-1.0.0-1.el6.x86_64.rpm -y
[root@server1 keepalived]# ln -s /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/init.d/
[root@server1 keepalived]# ln -s /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
[root@server1 keepalived]# ln -s /usr/local/keepalived/etc/keepalived/ /etc/
[root@server1 keepalived]# ln -s /usr/local/keepalived/sbin/keepalived /bin/
[root@server1 keepalived]# chmod +x /usr/local/keepalived/etc/rc.d/init.d/keepalived
[root@server1 keepalived]# /etc/init.d/keepalived status
keepalived is stopped
#slave安装keepalived可以和master一样采用源码编译方式,也可以直接拷贝编译好的目录,如下:
[root@server1 keepalived]# cd /usr/local/
[root@server1 local]# scp -r keepalived/ server4:/usr/local/
[root@server4 ~]# ln -s /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/init.d/
[root@server4 ~]# ln -s /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
[root@server4 ~]# ln -s /usr/local/keepalived/etc/keepalived/ /etc/
[root@server4 ~]# ln -s /usr/local/keepalived/sbin/keepalived /bin/
[root@server4 ~]# chmod +x /usr/local/keepalived/etc/rc.d/init.d/keepalived
2.master配置
[root@server1 ~]# cd /etc/keepalived/
[root@server1 keepalived]# ls
keepalived.conf samples
[root@server1 keepalived]# vim keepalived.conf
global_defs {
#当keepalived有动作时发邮件给本机
notification_email {
root@localhost
}
notification_email_from keepalived@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
#vrrp_strict #当参数不被禁止掉时,每次加载keepalived都会给火墙中写入访问vip的包全部丢弃
vrrp_garp_interval 0
vrrp_gna_interval 0
}
#高可用模块
vrrp_instance VI_1 {
state MASTER #主,slave配置为BACKUP
interface eth0
virtual_router_id 60 #每个人的路由id要不同
priority 100 #优先级,master的优先级应该高于slave
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.25.60.100
}
}
#负载均衡(LVS)模块
virtual_server 172.25.60.100 80 {
delay_loop 6
lb_algo rr #轮叫的调度算法
lb_kind DR #LVS模式
#persistence_timeout 50 #持续链接,需要持续链接的服务要打开该参数
protocol TCP
real_server 172.25.60.2 80 {
weight 1 #权重
TCP_CHECK {
connect_timeout 3
retry 3
delay_before_retry 3
}
}
real_server 172.25.60.3 80 {
weight 1
TCP_CHECK {
connect_timeout 3
retry 3
delay_before_retry 3
}
}
}
3.slave配置
[root@server1 keepalived]# scp keepalived.conf server4:
[root@server4 keepalived]# vim keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from keepalived@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
#vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 60 #必须与master的virtual_router_id一致,否则主备无法识别为同一VRRP实例
priority 50
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.25.60.100
}
}
virtual_server 172.25.60.100 80 {
delay_loop 6
lb_algo rr
lb_kind DR
#persistence_timeout 50
protocol TCP
real_server 172.25.60.2 80 {
weight 1
TCP_CHECK {
connect_timeout 3
retry 3
delay_before_retry 3
}
}
real_server 172.25.60.3 80 {
weight 1
TCP_CHECK {
connect_timeout 3
retry 3
delay_before_retry 3
}
}
}
4.安装ipvsadm
[root@server1 keepalived]# vim /etc/yum.repos.d/rhel-source.repo
[rhel-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.60.250/rhel6.5
enabled=1
gpgcheck=0
[HighAvailability]
name=HighAvailability
baseurl=http://172.25.60.250/rhel6.5/HighAvailability
gpgcheck=0
[LoadBalancer]
name=LoadBalancer
baseurl=http://172.25.60.250/rhel6.5/LoadBalancer
gpgcheck=0
[ResilientStorage]
name=ResilientStorage
baseurl=http://172.25.60.250/rhel6.5/ResilientStorage
gpgcheck=0
[ScalableFileSystem]
name=ScalableFileSystem
baseurl=http://172.25.60.250/rhel6.5/ScalableFileSystem
gpgcheck=0
[root@server1 keepalived]# scp /etc/yum.repos.d/rhel-source.repo server4:/etc/yum.repos.d/
[root@server1 keepalived]# yum install ipvsadm -y
[root@server1 keepalived]# /etc/init.d/ipvsadm start
[root@server4 keepalived]# yum install ipvsadm -y
[root@server4 keepalived]# /etc/init.d/ipvsadm start
5.后端服务器配置
#LVS-DR模式下,后端服务器除配置VIP外,还需抑制其对VIP的ARP响应(例如使用arptables),否则后端会与调度器争抢ARP,导致负载均衡失效
[root@server2 ~]# ip addr add 172.25.60.100 dev eth0
[root@server3 ~]# ip addr add 172.25.60.100 dev eth0
[root@server2 ~]# /etc/init.d/httpd start
[root@server2 ~]# echo server2 > /var/www/html/index.html
[root@server3 ~]# /etc/init.d/httpd start
[root@server3 ~]# echo server3 > /var/www/html/index.html
6.开启keepalived并测试
[root@server1 ~]# /etc/init.d/keepalived start
[root@server4 ~]# /etc/init.d/keepalived start
[root@server1 ~]# ipvsadm -l
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.25.60.100:http rr
-> server2:http Route 1 0 0
-> server3:http Route 1 0 0
[root@server1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:c0:26:c9 brd ff:ff:ff:ff:ff:ff
inet 172.25.60.1/24 brd 172.25.60.255 scope global eth0
inet 172.25.60.100/32 scope global eth0
inet6 fe80::5054:ff:fec0:26c9/64 scope link
valid_lft forever preferred_lft forever
[root@foundation60 kiosk]# curl 172.25.60.100
server2
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[root@foundation60 kiosk]# curl 172.25.60.100
server2
[root@foundation60 kiosk]# curl 172.25.60.100
server3
#检测高可用:当master挂掉之后,slave接管所有资源;master重新启动后,由于其优先级更高,会重新接管资源。
[root@server1 ~]# /etc/init.d/keepalived stop
Stopping keepalived: [ OK ]
[root@server1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:c0:26:c9 brd ff:ff:ff:ff:ff:ff
inet 172.25.60.1/24 brd 172.25.60.255 scope global eth0
inet6 fe80::5054:ff:fec0:26c9/64 scope link
valid_lft forever preferred_lft forever
[root@server1 ~]# ipvsadm -l
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
[root@server4 keepalived]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:91:3e:60 brd ff:ff:ff:ff:ff:ff
inet 172.25.60.4/24 brd 172.25.60.255 scope global eth0
inet 172.25.60.100/32 scope global eth0
inet6 fe80::5054:ff:fe91:3e60/64 scope link
valid_lft forever preferred_lft forever
You have new mail in /var/spool/mail/root
[root@server4 keepalived]# ipvsadm -l
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.25.60.100:http rr
-> server2:http Route 1 0 0
-> server3:http Route 1 0 0
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[root@foundation60 kiosk]# curl 172.25.60.100
server2
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[root@foundation60 kiosk]# curl 172.25.60.100
server2
#测试负载均衡,keepalived自带健康检查
[root@server2 ~]# /etc/init.d/httpd stop
Stopping httpd: [ OK ]
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[root@server3 ~]# /etc/init.d/httpd start
Starting httpd: httpd: Could not reliably determine the server's fully qualified domain name, using 172.25.60.3 for ServerName
[root@foundation60 kiosk]# curl 172.25.60.100
server2
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[root@foundation60 kiosk]# curl 172.25.60.100
server2
[root@foundation60 kiosk]# curl 172.25.60.100
server3
[ OK ]