Article Directory
LVS_Director + Keepalived experiment guide
Host name | IP | System | Role |
---|---|---|---|
client | 172.16.147.1 | mac | Client |
lvs-keepalived-master | 172.16.147.154 | centos7.5 | Distributor |
lvs-keepalived-slave | 172.16.147.155 | centos7.5 | Distributor equipment |
test-nginx1 | 172.16.147.153 | centos7.5 | web1 |
test-nginx2 | 172.16.147.156 | centos7.5 | web2 |
vip | 172.16.147.101 |
LVS_Director + KeepAlived
KeepAlived在该项目中的功能:
1. 管理IPVS的路由表(包括对RealServer做健康检查)
2. 实现调度器的HA
http://www.keepalived.org
Keepalived所执行的外部脚本命令建议使用绝对路径
实施步骤:
1. 主/备调度器安装软件
[root@lvs-keepalived-master ~]# yum -y install ipvsadm keepalived
[root@lvs-keepalived-slave ~]# yum -y install ipvsadm keepalived
2. Keepalived
lvs-master
[root@lvs-keepalived-master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id lvs-keepalived-master #辅助改为lvs-backup
}
vrrp_instance VI_1 {
state MASTER
interface ens33 #VIP绑定接口
virtual_router_id 80 #VRID 同一组集群,主备一致
priority 100 #本节点优先级,辅助改为50
advert_int 1 #检查间隔,默认为1s
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.16.147.101/24 # 可以写多个vip
}
}
virtual_server 172.16.147.101 80 {
#LVS配置
delay_loop 3
lb_algo rr #LVS调度算法
lb_kind DR #LVS集群模式(路由模式)
nat_mask 255.255.255.0
protocol TCP #健康检查使用的协议
real_server 172.16.147.153 80 {
weight 1
inhibit_on_failure #当该节点失败时,把权重设置为0,而不是从IPVS中删除
TCP_CHECK {
#健康检查
connect_port 80 #检查的端口
connect_timeout 3 #连接超时的时间
}
}
real_server 172.16.147.156 80 {
weight 1
inhibit_on_failure
TCP_CHECK {
connect_timeout 3
connect_port 80
}
}
}
[root@lvs-keepalived-slave ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id lvs-keepalived-slave
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
nopreempt #不抢占资源
virtual_router_id 80
priority 50
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.16.147.101/24
}
}
virtual_server 172.16.147.101 80 {
delay_loop 3
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
protocol TCP
real_server 172.16.147.153 80 {
weight 1
inhibit_on_failure
TCP_CHECK {
connect_port 80
connect_timeout 3
}
}
real_server 172.16.147.156 80 {
weight 1
inhibit_on_failure
TCP_CHECK {
connect_timeout 3
connect_port 80
}
}
}
3. 启动KeepAlived(主备均启动)
[root@lvs-keepalived-master ~]# systemctl start keepalived
[root@lvs-keepalived-master ~]# systemctl enable keepalived
[root@lvs-keepalived-master ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.16.147.101:80 rr persistent 20
-> 172.16.147.153:80 Route 1 0 0
-> 172.16.147.156:80 Route 0 0 0
4. 所有RS配置(nginx1,nginx2)
配置好网站服务器,测试所有RS
[root@test-nginx1 ~]# yum install -y nginx
[root@test-nginx2 ~]# yum install -y nginx
[root@test-nginx1 ~]# echo "ip addr add dev lo 172.16.147.101/32" >> /etc/rc.local
[root@test-nginx1 ~]# echo "net.ipv4.conf.all.arp_ignore = 1" >> /etc/sysctl.conf
[root@test-nginx1 ~]# echo "net.ipv4.conf.all.arp_announce = 2" >> /etc/sysctl.conf
[root@test-nginx1 ~]# sysctl -p
[root@test-nginx1 ~]# echo "web1..." >> /usr/share/nginx/html/index.html
[root@test-nginx1 ~]# systemctl start nginx
[root@test-nginx1 ~]# chmod +x /etc/rc.local
LB集群测试
所有分发器和Real Server都正常
主分发器故障及恢复
LVS_Director + Keepalived experimental operation
1. Use LVS for load balancing and Keepalived for high availability
Keepalived natively supports LVS, so the Keepalived steps below subsume the manual LVS setup. The only reason to install ipvsadm is to observe Keepalived's native LVS support — that is, Keepalived automatically generates the LVS virtual-server table, which ipvsadm can display.
2. Web-server configuration
[root@real-server1 ~]# yum install -y nginx
[root@real-server1 ~]# echo "real-server1" >> /usr/share/nginx/html/index.html
两台机器都安装,按顺序添加不同的主机名以示区分
[root@real-server1 ~]# ip addr add dev lo 192.168.138.200/32 #在lo接口上绑定VIP
[root@real-server1 ~]# echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore #忽略arp广播
[root@real-server1 ~]# echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce #匹配精确ip地址回包
[root@real-server1 ~]# systemctl start nginx
[root@real-server1 ~]# systemctl enable nginx
=============================================================================
因为:realServer绑定了vip之后,同一个网段中就存在多台拥有同一个vip的主机, 客户端在网关发送arp广播寻找vip时需要让realServer不响应.
解决:
echo 1 >/proc/sys/net/ipv4/conf/eth0/arp_ignore
arp_ignore 设置为1,意味着当别人的arp请求过来的时候,如果接收的设备没有这个ip,就不做出响应(这个ip在lo上,lo不是接收设备的进口)
echo 2 >/proc/sys/net/ipv4/conf/eth0/arp_announce
使用最好的ip来回应,什么是最好的ip?同一个网段内子网掩码最长的
3. Configuration of lvs-master
[root@lvs-master ~]# yum -y install ipvsadm keepalived
! Configuration File for keepalived
global_defs {
router_id lvs-keepalived-master #辅助改为lvs-backup
}
vrrp_instance VI_1 {
state MASTER
interface ens37 #VIP绑定接口
virtual_router_id 80 #VRID 同一组集群,主备一致
priority 100 #本节点优先级,辅助改为50
advert_int 1 #检查间隔,默认为1s
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.138.200/24 # 可以写多个vip
}
}
virtual_server 192.168.138.200 80 {
#LVS配置
delay_loop 3
lb_algo rr #LVS调度算法
lb_kind DR #LVS集群模式(路由模式)
nat_mask 255.255.255.0
protocol TCP #健康检查使用的协议
real_server 192.168.138.132 80 {
weight 1
inhibit_on_failure #当该节点失败时,把权重设置为0,而不是从IPVS中删除
TCP_CHECK {
#健康检查
connect_port 80 #检查的端口
connect_timeout 3 #连接超时的时间
}
}
real_server 192.168.138.135 80 {
weight 1
inhibit_on_failure
TCP_CHECK {
connect_timeout 3
connect_port 80
}
}
}
4. Configuration of lvs-slave
! Configuration File for keepalived
global_defs {
router_id lvs-keepalived-slave
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
nopreempt #不抢占资源
virtual_router_id 80
priority 50
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.138.200/24
}
}
virtual_server 192.168.138.200 80 {
delay_loop 3
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
protocol TCP
real_server 192.168.138.132 80 {
weight 1
inhibit_on_failure
TCP_CHECK {
connect_port 80
connect_timeout 3
}
}
real_server 192.168.138.135 80 {
weight 1
inhibit_on_failure
TCP_CHECK {
connect_timeout 3
connect_port 80
}
}
}
5. Add an advanced usage HTTP_GET (md5sum verification prevents the page from being tampered with)
First, note that the results of the LVS experiment should not be verified from within the cluster itself; verify them from an external client's browser.
Add an advanced usage to
prevent web pages from being tampered.
You can check whether the web pages have been changed with the md5 value of index.html and the status_code of nginx.
This configuration means: the real server is kept in rotation only when the MD5 digest of /index.html matches the configured digest AND the server's HTTP status code is 200 (HTTP_GET is an alternative health checker to TCP_CHECK).
virtual_server 10.3.131.221 80 {
delay_loop 6
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
persistence_timeout 20
protocol TCP
sorry_server 2.2.2.2 80
real_server 10.3.131.30 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
digest 481bf8243931326614960bdc17f99b00
}
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 2
}
}
}
virtual_server 192.168.0.200 80 {
delay_loop 6
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
persistence_timeout 20
protocol TCP
real_server 192.168.0.107 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
digest 66ee606d5019d75f83836eeb295c6b6f
status_code 200
}
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 2
}
}
real_server 192.168.0.108 80 {
weight 1
inhibit_on_failure
HTTP_GET {
url {
path /index.html
digest 699d00db64614eb287931b977d5c047f
status_code 200
}
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 2
}
}
}
Any real server that fails these checks is automatically taken out of rotation — better to remove it than to serve maliciously tampered web content.