LVS-DR with keepalived and NFS

1. Experiment requirements:
1 load scheduler (lvs-dr): 192.168.9.8
2 application servers (web1, web2): 192.168.9.9 / 192.168.9.10
1 NFS server (nfs): 192.168.9.11

2. Build the NFS service

[root@nfs ~]# yum -y install nfs-utils rpcbind 
[root@nfs ~]# systemctl start rpcbind
[root@nfs ~]# systemctl start nfs
[root@nfs ~]# systemctl enable nfs
[root@nfs ~]# mkdir /www/
[root@nfs ~]# useradd admin
[root@nfs ~]# grep admin /etc/passwd
[root@nfs ~]# chown admin:admin /www/
[root@nfs ~]# vim /etc/exports
 /www 192.168.9.0/24(rw)
[root@nfs ~]# cd /www/
[root@nfs www]# ls
[root@nfs www]# vim index.html
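Because the nfs service was started before /etc/exports was edited, the new export may still need to be published. A small refresh-and-check sketch, not part of the original transcript:

[root@nfs ~]# exportfs -rv              //re-read /etc/exports and re-export everything
[root@nfs ~]# showmount -e localhost    //confirm that /www is exported to 192.168.9.0/24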

3. Install httpd on web1 and web2 and mount the NFS share

[root@web1 ~]# yum -y install httpd
[root@web1 ~]# systemctl start httpd
[root@web1 ~]# yum -y install nfs-utils rpcbind
[root@web1 ~]# systemctl start rpcbind
[root@web1 ~]# systemctl start nfs
[root@web1 ~]# systemctl enable nfs
[root@web1 ~]# showmount -e 192.168.9.11


[root@web1 ~]# mount 192.168.9.11:/www/ /var/www/html/
[root@web1 ~]# df -hT
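The mount command above does not survive a reboot. If the share should come back automatically, one option is an /etc/fstab entry roughly like the following on both web1 and web2 (a sketch; adjust the mount options as needed):

[root@web1 ~]# vim /etc/fstab
192.168.9.11:/www   /var/www/html   nfs   defaults,_netdev   0 0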

4. Build the lvs-dr scheduler

[root@lvs-dr ~]# yum -y install ipvsadm
[root@lvs-dr ~]# ipvsadm -C
[root@lvs-dr ~]# ipvsadm -ln


[root@lvs-dr ~]# vim /opt/vip.sh  //script that configures the VIP
#!/bin/bash
#vip
VIP="192.168.9.66"
/sbin/ifconfig ens33:vip $VIP broadcast $VIP netmask 255.255.255.255
/sbin/route add -host $VIP dev ens33:vip
[root@lvs-dr ~]# chmod +x /opt/vip.sh 
[root@lvs-dr ~]# /opt/vip.sh 
[root@lvs-dr ~]# ip a show ens33
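The script relies on the legacy ifconfig/route commands from net-tools. On a host that only has iproute2, roughly the same effect can be obtained with the following sketch (same VIP and interface assumed):

/sbin/ip addr add 192.168.9.66/32 dev ens33 label ens33:vip   # equivalent of the ifconfig line
/sbin/ip route add 192.168.9.66 dev ens33                     # equivalent of the route add -host line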


[root@lvs-dr ~]# route -n  //check the routing table entries


[root@lvs-dr ~]# echo "/opt/vip.sh" >> /etc/rc.local

[root@lvs-dr ~]# ipvsadm -A -t 192.168.9.66:80 -s rr
[root@lvs-dr ~]# ipvsadm -a -t 192.168.9.66:80 -r 192.168.9.9:80 -g
[root@lvs-dr ~]# ipvsadm -a -t 192.168.9.66:80 -r 192.168.9.10:80 -g
[root@lvs-dr ~]# ipvsadm -ln


[root@lvs-dr ~]# ipvsadm-save > /etc/sysconfig/ipvsadm  //save the rules so they persist
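On CentOS 7 the ipvsadm package normally also ships an ipvsadm.service unit that restores /etc/sysconfig/ipvsadm at boot; if it is present, enabling it completes the persistence step (a sketch, not in the original transcript):

[root@lvs-dr ~]# systemctl enable ipvsadm   //reload the rules saved above on every boot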

5. Configure the web1 and web2 real servers

[root@web1 ~]# vim /opt/lvs-dr   //bind the VIP on the loopback and adjust the ARP kernel parameters so the real servers do not answer ARP for the VIP
#!/bin/bash 
# lvs-dr 
VIP="192.168.9.66"
/sbin/ifconfig lo:vip $VIP broadcast $VIP netmask 255.255.255.255
/sbin/route add -host $VIP dev lo:vip
echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
[root@web1 ~]# chmod +x /opt/lvs-dr 
[root@web1 ~]# /opt/lvs-dr 
[root@web1 ~]# echo "/opt/lvs-dr" >> /etc/rc.local 
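Instead of relying only on rc.local, the ARP kernel parameters can also be persisted through sysctl; a sketch for web1 (repeat on web2):

[root@web1 ~]# vim /etc/sysctl.conf
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
[root@web1 ~]# sysctl -p    //apply immediately and keep across reboots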
[root@web1 ~]# ip a


[root@web1 ~]# route -n


[root@web1 ~]# scp /opt/lvs-dr 192.168.9.10:/opt  //give web2 the same script and repeat the same steps there
[root@web2 ~]# /opt/lvs-dr
[root@web2 ~]# ip a
[root@web2 ~]# route -n
[root@web2 ~]# echo "/opt/lvs-dr" >> /etc/rc.local

6. Test
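Besides checking the page in a browser, a few requests can be generated from another host on the 192.168.9.0/24 network (the client prompt below is hypothetical) so that connection entries show up in the ipvsadm output that follows:

[root@client ~]# for i in 1 2 3 4; do curl -s http://192.168.9.66/; done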

[root@lvs-dr ~]# ipvsadm -L -c -n


keepalived + LVS-DR + NFS
1. keepalived

A health-check tool designed specifically for LVS and HA:
· Supports automatic failover
· Supports node health checking

2. Experimental environment

Master load balancer   lvsdr-master   192.168.9.8
Backup load balancer   lvsdr-slave    192.168.9.7
Web server 1           web1           192.168.9.9
Web server 2           web2           192.168.9.10
NFS server             nfs            192.168.9.11
Virtual IP             vip            192.168.9.66

3. web and NFS configuration

web1, web2 and nfs are configured exactly as in the first part above.

4. Primary load balancer configuration

[root@lvsdr-master ~]# modprobe ip_vs
[root@lvsdr-master ~]# cat /proc/net/ip_vs


[root@lvsdr-master ~]# yum -y install keepalived ipvsadm
[root@lvsdr-master ~]# cd /etc/keepalived/
[root@lvsdr-master keepalived]# cp keepalived.conf{,.ori}   //make a backup before editing
[root@lvsdr-master keepalived]# vim keepalived.conf

An annotated reference configuration follows; the configuration actually applied in this lab comes after it.

! Configuration File for keepalived
global_defs {                            # global settings
    notification_email {
        [email protected]                 # alert e-mail recipients, one per line
    }
    notification_email_from [email protected]   # sender address for alert mail
    smtp_server 172.0.0.1                # SMTP server address
    smtp_connect_timeout 30              # timeout for connecting to the SMTP server, 30 seconds
    router_id LVS_DEVEL_BLM              # identifier of this keepalived node, shown in the subject
                                         # of alert mail; change it on the backup node,
                                         # e.g. to LVS_DEVEL_BLS27
}
vrrp_instance VI_1 {                     # VRRP instance definition
    state MASTER                         # role of this node: MASTER for the primary,
                                         # BACKUP for the standby
    interface eth1                       # interface that carries the HA (VRRP) traffic
    virtual_router_id 51                 # virtual router ID, a number that must be identical
                                         # on MASTER and BACKUP within the same vrrp_instance
    priority 100                         # priority 1-254, higher wins; the master must be higher
                                         # than the backup and the gap should be small,
                                         # e.g. MASTER 100 and BACKUP 99
    advert_int 2                         # interval between the advertisements exchanged by
                                         # MASTER and BACKUP, here 2 seconds
    authentication {                     # authentication type and password
        auth_type PASS                   # PASS or AH
        auth_pass 1111                   # MASTER and BACKUP in the same vrrp_instance must use
                                         # the same password to communicate
    }
    virtual_ipaddress {                  # virtual IP addresses, one per line; several may be listed
        192.168.1.66
    }
}
virtual_server 192.168.1.66 80 {         # virtual server: VIP and service port, separated by a space
    delay_loop 2                         # health-check interval, 2 seconds
    lb_algo rr                           # scheduling algorithm, here rr (round robin)
    lb_kind DR                           # LVS forwarding mode: NAT, TUN or DR
    ! nat_mask 255.255.255.0             # comment this line out when not using NAT mode
    ! persistence_timeout 300            # persistence timeout, 300 seconds: after a client connects,
                                         # it keeps hitting the same real server for 300 seconds
    protocol TCP                         # forwarding protocol, TCP or UDP
    real_server 192.168.1.3 80 {         # real server: its IP and port, separated by a space
        weight 1                         # node weight; the larger the number the higher the weight,
                                         # so servers of different capacity can be given different
                                         # shares of the load (higher weight for faster machines)
        TCP_CHECK {                      # real-server health-check settings, times in seconds
            connect_timeout 10           # give up after 10 seconds without a response
            nb_get_retry 3               # number of retries
            delay_before_retry 3         # 3 seconds between two retries
            connect_port 80              # port used for the check
        }
    }
    real_server 192.168.1.4 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

The configuration actually applied on the master:

! Configuration File for keepalived

global_defs {
   notification_email {
		[email protected]
   }
   notification_email_from [email protected]
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_MASTER
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.9.66
    }
}

virtual_server 192.168.9.66 80 {
    delay_loop 2
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP

    real_server 192.168.9.9 80 {
        weight 1
		TCP_CHECK {
			connect_timeout 10
			nb_get_retry 3
			delay_before_retry 3
			connect_port 80
        }
    }
    real_server 192.168.9.10 80 {
        weight 1
		TCP_CHECK {
			connect_timeout 10
			nb_get_retry 3
			delay_before_retry 3
			connect_port 80
        }
    }
}
[root@lvsdr-master ~]# systemctl start keepalived
[root@lvsdr-master ~]# ipvsadm -ln


[root@lvsdr-master ~]# ip a
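Not shown in the original steps, but if keepalived should start automatically after a reboot, enabling the unit on both directors is the usual approach:

[root@lvsdr-master ~]# systemctl enable keepalived   //do the same on lvsdr-slave later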

5. Backup load balancer configuration

[root@lvsdr-slave ~]# yum -y install ipvsadm keepalived
[root@lvsdr-slave ~]# cp /etc/keepalived/keepalived.conf{,.ori}
[root@lvsdr-slave ~]# scp 192.168.9.8:/etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf
[root@lvsdr-slave ~]# vim /etc/keepalived/keepalived.conf

The revised content is as follows

! Configuration File for keepalived

global_defs {
   notification_email {
		[email protected]
   }
   notification_email_from [email protected]
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_SLAVE
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.9.66
    }
}

virtual_server 192.168.9.66 80 {
    delay_loop 2
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP

    real_server 192.168.9.9 80 {
        weight 1
		TCP_CHECK {
			connect_timeout 10
			nb_get_retry 3
			delay_before_retry 3
			connect_port 80
        }
    }
    real_server 192.168.9.10 80 {
        weight 1
		TCP_CHECK {
			connect_timeout 10
			nb_get_retry 3
			delay_before_retry 3
			connect_port 80
        }
    }
}
[root@lvsdr-slave ~]# systemctl start keepalived
[root@lvsdr-slave ~]# ip a

It is correct that 192.168.9.66 does not show up here: the backup only takes over the VIP when the master fails.

[root@lvsdr-slave ~]# ipvsadm -ln

6. Test
Everything above works, yet the page could not be reached through the VIP even after stopping the firewall with systemctl stop firewalld; access only succeeded after clearing the rules with iptables -F. The likely explanation is the vrrp_strict option in global_defs, which makes keepalived install iptables rules that drop traffic to the VIP; removing that option avoids having to flush the rules by hand.
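If vrrp_strict really is the culprit, a cleaner workaround than flushing every rule would look roughly like this (a sketch; check which rules are actually present first):

[root@lvsdr-master ~]# iptables -nL | grep 192.168.9.66      //look for the rule keepalived added for the VIP
[root@lvsdr-master ~]# sed -i 's/vrrp_strict/#vrrp_strict/' /etc/keepalived/keepalived.conf
[root@lvsdr-master ~]# systemctl restart keepalived          //repeat on lvsdr-slave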

High availability: stop keepalived on the master and check the backup's addresses: 192.168.9.66 now appears there. Start keepalived on the master again and check once more: the VIP has moved back to the master, as before.
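Driven from the command line, the failover test looks roughly like this (a sketch):

[root@lvsdr-master ~]# systemctl stop keepalived     //simulate a master failure
[root@lvsdr-slave ~]# ip a show ens33                //192.168.9.66 should now appear here
[root@lvsdr-master ~]# systemctl start keepalived    //the higher-priority master preempts
[root@lvsdr-slave ~]# ip a show ens33                //the VIP has left the backup again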
Health checking: stop httpd on one of the real servers and run ipvsadm -ln; the stopped server's IP disappears from the real-server list. Start httpd again and the entry is restored.

[root@web2 ~]# systemctl stop httpd


[root@web2 ~]# systemctl start httpd



Origin: blog.csdn.net/qq_39109226/article/details/111706122