# LVS_Director + Keepalived

LVS_Director + guía de experimentos de Keepalived

Nombre del host | IP | Sistema | Uso
cliente 172.16.147.1 Mac Cliente
lvs-keepalived-master 172.16.147.154 centos7.5 Distribuidor
lvs-keepalived-esclavo 172.16.147.155 centos7.5 Equipo distribuidor
prueba-nginx1 172.16.147.153 centos7.5 web1
prueba-nginx2 172.16.147.156 centos7.5 web2
VIP 172.16.147.101
LVS_Director + KeepAlived

KeepAlived在该项目中的功能:
1. 管理IPVS的路由表(包括对RealServer做健康检查)
2. 实现调度器的HA
http://www.keepalived.org

Keepalived所执行的外部脚本命令建议使用绝对路径

实施步骤:
1. 主/备调度器安装软件
[root@lvs-keepalived-master ~]# yum -y install ipvsadm keepalived 
[root@lvs-keepalived-slave ~]# yum -y install ipvsadm keepalived
2. Keepalived
lvs-master
[root@lvs-keepalived-master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
    
    
   router_id lvs-keepalived-master    #辅助改为lvs-backup
}

vrrp_instance VI_1 {
    
    
    state MASTER
    interface ens33                #VIP绑定接口
    virtual_router_id 80         #VRID 同一组集群,主备一致          
    priority 100            #本节点优先级,辅助改为50
    advert_int 1            #检查间隔,默认为1s
    authentication {
    
    
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    
    
        172.16.147.101/24  # 可以写多个vip
    }
}

virtual_server 172.16.147.101 80 {
    
        #LVS配置
	delay_loop 3
	lb_algo rr     #LVS调度算法
	lb_kind DR     #LVS集群模式(路由模式)
	nat_mask 255.255.255.0
	protocol TCP      #健康检查使用的协议
	real_server 172.16.147.153 80 {
    
    
		weight 1
		inhibit_on_failure   #当该节点失败时,把权重设置为0,而不是从IPVS中删除
		TCP_CHECK {
    
              #健康检查
			connect_port 80   #检查的端口
			connect_timeout 3  #连接超时的时间
			}
		}
	real_server 172.16.147.156 80 {
    
    
		weight 1
		inhibit_on_failure
		TCP_CHECK {
    
    
			connect_timeout 3
			connect_port 80
			}
		}
}

[root@lvs-keepalived-slave ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
    
    
   router_id lvs-keepalived-slave
}

vrrp_instance VI_1 {
    
    
    state BACKUP
    interface ens33
    nopreempt                    #不抢占资源
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
    
    
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    
    
        172.16.147.101/24
    }
}
virtual_server 172.16.147.101 80 {
    
    
	delay_loop 3
	lb_algo rr
	lb_kind DR
	nat_mask 255.255.255.0
	protocol TCP
	real_server 172.16.147.153 80 {
    
    
		weight 1
		inhibit_on_failure
		TCP_CHECK {
    
    
			connect_port 80
			connect_timeout 3
			}
		}
	real_server 172.16.147.156 80 {
    
    
		weight 1
		inhibit_on_failure
		TCP_CHECK {
    
    
			connect_timeout 3
			connect_port 80
			}
		}
}
3. 启动KeepAlived(主备均启动)
[root@lvs-keepalived-master ~]# systemctl start keepalived
[root@lvs-keepalived-master ~]# systemctl enable keepalived

[root@lvs-keepalived-master ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.16.147.101:80 rr persistent 20
  -> 172.16.147.153:80           Route   1      0          0         
  -> 172.16.147.156:80           Route   0      0          0

4. 所有RS配置(nginx1,nginx2)
配置好网站服务器,测试所有RS
[root@test-nginx1 ~]# yum install -y nginx
[root@test-nginx2 ~]# yum install -y nginx
[root@test-nginx1 ~]# echo "ip addr add dev lo 172.16.147.101/32" >> /etc/rc.local
[root@test-nginx1 ~]# echo "net.ipv4.conf.all.arp_ignore = 1" >> /etc/sysctl.conf
[root@test-nginx1 ~]# echo "net.ipv4.conf.all.arp_announce = 2" >> /etc/sysctl.conf
[root@test-nginx1 ~]# sysctl -p
[root@test-nginx1 ~]# echo "web1..." >> /usr/share/nginx/html/index.html
[root@test-nginx1 ~]# systemctl start nginx
[root@test-nginx1 ~]# chmod +x /etc/rc.local

LB集群测试
所有分发器和Real Server都正常

主分发器故障及恢复

Operación experimental LVS_Director + Keepalived

1. Utilice LVS para el equilibrio de carga y Keepalived para una alta disponibilidad

Keepalived es compatible de forma nativa con LVS, por lo que la operación de Keepalived omitirá la operación de LVS. La razón para instalar ipvsadm es poder ver el soporte nativo de Keepalived para LVS, es decir, Keepalived puede generar automáticamente la lista de servidores virtuales de LVS

2. Configuración del servidor web

[root@real-server1 ~]# yum install -y nginx
[root@real-server1 ~]# echo "real-server1" >> /usr/share/nginx/html/index.html
两台机器都安装,按顺序添加不同的主机名以示区分
[root@real-server1 ~]# ip addr add dev lo 172.16.147.200/32   #在lo接口上绑定VIP
[root@real-server1 ~]# echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore  #忽略arp广播
[root@real-server1 ~]# echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce #匹配精确ip地址回包
[root@real-server1 ~]# systemctl start nginx 
[root@real-server1 ~]# systemctl enable  nginx 
=============================================================================
因为:realServer的vip有了,接着就是同一个网段中拥有两个vip, 客户端在网关发送arp广播需找vip时需要让realServer不接受响应.  
解决:
echo 1 >/proc/sys/net/ipv4/conf/eth0/arp_ignore 
arp_ignore 设置为1,意味着当别人的arp请求过来的时候,如果接收的设备没有这个ip,就不做出响应(这个ip在lo上,lo不是接收设备的进口)
echo 2 >/proc/sys/net/ipv4/conf/eth0/arp_announce   
使用最好的ip来回应,什么是最好的ip?同一个网段内子网掩码最长的

3. Configuración de lvs-master

 [root@lvs-master ~]# yum -y install ipvsadm keepalived
 ! Configuration File for keepalived

global_defs {
    
    
   router_id lvs-keepalived-master    #辅助改为lvs-backup
}

vrrp_instance VI_1 {
    
    
    state MASTER
    interface ens37               #VIP绑定接口
    virtual_router_id 80         #VRID 同一组集群,主备一致          
    priority 100            #本节点优先级,辅助改为50
    advert_int 1            #检查间隔,默认为1s
    authentication {
    
    
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    
    
        192.168.138.200/24  # 可以写多个vip
    }
}

virtual_server 192.168.138.200 80 {
    
        #LVS配置
	delay_loop 3
	lb_algo rr     #LVS调度算法
	lb_kind DR     #LVS集群模式(路由模式)
	nat_mask 255.255.255.0
	protocol TCP      #健康检查使用的协议
	real_server 192.168.138.132 80 {
    
    
		weight 1
		inhibit_on_failure   #当该节点失败时,把权重设置为0,而不是从IPVS中删除
		TCP_CHECK {
    
              #健康检查
			connect_port 80   #检查的端口
			connect_timeout 3  #连接超时的时间
			}
		}
	real_server 192.168.138.135 80 {
    
    
		weight 1
		inhibit_on_failure
		TCP_CHECK {
    
    
			connect_timeout 3
			connect_port 80
			}
		}
}

4. Configuración de lvs-slave

! Configuration File for keepalived

global_defs {
    
    
   router_id lvs-keepalived-slave
}

vrrp_instance VI_1 {
    
    
    state BACKUP
    interface ens33
    nopreempt                    #不抢占资源
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
    
    
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    
    
        192.168.138.200/24
    }
}
virtual_server 192.168.138.200 80 {
    
    
	delay_loop 3
	lb_algo rr
	lb_kind DR
	nat_mask 255.255.255.0
	protocol TCP
	real_server 192.168.138.132 80 {
    
    
		weight 1
		inhibit_on_failure
		TCP_CHECK {
    
    
			connect_port 80
			connect_timeout 3
			}
		}
	real_server 192.168.138.135 80 {
    
    
		weight 1
		inhibit_on_failure
		TCP_CHECK {
    
    
			connect_timeout 3
			connect_port 80
			}
		}
}

5. Agregue un HTTP_GET de uso avanzado (la verificación md5sum evita que la página sea manipulada)

La primera es que los resultados experimentales de LVS no deben verificarse en el clúster. Vaya al navegador para verificar.
Agregue un uso avanzado para
evitar que las páginas web sean manipuladas.
Puede verificar si las páginas web se han modificado con el valor md5 de index.html y el status_code de nginx.
Inserte la descripción de la imagen aquí
Inserte la descripción de la imagen aquí
Esta configuración significa: solo cuando el valor md5 de /index.html coincide con el digest configurado y el código de estado devuelto por el servidor es 200, se reenvía el tráfico a ese real server (aquí HTTP_GET se usa en lugar de TCP_CHECK como comprobación de salud).


virtual_server 10.3.131.221 80 {
    
    
    delay_loop 6
    lb_algo rr
    lb_kind DR
    nat_mask 255.255.255.0
    persistence_timeout 20
    protocol TCP
    sorry_server 2.2.2.2 80
  
    real_server 10.3.131.30 80 {
    
    
        weight 1
        inhibit_on_failure 
        HTTP_GET {
    
     
            url {
    
    
              path /index.html
              digest 481bf8243931326614960bdc17f99b00
            }
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 2
        }
    }


}

virtual_server 192.168.0.200 80 {
    
    
    delay_loop 6
    lb_algo rr
    lb_kind DR
    nat_mask 255.255.255.0
    persistence_timeout 20
    protocol TCP
  
    real_server 192.168.0.107 80 {
    
    
        weight 1
        inhibit_on_failure 
        HTTP_GET {
    
     
            url {
    
    
              path /index.html
              digest 66ee606d5019d75f83836eeb295c6b6f
     		status_code 200
	       }
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 2
        }
    }

    real_server 192.168.0.108 80 {
    
    
        weight 1
        inhibit_on_failure 
        HTTP_GET {
    
     
            url {
    
    
              path /index.html
              digest 699d00db64614eb287931b977d5c047f
	      status_code 200
            }
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 2
        }
    }

}

El servidor real que no cumpla con las condiciones se apagará automáticamente, y es mejor hacerlo que publicar contenido web manipulado maliciosamente
Inserte la descripción de la imagen aquí

Supongo que te gusta

Origin blog.csdn.net/kakaops_qing/article/details/109134591
Recomendado
Clasificación