一、实验原理
数据包进来和出去同时都做了DNAT和SNAT的转换;
与DR、TUN、NAT相比,只有它具有抗攻击性;(对五元组进行双向hash)
性能比较:DR > TUN > NAT > fullnat
二、实验部署
(1)实验环境:
下载软件 kernel-2.6.32-220.23.1.el6.src.rpm
newt-devel-0.52.11-3.el6.x86_64.rpm
asciidoc-8.4.5-4.1.el6.noarch.rpm
slang-devel-2.2.1-1.el6.x86_64.rpm (依赖性)
Lvs-fullnat-synproxy.tar.gz
lvs-tools.tar.gz
所有的主机都为rhel6.5,selinux为disabled,防火墙关闭,策略清除;
server1: LB( VIP:172.25.74.1 ; RIP:172.25.45.1 )
server2: RS( IP:172.25.45.2 )
server3: RS( IP:172.25.45.3 )
(2)实验过程:
a. 添加fullnat模块;
[root@server1 ~]# yum install rpm-build -y
[root@server1 mnt]# rpm -ivh kernel-2.6.32-220.23.1.el6.src.rpm
[root@server1 ~]# cd rpmbuild/SPECS
[root@server1 SPECS]# ls
kernel.spec
[root@server1 SPECS]# rpmbuild -bp kernel.spec ##预编译
[root@server1 SPECS]# rpmbuild -bp kernel.spec ##第一次执行会因缺少依赖而报错,需要先解决依赖性
[root@server1 SPECS]# yum install gcc redhat-rpm-config patchutils xmlto asciidoc elfutils-libelf-devel zlib-devel binutils-devel newt-devel python-devel perl-ExtUtils-Embed hmaccalc -y
[root@server1 SPECS]# rpmbuild -bp kernel.spec
error: Failed build dependencies: ##还有2个依赖性没有解决,需要通过官网下载
[root@server1 ~]# yum install newt-devel-0.52.11-3.el6.x86_64.rpm asciidoc-8.4.5-4.1.el6.noarch.rpm slang-devel-2.2.1-1.el6.x86_64.rpm -y
##Package: newt-devel-0.52.11-3.el6.x86_64 (/newt-devel-0.52.11-3.el6.x86_64)
Requires: slang-devel (要安装newt,需要先解决它的依赖性slang-devel)
[root@server1 SPECS]# rpmbuild -bp kernel.spec
##在这个过程中会出现卡顿,需要再开启一个窗口,执行以下命令:
[root@server1 ~]# yum install -y rng-tools-2-13.el6_2.x86_64
[root@server1 ~]# rngd -r /dev/urandom
[root@server1 ~]# tar zxf Lvs-fullnat-synproxy.tar.gz
[root@server1 ~ ]# cd rpmbuild/BUILD/kernel-2.6.32-220.23.1.el6/linux-2.6.32-220.23.1.el6.x86_64
[root@server1 linux-2.6.32-220.23.1.el6.x86_64]# cp /root/lvs-fullnat-synproxy/lvs-2.6.32-220.23.1.el6.patch . ##复制补丁
[root@server1 linux-2.6.32-220.23.1.el6.x86_64]# patch -p1 < lvs-2.6.32-220.23.1.el6.patch ##打补丁
[root@server1 linux-2.6.32-220.23.1.el6.x86_64]# cat .config| grep IP_VS
CONFIG_IP_VS=m
CONFIG_IP_VS_IPV6=y
# CONFIG_IP_VS_DEBUG is not set
CONFIG_IP_VS_TAB_BITS=22 ##哈希表大小为2^22
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
CONFIG_IP_VS_PROTO_AH_ESP=y
CONFIG_IP_VS_PROTO_ESP=y
CONFIG_IP_VS_PROTO_AH=y
CONFIG_IP_VS_RR=m
CONFIG_......(其余输出省略)
[root@server1 linux-2.6.32-220.23.1.el6.x86_64]# make ##编译(大约时长为1小时)
[root@server1 linux-2.6.32-220.23.1.el6.x86_64]# make modules_install
[root@server1 linux-2.6.32-220.23.1.el6.x86_64]# make install
[root@server1 linux-2.6.32-220.23.1.el6.x86_64]# vim /boot/grub/grub.conf ##将default改为0,默认从新编译的内核启动
[root@server1 linux-2.6.32-220.23.1.el6.x86_64]# reboot ##重启
[root@server1 ~]# cd lvs-fullnat-synproxy/
[root@server1 lvs-fullnat-synproxy ]# tar zxf lvs-tools.tar.gz
[root@server1 lvs-fullnat-synproxy ]# cd tools/
[root@server1 lvs-fullnat-synproxy ]# cd keepalived/
[root@server1 keepalived]# ./configure --with-kernel-dir="/lib/modules/`uname -r`/build" ##可能缺少openssl,安装即可
[root@server1 keepalived]# yum install openssl-devel popt-devel -y
[root@server1 keepalived]# ./configure --with-kernel-dir="/lib/modules/`uname -r`/build" ##如果出现以下信息,安装正常;
------------------------
Keepalived version : 1.2.2
Compiler : gcc
Compiler flags : -g -O2
Extra Lib : -lpopt -lssl -lcrypto
Use IPVS Framework : Yes
IPVS sync daemon support : Yes
IPVS use libnl : No
Use VRRP Framework : Yes
Use Debug flags : No
[root@server1 keepalived]# make
[root@server1 keepalived]# make install
[root@server1 keepalived]# cd ../ipvsadm/
[root@server1 ipvsadm]# make && make install ##编译安装fullnat版本的ipvsadm
[root@server1 ipvsadm]# ipvsadm --help ##查看帮助,看是否可以出现fullnat模式;因为之前的内核ipvsadm不自带fullnat模式,以上操作是添加fullnat模式
--fullnat -b fullnat mode
B:配置LVS
server1添加2个网卡
[root@server1 network-scripts]# vim ifcfg-eth1
[root@server1 network-scripts]# /etc/init.d/network restart
[root@server1 network-scripts]# ipvsadm -C ##清除所有策略
[root@server1 network-scripts]# ipvsadm -A -t 172.25.74.1:80 -s rr
[root@server1 network-scripts]# ipvsadm -a -t 172.25.74.1:80 -r 172.25.45.2:80 -b
[root@server1 network-scripts]# ipvsadm -a -t 172.25.74.1:80 -r 172.25.45.3:80 -b
[root@server1 network-scripts]# ipvsadm -P -t 172.25.74.1:80 -z 172.25.45.1:80 ##添加本地ip,这里也可以写回环接口127.0.0.1
[root@server1 network-scripts]# ipvsadm -G -t 172.25.74.1:80 ##查看(get)已添加的本地IP
VIP:VPORT TOTAL SNAT_IP CONFLICTS CONNS
172.25.74.1:80 1
172.25.45.1 0 0
#delete 删除
ipvsadm -Q -t service-address -z local-address
[root@server1 network-scripts]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4194304)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.25.74.1:80 rr
-> 172.25.45.2:80 FullNat 1 0 0
-> 172.25.45.3:80 FullNat 1 0 0
[root@server1 network-scripts]# echo 1 > /proc/sys/net/ipv4/ip_forward ##开启ip转发
server2:
[root@server2 ~]# route add default gw 172.25.45.1 ##添加LB的IP作为RS的网关
[root@server2 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
172.25.45.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
172.25.45.0 0.0.0.0 255.255.255.0 U 0 0 0 tunl0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 1003 0 0 tunl0
0.0.0.0 172.25.45.1 0.0.0.0 UG 0 0 0 eth0
[root@server2 ~]# /etc/init.d/httpd start
server3:
[root@server3 ~]# route add default gw 172.25.45.1
[root@server3 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
172.25.45.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
172.25.45.0 0.0.0.0 255.255.255.0 U 0 0 0 tunl0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 1003 0 0 tunl0
0.0.0.0 172.25.45.1 0.0.0.0 UG 0 0 0 eth0
[root@server3 ~]# /etc/init.d/httpd start
C:客户端测试:
[root@foundation45 ~]# ip addr add 172.25.74.10/24 dev br0 ##保证客户端和LB的VIP在同一个网段(注意带上掩码,否则默认为/32)
[root@foundation45 ~]# curl 172.25.74.1 ##测试轮询调度;可在调度器上查看转发(访问)记录
附:
问题:
如果客户端测试不通,很可能是因为没有添加本地IP(local address)导致策略不生效: ipvsadm -P -t 172.25.74.1:80 -z 172.25.45.1:80 ##添加本地ip,这里也可以写回环接口127.0.0.1