lb (nginx + keepalived load balancer for the k8s apiserver)

Install keepalived and nginx on both LB nodes (lb-master and lb-backup):
yum install keepalived nginx -y



========================================================
========================================================
nginx configuration file
Add this block:

stream {

   log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
        server 192.168.1.63:6443;
        server 192.168.1.64:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}
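
Before starting nginx, it is worth checking that the config parses. The check below assumes the stream block above sits at the top level of /etc/nginx/nginx.conf (next to the http block) and that the stream module is built in or loaded as a dynamic module:

# quick syntax check for the stream proxy config
nginx -t
# expected:
#   nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
#   nginx: configuration file /etc/nginx/nginx.conf test is successful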


keepalived configuration file


! Configuration File for keepalived 
 
global_defs { 
   notification_email { 
     [email protected] 
     [email protected] 
     [email protected] 
   } 
   notification_email_from [email protected]  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/usr/local/nginx/sbin/check_nginx.sh"  ###检测脚本
}

vrrp_instance VI_1 { 
    state BACKUP
    interface enp0s3    ## change this to your NIC's device name (check it with ip addr before filling it in)
    virtual_router_id 51 # VRRP router ID; unique per VRRP instance (must match on master and backup)
    priority 100      # priority: master is set higher than backup; use 90 on the backup
    advert_int 1    # VRRP advertisement (heartbeat) interval; default is 1 second
    authentication { 
        auth_type PASS      ## password authentication
        auth_pass 1111 
    }  
    virtual_ipaddress {     #### the VIP address
        192.168.1.60/24 
    } 
    track_script {      # run the health check script defined above
        check_nginx
    } 
}
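
The check_nginx.sh referenced by vrrp_script is not included above; below is a minimal sketch (my assumption, not necessarily the original author's script) that stops keepalived when nginx is down, so the VIP fails over to the backup node:

#!/bin/bash
# /usr/local/nginx/sbin/check_nginx.sh  (path taken from the vrrp_script block above)
# If no nginx process is running, stop keepalived so the VIP moves to the backup node.
count=$(ps -C nginx --no-headers | wc -l)
if [ "$count" -eq 0 ]; then
    systemctl stop keepalived
fi

Make it executable on both LB nodes: chmod +x /usr/local/nginx/sbin/check_nginx.sh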


systemctl start nginx
systemctl start keepalived

Summary:
1. Run ip add on the master node and you will see an extra address, 192.168.1.60; this is the VIP. While the master is running normally, the backup does not hold this IP.
2. You can verify with ping: when the master node goes down, the .60 VIP floats over to the backup node and service continues (see the check commands below).
3. Expect roughly one lost ping during the failover.
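
A simple way to watch the failover (assuming the interface is enp0s3 and the VIP is 192.168.1.60, as configured above):

# on lb-master / lb-backup: see which node currently holds the VIP
ip addr show enp0s3 | grep 192.168.1.60
# from any other host: keep pinging the VIP while stopping nginx (or keepalived) on lb-master
ping 192.168.1.60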

========================================================
========================================================
Add node01 and node02 to the load-balancing cluster
1. On node01:

root@k8s-node01: /opt/kubernetes/cfg 16:00:20
$ grep 60 *
bootstrap.kubeconfig:    server: https://192.168.1.60:6443
bootstrap.kubeconfig:    token: 0fb61c46f8991b718eb38d27b605b008
kubelet.kubeconfig:    server: https://192.168.1.60:6443
kube-proxy.kubeconfig:    server: https://192.168.1.60:6443
root@k8s-node01: /opt/kubernetes/cfg 16:00:25
$ 
##The addresses grep shows above originally pointed at the master node's IP; they have all been replaced so they point at the LB VIP 192.168.1.60. Do the same on node02 (a sed sketch follows).
Restart kubelet and kube-proxy on the node.
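
A sketch of the replacement and restart on each node (assuming the old entries held master01's address 192.168.1.63 and the files live in /opt/kubernetes/cfg):

cd /opt/kubernetes/cfg
# point bootstrap.kubeconfig, kubelet.kubeconfig and kube-proxy.kubeconfig at the LB VIP
sed -i 's/192.168.1.63/192.168.1.60/g' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig
systemctl restart kubelet
systemctl restart kube-proxy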

2. Verify
Check the log on lb-master; the nodes reach the apiserver through the LB, which balances across both masters:
root@lb-master: /opt 14:36:30
$ tail -f /var/log/nginx/k8s-access.log 
192.168.1.65 192.168.1.64:6443 - [25/Mar/2019:11:43:02 +0800] 200 1119
192.168.1.65 192.168.1.63:6443 - [25/Mar/2019:13:27:07 +0800] 200 1119
192.168.1.66 192.168.1.63:6443 - [25/Mar/2019:13:27:07 +0800] 200 1119
192.168.1.65 192.168.1.63:6443 - [25/Mar/2019:13:27:07 +0800] 200 1566
192.168.1.66 192.168.1.63:6443 - [25/Mar/2019:13:27:07 +0800] 200 1118
192.168.1.65 192.168.1.64:6443 - [25/Mar/2019:13:27:07 +0800] 200 1566
192.168.1.66 192.168.1.64:6443 - [25/Mar/2019:13:27:07 +0800] 200 1566
192.168.1.66 192.168.1.64:6443 - [25/Mar/2019:13:27:07 +0800] 200 1118
192.168.1.65 192.168.1.64:6443 - [25/Mar/2019:13:27:07 +0800] 200 1117
192.168.1.65 192.168.1.64:6443 - [25/Mar/2019:13:27:07 +0800] 200 1117


========================================================
========================================================
k8s dual-master setup
1. Copy the master node's configuration files and systemd unit files over to master02:
scp -r /opt/kubernetes/ root@192.168.1.64:/opt/
scp /usr/lib/systemd/system/{kube-apiserver,kube-scheduler,kube-controller-manager}.service root@192.168.1.64:/usr/lib/systemd/system/
scp /usr/bin/kubectl root@192.168.1.64:/usr/bin/

2. On master02, edit the kube-apiserver, kube-scheduler and kube-controller-manager configuration files and change the IP addresses to master02's.

root@master02: /opt/kubernetes/cfg 15:47:12
$ pwd
/opt/kubernetes/cfg
root@master02: /opt/kubernetes/cfg 15:47:12
$ ls
kube-apiserver  kube-controller-manager  kube-scheduler  token.csv
##This output is after the change; before editing, grep 63 would list every line still containing master01's IP (192.168.1.63).
root@master02: /opt/kubernetes/cfg 15:47:15
$ grep 64 *
kube-apiserver:--bind-address=192.168.1.64 \
kube-apiserver:--secure-port=6443 \
kube-apiserver:--advertise-address=192.168.1.64 \
root@master02: /opt/kubernetes/cfg 15:47:19
$ 
PS:
 the kube-controller-manager and kube-scheduler config files point at 127.0.0.1, so they needed no changes (a sed sketch for kube-apiserver follows).
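
A sketch of the edit on master02 (assuming master01's IP 192.168.1.63 is the only address that needs replacing, as the grep output above suggests):

cd /opt/kubernetes/cfg
# only kube-apiserver carries the node IP (--bind-address and --advertise-address)
sed -i 's/192.168.1.63/192.168.1.64/g' kube-apiserver
grep 64 *    # confirm the replacement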
3. Start the services on master02
systemctl restart kube-apiserver
systemctl restart kube-controller-manager
systemctl restart kube-scheduler

4. Verify
root@master02: /opt/kubernetes/cfg 15:50:18
$ kubectl get node
NAME           STATUS   ROLES    AGE     VERSION
192.168.1.65   Ready    <none>   63m     v1.13.4
192.168.1.66   Ready    <none>   4d22h   v1.13.4
root@master02: /opt/kubernetes/cfg 15:50:22
$ kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-XMad_RYrooh4SENAIOWeD2VIGEZOR-5jVG3QASPBZzA   65m   kubelet-bootstrap   Approved,Issued
root@master02: /opt/kubernetes/cfg 15:50:25
$ kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}   
root@master02: /opt/kubernetes/cfg 15:50:28
$ kubectl get pod
NAME                     READY   STATUS             RESTARTS   AGE
java-84767655bc-5rlth    0/1     CrashLoopBackOff   15         57m
nginx-7cdbd8cdc9-2jwmj   1/1     Running            0          51m
nginx-7cdbd8cdc9-bwp9v   1/1     Running            0          57m
nginx-7cdbd8cdc9-zc2rl   1/1     Running            0          57m
root@master02: /opt/kubernetes/cfg 15:50:30
$ 

========================================================
========================================================


Reposted from www.cnblogs.com/Carr/p/10595518.html