7. Kubernetes Basics: High Availability


The architecture is as follows.

Deploying high availability here means adding a second master and using VIP failover to eliminate the single point of failure.

There are two options for how the control-plane components connect to the api-server:

1. controller-manager and scheduler connect to the api-server on their own machine

2. controller-manager and scheduler connect through the VIP

Option 1 is used here.

Because the VIP only exists on one master at a time, connecting locally means the other master's controller-manager and scheduler never have to go over the network.

The nodes, on the other hand, must use the VIP address: their kubelet and kube-proxy both need to connect to the api-server.
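In terms of the config files edited later (steps 7 and 9 give the exact commands), the split roughly looks like this; the VIP 192.168.100.13 is the one configured in keepalived below:

## on both masters, /etc/kubernetes/config: use the local api-server
KUBE_MASTER="--master=http://127.0.0.1:8080"

## on the nodes, /etc/kubernetes/config and /etc/kubernetes/kubelet: use the VIP
KUBE_MASTER="--master=http://192.168.100.13:8080"
KUBELET_API_SERVER="--api-servers=http://192.168.100.13:8080"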

1. Clear all existing k8s resources

2. Install etcd on all nodes and form an HA cluster (etcd is naturally suited to clustering; cluster mode is its HA mode)

yum -y install etcd

3. Delete the master node's old etcd data

systemctl stop etcd
rm -rf /var/lib/etcd/xxxxx        ## (exact path forgotten; the data directory is the one defined as ETCD_DATA_DIR in the config file)
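If you do not remember the exact directory, read it back from the config file first (the yum package's config lives at /etc/etcd/etcd.conf):

[root@k8s-master etcd]# grep ETCD_DATA_DIR /etc/etcd/etcd.conf      ## the directory it prints is the one whose contents should be removed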

4. Edit the etcd config file (/etc/etcd/etcd.conf) on all nodes

ETCD_DATA_DIR="/var/lib/etcd/"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_NAME="node1"                                                  ## node name; must be different on every node
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.100.14:2380"      ## address this node advertises for peer (data sync) traffic
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.100.14:2379"            ## address this node advertises to clients
ETCD_INITIAL_CLUSTER="node1=http://192.168.100.14:2380,node2=http://192.168.100.15:2380,node3=http://192.168.100.16:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_STRICT_RECONFIG_CHECK="true"
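After saving the config on all three nodes, restart etcd everywhere (and enable it on boot) so the cluster can form; the original notes skip this step, so take it as an assumed but necessary command:

systemctl restart etcd
systemctl enable etcd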

## Check the cluster health
[root@k8s-master etcd]# etcdctl cluster-health
member 64bd63aa56a32a5d is healthy: got healthy result from http://192.168.100.15:2379
member 7e704f88b84c5897 is healthy: got healthy result from http://192.168.100.14:2379
member ebffce836f747aca is healthy: got healthy result from http://192.168.100.16:2379
cluster is healthy 
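Listing the members is another quick check that all three nodes have joined (etcdctl v2 syntax, same as above):

[root@k8s-master etcd]# etcdctl member list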

Note: the settings marked with a ## comment above are per-node values and must be changed on each node.
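For example, on node2 (192.168.100.15, per the ETCD_INITIAL_CLUSTER list above) those per-node values become:

ETCD_NAME="node2"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.100.15:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.100.15:2379"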

5. Change flanneld's etcd endpoints to all three addresses, and create the network key

vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.100.14:2379,http://192.168.100.15:2379,http://192.168.100.16:2379"
[root@k8s-master etcd]# scp -rp /etc/sysconfig/flanneld 192.168.100.15:/etc/sysconfig/flanneld
[root@k8s-master etcd]# scp -rp /etc/sysconfig/flanneld 192.168.100.16:/etc/sysconfig/flanneld

## This can be run on any one of the nodes
[root@k8s-master etcd]#etcdctl mk /atomic.io/network/config    '{ "Network":  "172.18.0.0/16"  }'
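To confirm the key was written:

[root@k8s-master etcd]# etcdctl get /atomic.io/network/config
{ "Network":  "172.18.0.0/16"  }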

[root@k8s-master etcd]#systemctl restart flanneld.service
[root@k8s-master etcd]#systemctl restart docker
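After the restarts, flanneld should have leased a subnet out of 172.18.0.0/16; assuming the default subnet file location, this can be checked with:

[root@k8s-master etcd]# cat /run/flannel/subnet.env
## expect FLANNEL_NETWORK=172.18.0.0/16 plus a FLANNEL_SUBNET inside it; docker0 should use that subnet after docker restarts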

6. Install the master components on k8s-node1

[root@k8s-node1 ~]# systemctl stop kubelet.service kube-proxy.service 
[root@k8s-node1 ~]# systemctl disable kubelet.service kube-proxy.service 
Removed symlink /etc/systemd/system/multi-user.target.wants/kubelet.service.
Removed symlink /etc/systemd/system/multi-user.target.wants/kube-proxy.service.
[root@k8s-node1 ~]# yum -y install kubernetes-master.x86_64 

7. Modify the config files on both masters

[root@k8s-master etcd]# vim /etc/kubernetes/apiserver 
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.100.14:2379,http://192.168.100.15:2379,http://192.168.100.16:2379"
[root@k8s-master etcd]# vim /etc/kubernetes/config 
KUBE_MASTER="--master=http://127.0.0.1:8080"    ## the local master (option 1 from above)
[root@k8s-master etcd]#scp -rp /etc/kubernetes/apiserver 192.168.100.15:/etc/kubernetes/apiserver
[root@k8s-master etcd]#scp -rp /etc/kubernetes/config 192.168.100.15:/etc/kubernetes/config

## Restart the master services on both machines
[root@k8s-master etcd]#systemctl restart kube-apiserver.service kube-controller-manager.service kube-scheduler.service
 
[root@k8s-node1 ~]# systemctl restart kube-apiserver.service kube-controller-manager.service kube-scheduler.service 
[root@k8s-node1 ~]# systemctl enable kube-apiserver.service kube-controller-manager.service kube-scheduler.service 
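On each master you can then confirm its local control plane is healthy (componentstatuses also shows whether the etcd cluster is reachable):

[root@k8s-master etcd]# kubectl -s http://127.0.0.1:8080 get componentstatuses
[root@k8s-node1 ~]# kubectl -s http://127.0.0.1:8080 get componentstatuses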

8. Install and configure keepalived on both master nodes

[root@k8s-master etcd]# yum -y install keepalived.x86_64 
[root@k8s-master ~]# vi /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id 14
}

vrrp_instance VI_1 {
    state BACKUP
    interface eno16777736
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       192.168.100.13
    }
}


[root@k8s-node1 ~]# vi /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id 15
}

vrrp_instance VI_1 {
    state BACKUP
    interface eno16777736
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       192.168.100.13
    }
}

## Start keepalived on both masters
systemctl start keepalived.service 
systemctl enable keepalived.service

Test:

Stop keepalived on the original master and check on k8s-node1 (the new master) whether the VIP has floated over.

[root@k8s-node1 ~]#ip addr
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:9a:ab:23 brd ff:ff:ff:ff:ff:ff
    inet 192.168.100.15/24 brd 192.168.100.255 scope global dynamic eno16777736
       valid_lft 1625sec preferred_lft 1625sec
    inet6 fe80::20c:29ff:fe9a:ab23/64 scope link 
       valid_lft forever preferred_lft forever

[root@k8s-master etcd]# systemctl stop keepalived.service 


[root@k8s-node1 ~]#ip addr
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:9a:ab:23 brd ff:ff:ff:ff:ff:ff
    inet 192.168.100.15/24 brd 192.168.100.255 scope global dynamic eno16777736
       valid_lft 1581sec preferred_lft 1581sec
    inet 192.168.100.13/32 scope global eno16777736
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe9a:ab23/64 scope link 
       valid_lft forever preferred_lft forever

  

For complete high availability you also need a script that monitors whether the api-server itself is down and triggers a VIP failover when it is; what we have so far only handles machine-level failure.
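Here is a minimal sketch of such a check, using keepalived's vrrp_script / track_script mechanism. The script path, interval and weight are illustrative assumptions, not part of the original notes: the script probes the local api-server's insecure port, and when it fails keepalived lowers this node's priority enough (150 - 60 = 90, below the backup's 100) that the VIP floats to the other master.

## /etc/keepalived/check_apiserver.sh (on both masters)
#!/bin/bash
## exit non-zero when the local api-server no longer answers on its insecure port
curl -sf -o /dev/null http://127.0.0.1:8080/healthz

chmod +x /etc/keepalived/check_apiserver.sh

## in /etc/keepalived/keepalived.conf, define the check and reference it from the vrrp_instance
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 3
    weight -60
}

vrrp_instance VI_1 {
    ...
    track_script {
        check_apiserver
    }
}

systemctl restart keepalived.service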

9. Point the node2 services at the VIP

[root@k8s-node2 ~]# vi /etc/kubernetes/config
KUBE_MASTER="--master=http://192.168.100.13:8080"
[root@k8s-node2 ~]# vi /etc/kubernetes/kubelet 
KUBELET_API_SERVER="--api-servers=http://192.168.100.13:8080"
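Presumably kubelet and kube-proxy on node2 then need a restart for the new address to take effect (not shown in the original notes):

[root@k8s-node2 ~]# systemctl restart kubelet.service kube-proxy.service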

Remove the stale node entry (k8s-node1 is now a master, not a node):
[root@k8s-node1 ~]# kubectl get nodes
NAME        STATUS     AGE
k8s-node1   NotReady   1h
k8s-node2   Ready      1h
[root@k8s-node1 ~]# kubectl delete node k8s-node1 
node "k8s-node1" deleted
[root@k8s-node1 ~]# kubectl get nodes
NAME        STATUS    AGE
k8s-node2   Ready     1h

10. Test

Power off the first master; the cluster can still be managed through the surviving master via the VIP, so the high-availability setup is complete.

[root@k8s-master etcd]# poweroff

[root@k8s-node2 ~]# kubectl -s 192.168.100.13:8080 get node
NAME        STATUS    AGE
k8s-node2   Ready     1h

  

  


Reposted from www.cnblogs.com/hsgoose/p/12924383.html