Foolproof Setup of a Highly Available Kubernetes Cluster

Highly available Kubernetes setup, 11.26

Environment: Ubuntu 18.04, Kubernetes v1.18.3

Configure the k8s and Docker apt sources

cat <<EOF >>/etc/apt/sources.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
# import the two GPG keys
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -
curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -

Docker's official upstream source (an alternative to the Aliyun mirror used below):

deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable

Install some dependencies

apt -y install apt-transport-https ca-certificates curl software-properties-common
apt update   # refresh the indexes so the kubernetes source added above is visible
apt-get install -y kubeadm=1.18.3-00 kubelet=1.18.3-00 kubectl=1.18.3-00
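To keep a later apt upgrade from silently moving these pinned versions, you can optionally hold them:

apt-mark hold kubelet kubeadm kubectl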

Docker source (Aliyun mirror)

add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt update
apt-get install docker-ce docker-ce-cli containerd.io
echo 1 > /proc/sys/net/ipv4/ip_forward
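The echo above only lasts until reboot. A minimal sketch of a persistent variant, plus the bridge-netfilter setting that kubeadm's preflight checks expect:

modprobe br_netfilter
cat <<EOF >> /etc/sysctl.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p   # apply the settings now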

Time synchronization
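No commands are given for this step; a minimal sketch using chrony (systemd-timesyncd or ntpdate would work just as well):

apt-get install -y chrony
systemctl enable chrony && systemctl restart chrony
chronyc tracking   # verify the clock is actually synchronized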

Passwordless SSH between the nodes
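Again no commands in the original; one common approach, assuming root SSH login is allowed between the masters (the IPs are the master addresses used throughout this article):

ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for host in 10.1.28.135 10.1.28.21 10.1.28.63; do
    ssh-copy-id root@$host   # copies the public key to each master
done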

Disable swap
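The kubelet refuses to start while swap is enabled; the usual pair of commands:

swapoff -a                              # turn swap off immediately
sed -i '/ swap / s/^/#/' /etc/fstab     # keep it off after reboot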

Install keepalived
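On Ubuntu 18.04 this is a single package:

apt-get install -y keepalived
# the config file edited below lives at /etc/keepalived/keepalived.conf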

On each node that will serve as a master, edit the keepalived config file (/etc/keepalived/keepalived.conf).

Change:

interface # the NIC name

virtual_ipaddress # the virtual IP you want to use

real_server # the master nodes' IPs

Like so:

master1:


global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.1.28.100
    }
}
virtual_server 10.1.28.100 6443 {
    delay_loop 6
    lb_algo rr    # lb_algo must be a real IPVS scheduler name (rr, wrr, lc, wlc, ...)
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.1.28.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.21 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.63 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

master2:

global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.1.28.100
    }
}
virtual_server 10.1.28.100 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.1.28.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.21 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.63 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

master3:

global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 30
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.1.28.100
    }
}
virtual_server 10.1.28.100 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.1.28.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.21 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.1.28.63 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

Then start keepalived on the masters one at a time, in order (master1 first), running:

systemctl enable keepalived && systemctl start keepalived && systemctl status keepalived

When that's done, run ip a on master1 and you should see the extra virtual IP.
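For example (ens33 and 10.1.28.100 are the interface and VIP from the configs above):

ip addr show ens33 | grep 10.1.28.100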

Getting ready to initialize Kubernetes

Write the init configuration file

Fields you need to change:

kubernetesVersion # the version of the kubeadm components

controlPlaneEndpoint # the virtual IP plus port

certSANs # the master node IPs

vim kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.3
controlPlaneEndpoint: 10.1.28.100:6443
# imageRepository: registry.aliyuncs.com/google_containers  # can be omitted if the images are already present locally
apiServer:
  certSANs:
  - 10.1.28.135
  - 10.1.28.21
  - 10.1.28.63
  - 10.1.28.100
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
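Since kube-proxy is set to ipvs mode, the IPVS kernel modules should be loaded first, or kube-proxy falls back to iptables. A sketch for the 4.15 kernel that ships with Ubuntu 18.04 (on 4.19+ kernels the last module is named nf_conntrack instead):

apt-get install -y ipset ipvsadm
for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do
    modprobe $mod
done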

kubeadm init --config kubeadm-config.yaml # start the initialization

Once it succeeds, run:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

flannel:

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
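Once the flannel pods come up, the masters should report Ready:

kubectl get pods -n kube-system
kubectl get nodes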

The remaining masters join the cluster with the kubeadm join command that carries --control-plane, as sketched below.
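kubeadm prints that join command at the end of init; for the --control-plane variant to work, the control-plane certificates must also be shared, e.g. via kubeadm's upload-certs phase. A sketch with placeholder values (<token>, <hash>, and <key> stand in for whatever your own init/upload-certs output prints):

# on master1: upload the control-plane certs and print a certificate key
kubeadm init phase upload-certs --upload-certs
# on master2/master3:
kubeadm join 10.1.28.100:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <key>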


Script to fetch the images:

images=(
    kube-apiserver:v1.18.3
    kube-controller-manager:v1.18.3
    kube-scheduler:v1.18.3
    kube-proxy:v1.18.3
    pause:3.2
    etcd:3.4.3
    coredns:1.6.7
)

for imageName in ${images[@]} ; do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
done
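If you leave the imageRepository line in kubeadm-config.yaml commented out, run this script on every node before kubeadm init / kubeadm join, so the k8s.gcr.io images are already present locally and nothing needs to be pulled from Google's registry.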


Reprinted from: blog.csdn.net/weixin_44946147/article/details/121551563