SUSE 12 Binary Deployment of Kubernetes 1.19.7 - Extra: Adding a New Node

0. Overview

  • Operations required on the master nodes:
    • Update the kube-apiserver certificate so that the new node IPs are included in the certificate
  • Operations required on the new node nodes:
    • Deploy flannel, docker, kubelet, and kube-proxy

1. Prepare the node environment

1.0, modify the configuration script parameters

  • If your cluster was not deployed following my earlier posts, this step is not necessary
  • The following operations only need to be performed on the k8s-01 node
k8s-01:~ # cd /opt/k8s/bin/
k8s-01:/opt/k8s/bin # vim k8s-env.sh         
# Change NODE_IPS to the IPs of the node nodes to be added
export NODE_IPS=( 192.168.72.44 192.168.72.45 )

# Change NODE_NAMES to the hostnames of the node nodes to be added
export NODE_NAMES=( k8s-06 k8s-07 )

1.1, configure passwordless SSH

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    expect -c "
    spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@${host}
        expect {
                \"*yes/no*\" {send \"yes\r\"; exp_continue}
                \"*Password*\" {send \"123.com\r\"; exp_continue}
                \"*Password*\" {send \"123.com\r\";}
               }"
done
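  • The script above assumes that the key pair /root/.ssh/id_rsa already exists on k8s-01 (left over from the original deployment) and that the new nodes use the root password 123.com; if the key pair is missing, a minimal sketch for generating it non-interactively first:
k8s-01:~ # ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa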

1.2, add hosts resolution

k8s-01:~ # cat >> /etc/hosts <<EOF
> 192.168.72.44 k8s-06
> 192.168.72.45 k8s-07
> EOF
  • Distribute to other nodes
#!/usr/bin/env bash

for host in k8s-02 k8s-03 k8s-04 k8s-05 k8s-06 k8s-07
do
    printf "\e[1;34m${host}\e[0m\n"
    scp /etc/hosts ${host}:/etc/hosts
done

1.3, modify the host name

#!/usr/bin/env bash

for host in 6 7
do
    printf "\e[1;34mk8s-0${host}\e[0m\n"
    ssh root@k8s-0${host} "hostnamectl set-hostname --static k8s-0${host}"
done

1.4, update the PATH variable

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "echo 'PATH=$PATH:/opt/k8s/bin' >> /etc/profile"
done

1.5, install dependency packages

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "zypper in -y ntp ipset iptables curl sysstat wget"
done

1.6, turn off the firewall and swap partition

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "systemctl disable SuSEfirewall2.service --now"
    ssh root@${host} "iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat"
    ssh root@${host} "iptables -P FORWARD ACCEPT"
    ssh root@${host} "swapoff -a"
    ssh root@${host} "sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab"
done

1.7, load the kernel modules

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "modprobe ip_vs_rr"
    ssh root@${host} "modprobe br_netfilter"
    ssh root@${host} "echo 'modprobe ip_vs_rr' >> /etc/rc.local"
    ssh root@${host} "echo 'modprobe br_netfilter' >> /etc/rc.local"
    ssh root@${host} "chmod +x /etc/rc.local"
done
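  • Optionally, you can verify that both modules are actually loaded on each new node; a small check loop following the same pattern as the scripts above:
#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    # ip_vs_rr and br_netfilter should both appear in the lsmod output
    ssh root@${host} "lsmod | egrep 'ip_vs_rr|br_netfilter'"
done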

1.8, kernel optimization

  • The k8s kernel optimization file is already in place on the k8s-01 node, so simply scp it to the new nodes and apply it
#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    scp /etc/sysctl.d/kubernetes.conf ${host}:/etc/sysctl.d/kubernetes.conf
    ssh root@${host} "sysctl -p /etc/sysctl.d/kubernetes.conf"
done
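  • For reference, a typical /etc/sysctl.d/kubernetes.conf for this kind of deployment looks roughly like the lines below; this is only an illustrative sketch, and the file already present on k8s-01 from the original deployment remains authoritative.
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0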

1.9, create the directories required for deployment

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "mkdir -p /opt/k8s/bin /etc/kubernetes/cert"
done

2. Update the kube-apiserver certificate

2.0, create a new kubernetes CSR file

k8s-01:~ # cd /opt/k8s/ssl/
k8s-01:/opt/k8s/ssl # source /opt/k8s/bin/k8s-env.sh
k8s-01:/opt/k8s/ssl # cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.72.39",
    "192.168.72.40",
    "192.168.72.41",
    "192.168.72.42",
    "192.168.72.43",
    "192.168.72.44",
    "192.168.72.45",
    "${CLUSTER_KUBERNETES_SVC_IP}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "bandian"
    }
  ]
}
EOF

2.1. Generate a new kubernetes certificate and private key

k8s-01:/opt/k8s/ssl # cfssl gencert -ca=/opt/k8s/ssl/ca.pem \
-ca-key=/opt/k8s/ssl/ca-key.pem \
-config=/opt/k8s/ssl/ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
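  • Before distributing the files, you can optionally confirm that the new node IPs (192.168.72.44 and 192.168.72.45) now appear in the certificate's SAN list:
k8s-01:/opt/k8s/ssl # openssl x509 -in kubernetes.pem -noout -text | grep -A 1 "Subject Alternative Name"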

2.2. Distribute the new kubernetes certificate and private key

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${MASTER_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    scp /opt/k8s/ssl/kubernetes*.pem ${host}:/etc/kubernetes/cert/
done

2.3, restart kube-apiserver

  • Make the new certificate and private key take effect
#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${MASTER_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "systemctl daemon-reload && \
                      systemctl restart kube-apiserver && \
                      systemctl status kube-apiserver | grep Active"
done
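  • After the restart, a quick way to confirm the apiserver is healthy again is the health endpoint, which should print ok:
k8s-01:~ # kubectl get --raw='/healthz'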

3. Deploy flannel network

  • The flannel configuration was already prepared during the initial deployment, so you only need to distribute the files to the new nodes and start the flanneld service.

3.0. Distribute the certificate files to the new nodes

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "mkdir -p /etc/flanneld/cert"
    scp /opt/k8s/ssl/ca.pem ${host}:/etc/kubernetes/cert/
    scp /opt/k8s/ssl/flanneld*.pem ${host}:/etc/flanneld/cert/
    scp /opt/k8s/packages/flannel/{flanneld,mk-docker-opts.sh} ${host}:/opt/k8s/bin/
    scp /opt/k8s/conf/flanneld.service ${host}:/etc/systemd/system/
done

3.1, start flanneld service

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "systemctl daemon-reload && \
                      systemctl enable flanneld --now && \
                      systemctl status flanneld | grep Active"
done

3.2. Check whether the new nodes have a flannel network interface

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "ip a | grep flannel | grep -w inet"
done

4. Deploy docker

  • Same as above: you only need to distribute the files and start docker

4.0. Distribute files to new nodes

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "mkdir /etc/docker"
    scp /opt/k8s/packages/docker/* ${host}:/usr/bin/
    scp /opt/k8s/conf/daemon.json ${host}:/etc/docker/
    scp /opt/k8s/conf/docker.service ${host}:/etc/systemd/system/
done

4.1, start the docker service

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "systemctl daemon-reload && \
                      systemctl enable docker --now && \
                      systemctl status docker | grep Active"
done

4.2. Check whether the docker and flannel interfaces on the new nodes are in the same network segment

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} 'ifconfig | egrep "docker*|flannel*" -A 1'
done

5. Deploy kubelet components

5.0, create kubelet bootstrap kubeconfig file

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for node_name in ${NODE_NAMES[@]}
do
    printf "\e[1;34m${node_name}\e[0m\n"
    # Create a bootstrap token for this node
    export BOOTSTRAP_TOKEN=$(kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:${node_name} \
    --kubeconfig ~/.kube/config)

    # Set cluster parameters
    kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/cert/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=/opt/k8s/ssl/kubelet-bootstrap-${node_name}.kubeconfig

    # Set client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
    --token=${BOOTSTRAP_TOKEN} \
    --kubeconfig=/opt/k8s/ssl/kubelet-bootstrap-${node_name}.kubeconfig

    # Set context parameters
    kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=/opt/k8s/ssl/kubelet-bootstrap-${node_name}.kubeconfig

    # Set the default context
    kubectl config use-context default --kubeconfig=/opt/k8s/ssl/kubelet-bootstrap-${node_name}.kubeconfig
done
"查看kubeadm为新节点创建的token"
k8s-01:/opt/k8s/ssl # kubeadm token list --kubeconfig ~/.kube/config
TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
6sp12t.btr31aj1hc403tar   23h         2021-02-16T01:34:59+08:00   authentication,signing   kubelet-bootstrap-token                                    system:bootstrappers:k8s-06
bajiy9.b4fhfy8serfmyve0   23h         2021-02-16T01:35:00+08:00   authentication,signing   kubelet-bootstrap-token                                    system:bootstrappers:k8s-07

5.1. Distribute files to new nodes

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for (( i=0; i < 2; i++ ))
do
    sed -e "s/##NODE_IP##/${NODE_IPS[i]}/" /opt/k8s/conf/kubelet.service.template > \
           /opt/k8s/conf/kubelet-${NODE_IPS[i]}.service
    sed -e "s/##NODE_IP##/${NODE_IPS[i]}/" /opt/k8s/conf/kubelet-config.yaml.template > \
           /opt/k8s/conf/kubelet-config-${NODE_IPS[i]}.yaml.template
done

for node_name in ${NODE_NAMES[@]}
do
    printf "\e[1;34m${node_name}\e[0m\n"
    scp /opt/k8s/ssl/kubelet-bootstrap-${node_name}.kubeconfig \
        ${node_name}:/etc/kubernetes/cert/kubelet-bootstrap.kubeconfig
done

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    scp /opt/k8s/bin/kubelet ${host}:/opt/k8s/bin/kubelet
    scp /opt/k8s/conf/kubelet-${host}.service ${host}:/etc/systemd/system/kubelet.service
    scp /opt/k8s/conf/kubelet-config-${host}.yaml.template ${host}:/etc/kubernetes/kubelet-config.yaml
    scp /opt/k8s/packages/pause.tar ${host}:/opt/k8s/
    ssh root@${host} "docker load -i /opt/k8s/pause.tar"
done

5.2, start the kubelet service

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "mkdir -p ${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/"
    ssh root@${host} "systemctl daemon-reload && \
                      systemctl enable kubelet --now && \
                      systemctl status kubelet | grep Active"
done

5.3. Check whether the new nodes are Ready

k8s-01:~ # kubectl get node
NAME            STATUS   ROLES    AGE   VERSION
192.168.72.39   Ready    <none>   2d    v1.19.7
192.168.72.40   Ready    <none>   2d    v1.19.7
192.168.72.41   Ready    <none>   2d    v1.19.7
192.168.72.42   Ready    <none>   2d    v1.19.7
192.168.72.43   Ready    <none>   2d    v1.19.7
192.168.72.44   Ready    <none>   81s   v1.19.7
192.168.72.45   Ready    <none>   79s   v1.19.7

5.4, manually approve the server cert CSRs

k8s-01:~ # kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
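  • Afterwards you can check that no CSRs remain in the Pending state, for example:
k8s-01:~ # kubectl get csr | grep Pending || echo "no pending csr"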

6. Deploy kube-proxy

  • Similarly, you only need to start kube-proxy after distributing the files

6.0. Distribute files to new nodes

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for (( i=0; i < 2; i++ ))
do
    sed -e "s/##NODE_IP##/${NODE_IPS[i]}/" /opt/k8s/conf/kube-proxy.service.template > \
           /opt/k8s/conf/kube-proxy-${NODE_IPS[i]}.service
    sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" \
    /opt/k8s/conf/kube-proxy-config.yaml.template > /opt/k8s/conf/kube-proxy-config-${NODE_IPS[i]}.yaml.template
done

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    scp /opt/k8s/ssl/kube-proxy.kubeconfig ${host}:/etc/kubernetes/cert
    scp /opt/k8s/conf/kube-proxy-${host}.service ${host}:/etc/systemd/system/kube-proxy.service
    scp /opt/k8s/conf/kube-proxy-config-${host}.yaml.template \
        ${host}:/etc/kubernetes/kube-proxy-config.yaml
    scp /opt/k8s/packages/conntrack ${host}:/opt/k8s/bin/
    scp /opt/k8s/packages/kubernetes/server/bin/kube-proxy ${host}:/opt/k8s/bin/
    ssh root@${host} "chmod +x /opt/k8s/bin/*"
done

6.1, start kube-proxy service

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "mkdir -p ${K8S_DIR}/kube-proxy"
	ssh root@${host} "modprobe ip_vs_rr"
    ssh root@${host} "systemctl daemon-reload && \
                      systemctl enable kube-proxy --now && \
                      systemctl status kube-proxy | grep Active"
done

6.2, check the kube-proxy listening port

#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    ssh root@${host} "ss -nltp | grep kube-proxy"
done
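  • If kube-proxy runs in ipvs mode (the ip_vs_rr module loaded above suggests so), you can also inspect the ipvs rules on the new nodes; this sketch assumes the ipvsadm tool is installed there:
#!/usr/bin/env bash
source /opt/k8s/bin/k8s-env.sh

for host in ${NODE_IPS[@]}
do
    printf "\e[1;34m${host}\e[0m\n"
    # List the ipvs virtual servers created by kube-proxy (requires ipvsadm)
    ssh root@${host} "ipvsadm -ln"
done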
  • At this point, the kubernetes cluster expansion is complete

Origin blog.csdn.net/u010383467/article/details/113813033