Renaming Kubernetes node names

https://my.oschina.net/u/3390908/blog/1649764

1. Check the existing nodes


[root@lab2 ~]# kubectl get no
NAME         STATUS    ROLES     AGE       VERSION
10.1.1.111   Ready     node      8d        v1.11.0
10.1.1.68    Ready     node      8d        v1.11.0
10.1.1.8     Ready     master    8d        v1.11.0




2. Set --hostname-override=lab1

[root@lab2 ~]# vi /etc/kubernetes/kubelet

KUBELET_HOSTNAME="--hostname-override=lab1"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
KUBELET_CONFIG="--config=/etc/kubernetes/kubelet-config.yml"
KUBELET_ARGS="--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cert-dir=/etc/kubernetes/pki --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --cluster-dns=10.96.0.10 --cluster-domain=cluster.local "
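The same edit is needed on every node, each with its own name. A minimal sketch, assuming each node uses the same /etc/kubernetes/kubelet layout and already contains a KUBELET_HOSTNAME= line (run the matching line on the matching host):

# Hypothetical sed equivalents of the vi edit above; adjust the name per host
sed -i 's/^KUBELET_HOSTNAME=.*/KUBELET_HOSTNAME="--hostname-override=lab1"/' /etc/kubernetes/kubelet   # on lab1
sed -i 's/^KUBELET_HOSTNAME=.*/KUBELET_HOSTNAME="--hostname-override=lab2"/' /etc/kubernetes/kubelet   # on lab2
sed -i 's/^KUBELET_HOSTNAME=.*/KUBELET_HOSTNAME="--hostname-override=lab3"/' /etc/kubernetes/kubelet   # on lab3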


3. Restart kubelet, wait about three minutes, then check

[root@lab2 ~]# systemctl daemon-reload && systemctl restart kubelet



[root@lab2 ~]# kubectl get no
NAME         STATUS     ROLES     AGE       VERSION
10.1.1.111   Ready      node      8d        v1.11.0
10.1.1.68    NotReady   node      8d        v1.11.0
10.1.1.8     Ready      master    8d        v1.11.0
lab1         NotReady   <none>    9m        v1.11.0
lab2         NotReady   <none>    7m        v1.11.0
lab3         NotReady   <none>    7m        v1.11.0


4. Restart kubelet again, wait about three minutes, then check


[root@lab2 ~]# systemctl daemon-reload && systemctl restart kubelet


[root@lab2 ~]# kubectl get no
NAME         STATUS     ROLES     AGE       VERSION
10.1.1.111   NotReady   node      8d        v1.11.0
10.1.1.62    NotReady   <none>    8m        v1.11.0
10.1.1.68    NotReady   node      8d        v1.11.0
10.1.1.8     NotReady   master    8d        v1.11.0
lab1         Ready      <none>    13m       v1.11.0
lab2         Ready      <none>    12m       v1.11.0
lab3         Ready      <none>    12m       v1.11.0






Stop kubelet on all nodes

[root@lab3 ~]# systemctl stop kubelet





Delete all the stale node objects except the new master (lab1)

[root@lab1 ~]# kubectl delete no 10.1.1.8
[root@lab1 ~]# kubectl delete no 10.1.1.68
[root@lab1 ~]# kubectl delete no 10.1.1.111
[root@lab1 ~]# kubectl delete no lab2
[root@lab1 ~]# kubectl delete no lab3
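A hypothetical one-liner equivalent of the five deletions above:

# Delete every node object except lab1 in one loop
for n in 10.1.1.8 10.1.1.68 10.1.1.111 lab2 lab3; do kubectl delete no "$n"; done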





Delete the kubelet bootstrap kubeconfig and certificates on every node

[root@lab1 ~]# rm -rf /etc/kubernetes/kubelet.conf
[root@lab1 ~]# rm -rf /etc/kubernetes/pki/kubelet*





Restart kubelet on every node
[root@lab1 ~]# systemctl restart kubelet
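The certificate cleanup and kubelet restart have to happen on every node, not only lab1. A minimal sketch, assuming passwordless root SSH from lab1 to the other nodes:

# Hypothetical: repeat the cleanup and restart on lab2 and lab3 over SSH
for h in lab2 lab3; do
    ssh root@"$h" 'rm -rf /etc/kubernetes/kubelet.conf /etc/kubernetes/pki/kubelet* && systemctl restart kubelet'
done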




Check the pending CSRs

[root@lab1 ~]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-W8jDxg0LEZZw6U8V1WudhEBRP0qt4ybPSJ-P8XfDzlo   7s        kubelet-bootstrap   Pending
node-csr-rgnoMPuDdtvfoogsY_zbf1R3KMU_B8M8cBv75AHw8fE   10s       kubelet-bootstrap   Pending
node-csr-vO6qQvzlI974qUHXVpPu6kMP4eBfKMcBECwqY7xWYu4   17s       kubelet-bootstrap   Pending




Approve the certificate signing requests

[root@lab1 ~]# kubectl certificate approve node-csr-W8jDxg0LEZZw6U8V1WudhEBRP0qt4ybPSJ-P8XfDzlo
[root@lab1 ~]# kubectl certificate approve node-csr-rgnoMPuDdtvfoogsY_zbf1R3KMU_B8M8cBv75AHw8fE
[root@lab1 ~]# kubectl certificate approve node-csr-vO6qQvzlI974qUHXVpPu6kMP4eBfKMcBECwqY7xWYu4
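If you would rather not approve them one by one, a single pipeline can approve every pending CSR; this is only safe here because all pending requests come from our own re-registering nodes:

# Approve all CSRs at once
kubectl get csr -o name | xargs kubectl certificate approve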






5. Set node roles


[root@lab2 ~]# kubectl label nodes lab1 node-role.kubernetes.io/master=
[root@lab2 ~]# 
[root@lab2 ~]# kubectl label nodes lab2 node-role.kubernetes.io/node=
node/lab2 labeled
[root@lab2 ~]# kubectl label nodes lab3 node-role.kubernetes.io/node=
node/lab3 labeled

# Taint the master so that it normally does not accept workloads
kubectl taint nodes lab1 node-role.kubernetes.io/master=true:NoSchedule

[root@lab1 ~]# kubectl get nodes
NAME      STATUS    ROLES     AGE       VERSION
lab1      Ready     <none>    15m       v1.11.0
lab2      Ready     <none>    14m       v1.11.0
lab3      Ready     <none>    14m       v1.11.0
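A quick check that the role labels and the NoSchedule taint are actually in place:

# Verify the role labels and the master taint
kubectl get nodes --show-labels | grep node-role.kubernetes.io
kubectl describe node lab1 | grep -i taint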






6. Fix the after-effects


Run:

[root@lab1 coredns]# kubectl delete -f coredns.yaml



Symptom:

[root@lab2 k8s]# kubectl get pods -n kube-system
NAME                       READY     STATUS              RESTARTS   AGE
coredns-6c65fc5cbb-7xvzs   0/1       ContainerCreating   0          2m
coredns-6c65fc5cbb-kh4zg   0/1       ContainerCreating   0          2m



Cause:

Leftover CNI state from renaming the nodes: flannel allocates the renamed node a new pod subnet, but the existing cni0 bridge still holds an address from the old subnet.




Look at the pod's describe output (the last part):

[root@lab2 k8s]# kubectl describe po coredns-6c65fc5cbb-7xvzs -n kube-system

"cni0" already has an IP address different from 10.244.5.1/24



Fix:

Run the following on all nodes:

rm -rf /var/lib/cni/flannel/* && rm -rf /var/lib/cni/networks/cbr0/* && ip link delete cni0
rm -rf /var/lib/cni/networks/cni0/*
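After clearing the stale CNI state, the bridge and IPAM leases need to be recreated. A sketch of one way to do that, assuming the standard kube-flannel and CoreDNS pod labels (app=flannel, k8s-app=kube-dns are assumptions; check your manifests):

# Restart kubelet, then recreate the flannel and CoreDNS pods so cni0 is rebuilt
# with the node's current subnet
systemctl restart kubelet
kubectl -n kube-system delete pod -l app=flannel
kubectl -n kube-system delete pod -l k8s-app=kube-dns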

Reposted from www.cnblogs.com/effortsing/p/10014628.html