K8S関連の問題

継続的に更新

1.マスターホストの再起動後のポート6443の問題(swapoffの問題)

[root@k8s-master ~]# kubectl get node
The connection to the server 10.0.0.80:6443 was refused - did you specify the right host or port?

# 通过 systemctl status kubelet.service 查看状态
[root@k8s-master ~]# systemctl status kubelet.service
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /usr/lib/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: activating (auto-restart) (Result: exit-code) since Tue 2020-12-08 22:19:05 CST; 6s ago
     Docs: https://kubernetes.io/docs/
  Process: 1949 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=255)
 Main PID: 1949 (code=exited, status=255)  # 这里出现错误
 
 # 关闭 swap 分区后重启 kubelet 即可
 swapoff -a
 systemctl restart kubelet.service

次に、ノードマシンは切断後にマスターに参加できません

swapoff -a  # 关闭分区 保险起见 如果分区了的话

kubeadm reset  # 重新配置

kubeadm join....   # 重新生成token加入即可

画像-20201209082115778

3つ目は、kube-system(名前空間)のノードを表示する

ここで主に使用されます

 kubectl get pods -n kube-system # 查看 kube-system(命名空间)中的节点
 --------------------------------------------------------------------
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-7dcc599b9f-gt4lz             1/1     Running   2          24h
coredns-7dcc599b9f-psxlg             1/1     Running   2          24h
etcd-k8s-master                      1/1     Running   2          24h
kube-apiserver-k8s-master            1/1     Running   2          24h
kube-controller-manager-k8s-master   1/1     Running   2          24h
kube-flannel-ds-bkvp4                1/1     Running   0          3h15m
kube-flannel-ds-mwrf6                1/1     Running   2          24h
kube-flannel-ds-s46tm                1/1     Running   1          23h
kube-proxy-b4g6t                     1/1     Running   1          23h
kube-proxy-nc2kz                     1/1     Running   0          3h15m
kube-proxy-qxx5l                     1/1     Running   2          24h
kube-scheduler-k8s-master            1/1     Running   2          24h
# 如果重启次数过多 代表有问题

kubectl get pods -n kube-system  -o wide
 --------------------------------------------------------------------
NAME                                 READY   STATUS    RESTARTS   AGE     IP            NODE          NOMINATED NODE   READINESS GATES
coredns-7dcc599b9f-gt4lz             1/1     Running   2          24h     10.244.0.10   k8s-master    <none>           <none>
coredns-7dcc599b9f-psxlg             1/1     Running   2          24h     10.244.0.8    k8s-master    <none>           <none>
etcd-k8s-master                      1/1     Running   2          24h     10.0.0.80     k8s-master    <none>           <none>
kube-apiserver-k8s-master            1/1     Running   2          24h     10.0.0.80     k8s-master    <none>           <none>
kube-controller-manager-k8s-master   1/1     Running   2          24h     10.0.0.80     k8s-master    <none>           <none>
kube-flannel-ds-bkvp4                1/1     Running   0          3h17m   10.0.0.81     k8s-node-01   <none>           <none>
kube-flannel-ds-mwrf6                1/1     Running   2          24h     10.0.0.80     k8s-master    <none>           <none>
kube-flannel-ds-s46tm                1/1     Running   1          24h     10.0.0.82     k8s-node-02   <none>           <none>
kube-proxy-b4g6t                     1/1     Running   1          24h     10.0.0.82     k8s-node-02   <none>           <none>
kube-proxy-nc2kz                     1/1     Running   0          3h17m   10.0.0.81     k8s-node-01   <none>           <none>
kube-proxy-qxx5l                     1/1     Running   2          24h     10.0.0.80     k8s-master    <none>           <none>
kube-scheduler-k8s-master            1/1     Running   2          24h     10.0.0.80     k8s-master 

#  地址不一致往往是 /etc/hosts 中的主机名与 IP 没有对应配置的问题

画像-20201209112901994

おすすめ

転載: blog.csdn.net/A1L__/article/details/110914933