部署第二个master节点

我的第二个master节点(master02,主机名linux-node3)ip 192.168.56.14

1、在master01节点执行,直接拷贝整个目录就行

[root@linux-node1 opt]# scp -r /opt/* [email protected]:/opt/
[email protected]'s password: 
etcd                                                                                       100%  509    39.1KB/s   00:00    
etcd                                                                                       100%   18MB  21.5MB/s   00:00    
etcdctl                                                                                    100%   15MB  17.1MB/s   00:00    
ca-key.pem                                                                                 100% 1675    40.9KB/s   00:00    
ca.pem                                                                                     100% 1265   110.6KB/s   00:00    
server-key.pem                                                                             100% 1679    58.7KB/s   00:00    
server.pem                                                                                 100% 1338   873.1KB/s   00:00    
kube-apiserver                                                                             100%  929    33.7KB/s   00:00    
token.csv                                                                                  100%   84    75.9KB/s   00:00    
kube-scheduler                                                                             100%   94     8.8KB/s   00:00    
kube-controller-manager                                                                    100%  483    29.6KB/s   00:00    
kube-apiserver                                                                             100%  184MB  29.0MB/s   00:06    
kube-controller-manager                                                                    100%  156MB  16.5MB/s   00:09    
kube-scheduler                                                                             100%   55MB  27.3MB/s   00:02    
kubectl                                                                                    100%   55MB  18.2MB/s   00:03    
kubelet                                                                                    100%  169MB  14.9MB/s   00:11    
ca-key.pem                                                                                 100% 1679     3.6KB/s   00:00    
ca.pem                                                                                     100% 1359     6.4KB/s   00:00    
server-key.pem                                                                             100% 1675   473.6KB/s   00:00    
server.pem                                                                                 100% 1643   589.4KB/s   00:00 


#启动文件
[root@linux-node1 opt]# scp /usr/lib/systemd/system/kube-apiserver.service /usr/lib/systemd/system/kube-scheduler.service /usr/lib/systemd/system/kube-controller-manager.service [email protected]:/usr/lib/systemd/system/
[email protected]'s password: 
kube-apiserver.service                                                                     100%  282    17.4KB/s   00:00    
kube-scheduler.service                                                                     100%  281    68.8KB/s   00:00    
kube-controller-manager.service                                                            100%  317   288.6KB/s   00:00  
以上完成了etcd目录、kubernetes目录以及systemd启动文件的拷贝

2、在master02节点修改

[root@linux-node3 cfg]# pwd
/opt/kubernetes/cfg

[root@linux-node3 cfg]# vim kube-apiserver 

#将以下这两个地址修改为本节点(master02)的ip
--bind-address=192.168.56.14 \
--advertise-address=192.168.56.14 \
即把apiserver的监听地址(--bind-address)和通告地址(--advertise-address)都改为本机ip 192.168.56.14

3、启动kube-apiserver、kube-controller-manager、kube-scheduler

[root@linux-node3 cfg]# systemctl restart kube-apiserver.service
[root@linux-node3 cfg]# systemctl restart kube-controller-manager.service
[root@linux-node3 cfg]# systemctl restart  kube-scheduler.service
[root@linux-node3 cfg]# 
[root@linux-node3 cfg]# ps -ef|grep kube
root       2045      1 39 00:31 ?        00:00:01 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.56.11:2379,https://192.168.56.12:2379,https://192.168.56.13:2379 --bind-address=192.168.56.14 --secure-port=6443 --advertise-address=192.168.56.14 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root       2054      1 15 00:31 ?        00:00:00 /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=10.0.0.0/24 --cluster-name=kubernetes --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root       2066      1  1 00:31 ?        00:00:00 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect
root       2077   1758  0 00:31 pts/0    00:00:00 grep --color=auto kube
启动服务

4、检查是否正常

[root@linux-node3 cfg]# kubectl get nodes
NAME            STATUS   ROLES    AGE   VERSION
192.168.56.12   Ready    <none>   84m   v1.12.10
192.168.56.13   Ready    <none>   46m   v1.12.10
[root@linux-node3 cfg]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-bIE2vtpw1IAEl4TpIxVgXdmiSHtX8nNqmbaMlzXGGa4   86m   kubelet-bootstrap   Approved,Issued
node-csr-rC1rxYz_xtQc2YZpodClGK9RDgYCR1ikw5x7t-Qv71k   47m   kubelet-bootstrap   Approved,Issued
[root@linux-node3 cfg]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@linux-node3 cfg]# 
检查集群状态

猜你喜欢

转载自www.cnblogs.com/zhaojingyu/p/12322372.html