a35. Ansible Production Case -- Installing Kubernetes v1.22 from Binary Packages -- Cluster Upgrade (Part 2)

18. Upgrading Kubernetes

First, back up the existing yaml files:

[root@k8s-master01 ~]# mkdir bak
[root@k8s-master01 ~]# mv *.yaml bak/
[root@k8s-master01 ~]# ls bak/
admin.yaml  bootstrap.secret.yaml  calico-etcd.yaml  components.yaml  coredns.yaml  recommended.yaml

18.1 etcd

18.1.1 Upgrade etcd

[root@ansible-server ansible]# mkdir -p roles/etcd-update/{files,tasks}
[root@ansible-server ansible]# cd roles/etcd-update/
[root@ansible-server etcd-update]# ls
files  tasks

[root@ansible-server etcd-update]# wget https://github.com/etcd-io/etcd/releases/download/v3.5.1/etcd-v3.5.1-linux-amd64.tar.gz

[root@ansible-server etcd-update]# tar -xf etcd-v3.5.1-linux-amd64.tar.gz --strip-components=1 -C files/ etcd-v3.5.1-linux-amd64/etcd{,ctl}
[root@ansible-server etcd-update]# ls files/
etcd  etcdctl
[root@ansible-server etcd-update]# rm -f etcd-v3.5.1-linux-amd64.tar.gz

[root@ansible-server etcd-update]# vim tasks/upgrade_etcd01.yml
- name: stop etcd
  systemd:
    name: etcd
    state: stopped
  when:
    - ansible_hostname=="k8s-etcd01"
- name: copy etcd files to etcd01
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - etcd
    - etcdctl
  when:
    - ansible_hostname=="k8s-etcd01"
- name: start etcd
  systemd:
    name: etcd
    state: restarted
  when:
    - ansible_hostname=="k8s-etcd01"

[root@ansible-server etcd-update]# vim tasks/upgrade_etcd02.yml
- name: stop etcd
  systemd:
    name: etcd
    state: stopped
  when:
    - ansible_hostname=="k8s-etcd02"
- name: copy etcd files to etcd02
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - etcd
    - etcdctl
  when:
    - ansible_hostname=="k8s-etcd02"
- name: start etcd
  systemd:
    name: etcd
    state: restarted
  when:
    - ansible_hostname=="k8s-etcd02"

[root@ansible-server etcd-update]# vim tasks/upgrade_etcd03.yml
- name: stop etcd
  systemd:
    name: etcd
    state: stopped
  when:
    - ansible_hostname=="k8s-etcd03"
- name: copy etcd files to etcd03
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - etcd
    - etcdctl
  when:
    - ansible_hostname=="k8s-etcd03"
- name: start etcd
  systemd:
    name: etcd
    state: restarted
  when:
    - ansible_hostname=="k8s-etcd03"

[root@ansible-server etcd-update]# vim tasks/main.yml
- include: upgrade_etcd01.yml
- include: upgrade_etcd02.yml
- include: upgrade_etcd03.yml

[root@ansible-server ansible]# tree roles/etcd-update/
roles/etcd-update/
├── files
│   ├── etcd
│   └── etcdctl
└── tasks
    ├── main.yml
    ├── upgrade_etcd01.yml
    ├── upgrade_etcd02.yml
    └── upgrade_etcd03.yml

2 directories, 6 files

[root@ansible-server ansible]# vim etcd_update_role.yml
---
- hosts: etcd

  roles:
    - role: etcd-update

[root@ansible-server ansible]# ansible-playbook etcd_update_role.yml

18.1.2 Verify etcd

[root@k8s-etcd01 ~]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|     ENDPOINT      |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 172.31.3.108:2379 | a9fef56ff96ed75c |   3.5.1 |  5.4 MB |      true |      false |         4 |      53749 |              53749 |        |
| 172.31.3.109:2379 | 8319ef09e8b3d277 |   3.5.1 |  5.6 MB |     false |      false |         4 |      53749 |              53749 |        |
| 172.31.3.110:2379 | 209a1f57c506dba2 |   3.5.1 |  5.8 MB |     false |      false |         4 |      53749 |              53749 |        |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
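
Optionally, you can also run a quick health check against the same endpoints (same certificate flags as above); each member should report healthy:

[root@k8s-etcd01 ~]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health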

18.2 master

18.2.1 Upgrade master

[root@ansible-server ansible]# mkdir -p roles/kubernetes-master-update/{files,tasks,templates,vars}
[root@ansible-server ansible]# cd roles/kubernetes-master-update/
[root@ansible-server kubernetes-master-update]# ls
files  tasks  templates  vars

[root@ansible-server kubernetes-master-update]# wget https://dl.k8s.io/v1.23.5/kubernetes-server-linux-amd64.tar.gz
[root@ansible-server kubernetes-master-update]# tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C files/ kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
[root@ansible-server kubernetes-master-update]# ls files/
kube-apiserver  kube-controller-manager  kubectl  kubelet  kube-proxy  kube-scheduler
[root@ansible-server kubernetes-master-update]# rm -f kubernetes-server-linux-amd64.tar.gz 

#Change the MASTER01, MASTER02 and MASTER03 IP addresses below to your own, and set HARBOR_DOMAIN to your own harbor domain name
[root@ansible-server kubernetes-master-update]# vim vars/main.yml
MASTER01: 172.31.3.101
MASTER02: 172.31.3.102
MASTER03: 172.31.3.103
HARBOR_DOMAIN: harbor.raymonds.cc
PAUSE_VERSION: 3.6

MASTER_SERVICE:
  - kube-apiserver
  - kube-controller-manager
  - kube-scheduler
  - kube-proxy
  - kubelet

[root@ansible-server kubernetes-master-update]# vim templates/10-kubelet.conf.j2 
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image={
    
    { HARBOR_DOMAIN }}/google_containers/pause:{
    
    { PAUSE_VERSION }}"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS

[root@ansible-server kubernetes-master-update]# vim tasks/upgrade_master01.yml
- name: install CentOS or Rocky socat
  yum: 
    name: socat
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.ha
- name: install Ubuntu socat
  apt:
    name: socat
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.ha
- name: download pause image
  shell: |
    docker pull registry.aliyuncs.com/google_containers/pause:{{ PAUSE_VERSION }}
    docker tag registry.aliyuncs.com/google_containers/pause:{{ PAUSE_VERSION }} {{ HARBOR_DOMAIN }}/google_containers/pause:{{ PAUSE_VERSION }}
    docker rmi registry.aliyuncs.com/google_containers/pause:{{ PAUSE_VERSION }}
    docker push {{ HARBOR_DOMAIN }}/google_containers/pause:{{ PAUSE_VERSION }}
  when:
    - ansible_hostname=="k8s-master01"
- name: down master01
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: stop service
  systemd:
    name: "{
    
    { item }}"
    state: stopped
  loop:
    "{
    
    { MASTER_SERVICE }}"
  when:
    - ansible_hostname=="k8s-master01"
- name: copy kubernetes files to master01
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - kube-apiserver
    - kube-controller-manager
    - kubectl
    - kubelet
    - kube-proxy
    - kube-scheduler
  when:
    - ansible_hostname=="k8s-master01"
- name: copy 10-kubelet.conf to master01
  template: 
    src: 10-kubelet.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  when:
    - ansible_hostname=="k8s-master01"
- name: start service
  systemd:
    name: "{
    
    { item }}"
    state: restarted
    daemon_reload: yes
  loop:
    "{
    
    { MASTER_SERVICE }}"
  when:
    - ansible_hostname=="k8s-master01"
- name: up master01
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubernetes-master-update]# vim tasks/upgrade_master02.yml 
- name: down master02
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: stop service
  systemd:
    name: "{
    
    { item }}"
    state: stopped
  loop:
    "{
    
    { MASTER_SERVICE }}"
  when:
    - ansible_hostname=="k8s-master02"
- name: copy kubernetes files to master02
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - kube-apiserver
    - kube-controller-manager
    - kubectl
    - kubelet
    - kube-proxy
    - kube-scheduler
  when:
    - ansible_hostname=="k8s-master02"
- name: copy 10-kubelet.conf to master02
  template: 
    src: 10-kubelet.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  when:
    - ansible_hostname=="k8s-master02"
- name: start service
  systemd:
    name: "{
    
    { item }}"
    state: restarted
    daemon_reload: yes
  loop:
    "{
    
    { MASTER_SERVICE }}"
  when:
    - ansible_hostname=="k8s-master02"
- name: up master02
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubernetes-master-update]# vim tasks/upgrade_master03.yml 
- name: down master03
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: stop service
  systemd:
    name: "{
    
    { item }}"
    state: stopped
  loop:
    "{
    
    { MASTER_SERVICE }}"
  when:
    - ansible_hostname=="k8s-master03"
- name: copy kubernetes files to master03
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - kube-apiserver
    - kube-controller-manager
    - kubectl
    - kubelet
    - kube-proxy
    - kube-scheduler
  when:
    - ansible_hostname=="k8s-master03"
- name: copy 10-kubelet.conf to master03
  template: 
    src: 10-kubelet.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  when:
    - ansible_hostname=="k8s-master03"
- name: start service
  systemd:
    name: "{
    
    { item }}"
    state: restarted
    daemon_reload: yes
  loop:
    "{
    
    { MASTER_SERVICE }}"
  when:
    - ansible_hostname=="k8s-master03"
- name: up master03
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubernetes-master-update]# vim tasks/main.yml
- include: upgrade_master01.yml
- include: upgrade_master02.yml
- include: upgrade_master03.yml

[root@ansible-server kubernetes-master-update]# cd ../../
[root@ansible-server ansible]# tree roles/kubernetes-master-update/
roles/kubernetes-master-update/
├── files
│   ├── kube-apiserver
│   ├── kube-controller-manager
│   ├── kubectl
│   ├── kubelet
│   ├── kube-proxy
│   └── kube-scheduler
├── tasks
│   ├── main.yml
│   ├── upgrade_master01.yml
│   ├── upgrade_master02.yml
│   └── upgrade_master03.yml
├── templates
│   └── 10-kubelet.conf.j2
└── vars
    └── main.yml

4 directories, 12 files

[root@ansible-server ansible]# vim kubernetes_master_update_role.yml
---
- hosts: master:ha

  roles:
    - role: kubernetes-master-update

[root@ansible-server ansible]# ansible-playbook kubernetes_master_update_role.yml

18.2.2 Verify master

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS   ROLES    AGE     VERSION
k8s-master01.example.local   Ready    <none>   4h59m   v1.23.5
k8s-master02.example.local   Ready    <none>   4h59m   v1.23.5
k8s-master03.example.local   Ready    <none>   4h59m   v1.23.5
k8s-node01.example.local     Ready    <none>   4h51m   v1.22.8
k8s-node02.example.local     Ready    <none>   4h51m   v1.22.8
k8s-node03.example.local     Ready    <none>   4h51m   v1.22.8
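
You can also confirm the upgraded control-plane binaries directly on a master; both should report v1.23.5:

[root@k8s-master01 ~]# kube-apiserver --version
Kubernetes v1.23.5
[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.23.5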

18.3 calico

18.3.1 Upgrade calico

[root@ansible-server ansible]# mkdir -p roles/calico-update/{tasks,vars,templates}
[root@ansible-server ansible]# cd roles/calico-update
[root@ansible-server calico-update]# ls
tasks  templates  vars

#Set HARBOR_DOMAIN below to your own harbor domain name, change POD_SUBNET to your planned pod network, and change the MASTER01, MASTER02 and MASTER03 IP addresses to your own
[root@ansible-server calico-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
POD_SUBNET: 192.168.0.0/12
MASTER01: 172.31.3.101
MASTER02: 172.31.3.102
MASTER03: 172.31.3.103

[root@ansible-server calico-update]# wget https://docs.projectcalico.org/manifests/calico-etcd.yaml -O templates/calico-etcd.yaml.j2

[root@ansible-server calico-update]# vim templates/calico-etcd.yaml.j2
...
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: OnDelete #change this: calico will not rolling-update; pods are only updated after kubelet is restarted
  template:
    metadata:
      labels:
        k8s-app: calico-node
...
apiVersion: policy/v1 #change this to v1
kind: PodDisruptionBudget
metadata:
...

#Modify the following content
[root@ansible-server calico-update]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2 
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"

[root@ansible-server calico-update]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "{% for i in groups.etcd %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"#g' templates/calico-etcd.yaml.j2

[root@ansible-server calico-update]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2
  etcd_endpoints: "{% for i in groups.etcd %}https://{
    
    { hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"	

[root@ansible-server calico-update]# vim tasks/calico_file.yml
- name: copy calico-etcd.yaml file
  template:
    src: calico-etcd.yaml.j2
    dest: /root/calico-etcd.yaml
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/config.yml
- name: get ETCD_KEY key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 | tr -d '\n'
  register: ETCD_KEY
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-key:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-key:) null'
    replace: '\1 {{ ETCD_KEY.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: get ETCD_CERT key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/etcd.pem | base64 | tr -d '\n'
  register: ETCD_CERT
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-cert:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-cert:) null'
    replace: '\1 {{ ETCD_CERT.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: get ETCD_CA key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/etcd-ca.pem | base64 | tr -d '\n'
  when:
    - ansible_hostname=="k8s-master01"
  register: ETCD_CA
- name: Modify the ".*etcd-ca:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-ca:) null'
    replace: '\1 {{ ETCD_CA.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_ca:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_ca:) ""'
    replace: '\1 "/calico-secrets/etcd-ca"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_cert:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_cert:) ""'
    replace: '\1 "/calico-secrets/etcd-cert"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_key:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_key:) ""'
    replace: '\1 "/calico-secrets/etcd-key"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*CALICO_IPV4POOL_CIDR.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (- name: CALICO_IPV4POOL_CIDR)'
    replace: '\1'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*192.168.0.0.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '#   (value:) "192.168.0.0/16"'
    replace: '  \1 "{
    
    { POD_SUBNET }}"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the "image:" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(.*image:) docker.io/calico(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/download_images.yml
- name: get calico version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' calico-etcd.yaml
  register: CALICO_VERSION
  when:
    - ansible_hostname=="k8s-master01"
- name: download calico image
  shell: |
    {% for i in CALICO_VERSION.stdout_lines %}
      docker pull registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
      docker tag registry.cn-beijing.aliyuncs.com/raymond9/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/install_calico.yml
- name: install calico
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f calico-etcd.yaml"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/delete_master01_calico_container.yml 
- name: down master01
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig get pod -n kube-system -o wide|grep calico |grep master01 |awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 30s
  when:
    - ansible_hostname=="k8s-master01"
- name: up master01
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER01 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/delete_master02_calico_container.yml 
- name: down master02
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig get pod -n kube-system -o wide|grep calico |grep master02 |awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 30s
  when:
    - ansible_hostname=="k8s-master01"
- name: up master02
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER02 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/delete_master03_calico_container.yml 
- name: down master03
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"
- name: get calico container
  shell:
    cmd: kubectl get --kubeconfig=/etc/kubernetes/admin.kubeconfig pod -n kube-system -o wide|grep calico |grep master03 |awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 30s
  when:
    - ansible_hostname=="k8s-master01"
- name: up master03
  shell:
    cmd: ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/{{ MASTER03 }}' | socat stdio /var/lib/haproxy/haproxy.sock"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico-update]# vim tasks/main.yml
- include: calico_file.yml
- include: config.yml
- include: download_images.yml
- include: install_calico.yml
- include: delete_master01_calico_container.yml
- include: delete_master02_calico_container.yml
- include: delete_master03_calico_container.yml

[root@ansible-server calico-update]# cd ../../
[root@ansible-server ansible]# tree roles/calico-update/
roles/calico-update/
├── tasks
│   ├── calico_file.yml
│   ├── config.yml
│   ├── delete_master01_calico_container.yml
│   ├── delete_master02_calico_container.yml
│   ├── delete_master03_calico_container.yml
│   ├── download_images.yml
│   ├── install_calico.yml
│   └── main.yml
├── templates
│   └── calico-etcd.yaml.j2
└── vars
    └── main.yml

3 directories, 10 files

[root@ansible-server ansible]# vim calico_update_role.yml 
---
- hosts: master:etcd

  roles:
    - role: calico-update

[root@ansible-server ansible]# ansible-playbook calico_update_role.yml
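
After the playbook finishes, the rendered manifest on k8s-master01 should point at your harbor registry and the https etcd endpoints; a quick manual spot check:

[root@k8s-master01 ~]# grep -E "etcd_endpoints:|image:" /root/calico-etcd.yaml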

18.3.2 Verify calico

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide|grep calico|grep master01|tail -n1
calico-node-mg2ts                          1/1     Running   0          3m10s   172.31.3.101     k8s-master01.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-mg2ts -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide|grep calico|grep master02|tail -n1
calico-node-zbsmp                          1/1     Running   0          3m      172.31.3.102     k8s-master02.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-zbsmp -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide|grep calico|grep master03|tail -n1
calico-node-p4c2x                          1/1     Running   0          2m54s   172.31.3.103     k8s-master03.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-p4c2x -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
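
Because the DaemonSet uses updateStrategy: OnDelete, calico-node pods are only replaced as they are deleted; you can watch rollout progress via the UP-TO-DATE column:

[root@k8s-master01 ~]# kubectl get ds calico-node -n kube-system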

18.4 node

18.4.1 Upgrade node

[root@ansible-server ansible]# mkdir -p roles/kubernetes-node-update/{files,tasks,templates,vars}
[root@ansible-server ansible]# cd roles/kubernetes-node-update/
[root@ansible-server kubernetes-node-update]# ls
files  tasks  templates  vars

[root@ansible-server kubernetes-node-update]# cp /data/ansible/roles/kubernetes-master-update/files/{kubelet,kube-proxy} files/
[root@ansible-server kubernetes-node-update]# ls files/
kubelet  kube-proxy

#Set HARBOR_DOMAIN below to your own harbor domain name
[root@ansible-server kubernetes-node-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
PAUSE_VERSION: 3.6

NODE_SERVICE:
  - kube-proxy
  - kubelet

[root@ansible-server kubernetes-node-update]# vim templates/10-kubelet.conf.j2
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image={
    
    { HARBOR_DOMAIN }}/google_containers/pause:{
    
    { PAUSE_VERSION }}"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS

[root@ansible-server kubernetes-node-update]# vim tasks/upgrade_node01.yml
- name: drain node01
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig drain k8s-node01.example.local --delete-emptydir-data --force --ignore-daemonsets
  when:
    - ansible_hostname=="k8s-master01"
- name: stop service
  systemd:
    name: "{
    
    { item }}"
    state: stopped
  loop:
    "{
    
    { NODE_SERVICE }}"
  when:
    - ansible_hostname=="k8s-node01"
- name: copy kubernetes files to node01
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - kubelet
    - kube-proxy
  when:
    - ansible_hostname=="k8s-node01"
- name: copy 10-kubelet.conf to node01
  template: 
    src: 10-kubelet.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  when:
    - ansible_hostname=="k8s-node01"
- name: start service
  systemd:
    name: "{
    
    { item }}"
    state: restarted
    daemon_reload: yes
  loop:
    "{
    
    { NODE_SERVICE }}"
  when:
    - ansible_hostname=="k8s-node01"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig get pod -n kube-system -o wide|grep calico |grep node01 |tail -n1|awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 60s
  when:
    - ansible_hostname=="k8s-master01"
- name: uncordon node01
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig uncordon k8s-node01.example.local
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubernetes-node-update]# vim tasks/upgrade_node02.yml 
- name: drain node02
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig drain k8s-node02.example.local --delete-emptydir-data --force --ignore-daemonsets
  when:
    - ansible_hostname=="k8s-master01"
- name: stop service
  systemd:
    name: "{
    
    { item }}"
    state: stopped
  loop:
    "{
    
    { NODE_SERVICE }}"
  when:
    - ansible_hostname=="k8s-node02"
- name: copy kubernetes files to node02
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - kubelet
    - kube-proxy
  when:
    - ansible_hostname=="k8s-node02"
- name: copy 10-kubelet.conf to node02
  template: 
    src: 10-kubelet.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  when:
    - ansible_hostname=="k8s-node02"
- name: start service
  systemd:
    name: "{
    
    { item }}"
    state: restarted
    daemon_reload: yes
  loop:
    "{
    
    { NODE_SERVICE }}"
  when:
    - ansible_hostname=="k8s-node02"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig get pod -n kube-system -o wide|grep calico |grep node02 |tail -n1|awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 60s
  when:
    - ansible_hostname=="k8s-master01"
- name: uncordon node02
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig uncordon k8s-node02.example.local
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubernetes-node-update]# vim tasks/upgrade_node03.yml 
- name: drain node03
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig drain k8s-node03.example.local --delete-emptydir-data --force --ignore-daemonsets
  when:
    - ansible_hostname=="k8s-master01"
- name: stop service
  systemd:
    name: "{
    
    { item }}"
    state: stopped
  loop:
    "{
    
    { NODE_SERVICE }}"
  when:
    - ansible_hostname=="k8s-node03"
- name: copy kubernetes files to node03
  copy:
    src: "{
    
    { item }}"
    dest: /usr/local/bin/
    mode: 0755
  loop:
    - kubelet
    - kube-proxy
  when:
    - ansible_hostname=="k8s-node03"
- name: copy 10-kubelet.conf to node03
  template: 
    src: 10-kubelet.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/10-kubelet.conf
  when:
    - ansible_hostname=="k8s-node03"
- name: start service
  systemd:
    name: "{
    
    { item }}"
    state: restarted
    daemon_reload: yes
  loop:
    "{
    
    { NODE_SERVICE }}"
  when:
    - ansible_hostname=="k8s-node03"
- name: get calico container
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig get pod -n kube-system -o wide|grep calico |grep node03 |tail -n1|awk -F " " '{print $1}'
  register: CALICO_CONTAINER
  when:
    - ansible_hostname=="k8s-master01"
- name: delete calico container
  shell: |
    kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig delete pod {{ CALICO_CONTAINER.stdout }} -n kube-system
    sleep 60s
  when:
    - ansible_hostname=="k8s-master01"
- name: uncordon node03
  shell:
    cmd: kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig uncordon k8s-node03.example.local
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubernetes-node-update]# vim tasks/main.yml
- include: upgrade_node01.yml
- include: upgrade_node02.yml
- include: upgrade_node03.yml

[root@ansible-server kubernetes-node-update]# cd ../../
[root@ansible-server ansible]# tree roles/kubernetes-node-update/
roles/kubernetes-node-update/
├── files
│   ├── kubelet
│   └── kube-proxy
├── tasks
│   ├── main.yml
│   ├── upgrade_node01.yml
│   ├── upgrade_node02.yml
│   └── upgrade_node03.yml
├── templates
│   └── 10-kubelet.conf.j2
└── vars
    └── main.yml

4 directories, 8 files

[root@ansible-server ansible]# vim kubernetes_node_update_role.yml
---
- hosts: master01:node

  roles:
      - role: kubernetes-node-update

[root@ansible-server ansible]# ansible-playbook kubernetes_node_update_role.yml 

18.4.2 Verify node

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS   ROLES    AGE     VERSION
k8s-master01.example.local   Ready    <none>   5h27m   v1.23.5
k8s-master02.example.local   Ready    <none>   5h27m   v1.23.5
k8s-master03.example.local   Ready    <none>   5h27m   v1.23.5
k8s-node01.example.local     Ready    <none>   5h19m   v1.23.5
k8s-node02.example.local     Ready    <none>   5h19m   v1.23.5
k8s-node03.example.local     Ready    <none>   5h19m   v1.23.5

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide|grep calico|grep node01|tail -n1
calico-node-2mbmg                          1/1     Running   0          4m11s   172.31.3.111     k8s-node01.example.local     <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-2mbmg -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide|grep calico|grep node02|tail -n1
calico-node-pczws                          1/1     Running   0          3m26s   172.31.3.112     k8s-node02.example.local     <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-pczws -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide|grep calico|grep node03|tail -n1
calico-node-xj7dp                          1/1     Running   0          2m40s   172.31.3.113     k8s-node03.example.local     <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-xj7dp -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
    image: harbor.raymonds.cc/google_containers/node:v3.22.2
    image: harbor.raymonds.cc/google_containers/cni:v3.22.2
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2

18.5 coredns

18.5.1 Upgrade coredns

[root@ansible-server ansible]# mkdir -p roles/coredns-update/{tasks,templates,vars}
[root@ansible-server ansible]# cd roles/coredns-update/
[root@ansible-server coredns-update]# ls
tasks  templates  vars

#Change CLUSTERDNS below to the 10th IP address of your planned service subnet, and set HARBOR_DOMAIN to your own harbor domain name
[root@ansible-server coredns-update]# vim vars/main.yml 
CLUSTERDNS: 10.96.0.10                                 
HARBOR_DOMAIN: harbor.raymonds.cc
 
[root@ansible-server coredns-update]# cat templates/coredns.yaml.j2 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
               - key: k8s-app
                 operator: In
                 values: ["kube-dns"]
             topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.8.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

[root@ansible-server coredns-update]# vim templates/coredns.yaml.j2
...
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop ##delete the loop plugin here to avoid internal loops
        reload
        loadbalance
    }
...
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ CLUSTERDNS }} #change this
...

[root@ansible-server coredns-update]# vim tasks/coredns_file.yml
- name: copy coredns.yaml file
  template:
    src: coredns.yaml.j2
    dest: /root/coredns.yaml
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server coredns-update]# vim tasks/config.yml
- name: Modify the "image:" line
  replace:
    path: /root/coredns.yaml
    regexp: '(.*image:) coredns(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server coredns-update]# vim tasks/download_images.yml
- name: get coredns version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' coredns.yaml
  register: COREDNS_VERSION
- name: download coredns image
  shell: |
    {% for i in COREDNS_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server coredns-update]# vim tasks/install_coredns.yml
- name: install coredns
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f coredns.yaml"

[root@ansible-server coredns-update]# vim tasks/main.yml
- include: coredns_file.yml
- include: config.yml
- include: download_images.yml
- include: install_coredns.yml

[root@ansible-server coredns-update]# cd ../../
[root@ansible-server ansible]# tree roles/coredns-update/
roles/coredns-update/
├── tasks
│   ├── config.yml
│   ├── coredns_file.yml
│   ├── download_images.yml
│   ├── install_coredns.yml
│   └── main.yml
├── templates
│   └── coredns.yaml.j2
└── vars
    └── main.yml

3 directories, 7 files

[root@ansible-server ansible]# vim coredns_update_role.yml
---
- hosts: master01

  roles:
      - role: coredns-update

[root@ansible-server ansible]# ansible-playbook coredns_update_role.yml

18.5.2 Verify coredns

[root@k8s-master01 ~]# kubectl get po -n kube-system -l k8s-app=kube-dns
NAME                      READY   STATUS    RESTARTS   AGE
coredns-787df7b4f-mskj4   1/1     Running   0          22s
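
As a quick functional test of the upgraded CoreDNS (the busybox:1.28 image is pulled from Docker Hub here; substitute an image reachable from your cluster if needed):

[root@k8s-master01 ~]# kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default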

18.6 metrics

18.6.1 Upgrade metrics

[root@ansible-server ansible]# mkdir -p roles/metrics-update/{files,vars,tasks}
[root@ansible-server ansible]# cd roles/metrics-update/
[root@ansible-server metrics-update]# ls
files  tasks  vars

[root@ansible-server metrics-update]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -P files/

#Set HARBOR_DOMAIN below to your own harbor domain name
[root@ansible-server metrics-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc

[root@ansible-server metrics-update]# vim files/components.yaml
...
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
#add the following lines
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra- 
...
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
#add the following two lines
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki
...
      volumes:
      - emptyDir: {}
        name: tmp-dir
#add the following three lines
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
...

[root@ansible-server metrics-update]# vim tasks/metrics_file.yml
- name: copy components.yaml file
  copy:
    src: components.yaml
    dest: /root/components.yaml

[root@ansible-server metrics-update]# vim tasks/config.yml
- name: Modify the "image:" line
  replace:
    path: /root/components.yaml
    regexp: '(.*image:) k8s.gcr.io/metrics-server(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server metrics-update]# vim tasks/download_images.yml
- name: get metrics version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' components.yaml
  register: METRICS_VERSION
- name: download metrics image
  shell: |
    {% for i in METRICS_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server metrics-update]# vim tasks/install_metrics.yml
- name: install metrics
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f components.yaml"

[root@ansible-server metrics-update]# vim tasks/main.yml
- include: metrics_file.yml
- include: config.yml
- include: download_images.yml
- include: install_metrics.yml

[root@ansible-server metrics-update]# cd ../../
[root@ansible-server ansible]# tree roles/metrics-update/
roles/metrics-update/
├── files
│   └── components.yaml
├── tasks
│   ├── config.yml
│   ├── download_images.yml
│   ├── install_metrics.yml
│   ├── main.yml
│   └── metrics_file.yml
└── vars
    └── main.yml

3 directories, 7 files

[root@ansible-server ansible]# vim metrics_update_role.yml 
---
- hosts: master01

  roles:
    - role: metrics-update

[root@ansible-server ansible]# ansible-playbook metrics_update_role.yml

18.6.2 Verify metrics

[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-7575dbff5f-f5rvq            1/1     Running   0          62s
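
Before checking node metrics, you can confirm that the metrics APIService registered by metrics-server is available:

[root@k8s-master01 ~]# kubectl get apiservice v1beta1.metrics.k8s.io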

[root@k8s-master01 ~]# kubectl top node 
NAME                         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01.example.local   96m          4%     1794Mi          49%       
k8s-master02.example.local   95m          4%     1675Mi          46%       
k8s-master03.example.local   90m          4%     1572Mi          43%       
k8s-node01.example.local     65m          3%     986Mi           27%       
k8s-node02.example.local     58m          2%     960Mi           26%       
k8s-node03.example.local     63m          3%     957Mi           26%        

18.7 dashboard

18.7.1 Upgrade dashboard

[root@ansible-server ansible]# mkdir -p roles/dashboard-update/{files,templates,vars,tasks}
[root@ansible-server ansible]# cd roles/dashboard-update/
[root@ansible-server dashboard-update]# ls
files  tasks  templates  vars

[root@ansible-server dashboard-update]# vim files/admin.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@ansible-server dashboard-update]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml -O templates/recommended.yaml.j2

[root@ansible-server dashboard-update]# vim templates/recommended.yaml.j2
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort #add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: {{ NODEPORT }} #add this line
  selector:
    k8s-app: kubernetes-dashboard
...

#Set HARBOR_DOMAIN below to your own harbor domain name
[root@ansible-server dashboard-update]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
NODEPORT: 30005

[root@ansible-server dashboard-update]# vim tasks/dashboard_file.yml
- name: copy recommended.yaml file
  template:
    src: recommended.yaml.j2
    dest: /root/recommended.yaml
- name: copy admin.yaml file
  copy:
    src: admin.yaml
    dest: /root/admin.yaml

[root@ansible-server dashboard-update]# vim tasks/config.yml
- name: Modify the "image:" line
  replace:
    path: /root/recommended.yaml
    regexp: '(.*image:) kubernetesui(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server dashboard-update]# vim tasks/download_images.yml
- name: get dashboard version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' recommended.yaml
  register: DASHBOARD_VERSION
- name: download dashboard image
  shell: |
    {% for i in DASHBOARD_VERSION.stdout_lines %}
      docker pull kubernetesui/{{ i }}
      docker tag kubernetesui/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi kubernetesui/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server dashboard-update]# vim tasks/install_dashboard.yml
- name: install dashboard
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f recommended.yaml -f admin.yaml"

[root@ansible-server dashboard-update]# vim tasks/main.yml
- include: dashboard_file.yml
- include: config.yml
- include: download_images.yml
- include: install_dashboard.yml

[root@ansible-server dashboard-update]# cd ../../
[root@ansible-server ansible]# tree roles/dashboard-update/
roles/dashboard-update/
├── files
│   └── admin.yaml
├── tasks
│   ├── config.yml
│   ├── dashboard_file.yml
│   ├── download_images.yml
│   ├── install_dashboard.yml
│   └── main.yml
├── templates
│   └── recommended.yaml.j2
└── vars
    └── main.yml

4 directories, 8 files

[root@ansible-server ansible]# vim dashboard_update_role.yml 
---
- hosts: master01

  roles:
    - role: dashboard-update

[root@ansible-server ansible]# ansible-playbook dashboard_update_role.yml 

18.7.2 Log in to dashboard

https://172.31.3.101:30005

[root@k8s-master01 ~]#  kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-xtrmb
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 179e165a-80ae-4db7-b3c3-cf1f5d5047b7

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1411 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImdFQlF3cXJIWk9vTVZXejM4LWMxT3RDUUVFdkswRWpuMFhBV1dxVWVrVW8ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXh0cm1iIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIxNzllMTY1YS04MGFlLTRkYjctYjNjMy1jZjFmNWQ1MDQ3YjciLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.BM1Ecw1Mdv3CeYKrA_WwEBQIdFTZZ_2HhMIQL9DKy-cAm81FE17NQOKJNrNOC14bssTBHqqMWcQFgUbbzXj4nXJk5WjUV2oi-BQu0FQFPpd0qsvURqDMrS9hgY4bMYtR-MAEpI6-tdXq_OWYefFAurQrtgLrzsg1vnJEvhe1tJUW0Qc65ouyBP0795xVY8xfwlvJOWcTTy4F6sfYmK9NkyjbplaEoT3J1wTbU9be62GN03JxgyftOdChrXMJ-6JMFxps9lMyCQ-dBF2aviAGWzAWIbWiDZdpOU1v9B_fWAYRu081AYebnLJl9aYPLBCLFKWrowzKptIcMj2P6wadyA

Reposted from blog.csdn.net/qq_25599925/article/details/123805400