a32. Ansible Production Case Study -- Installing Kubernetes v1.22 with kubeadm -- Cluster Deployment (Part 1)

Source code download: https://gitee.com/raymond9/kubernetes-ansible

1. Highly Available Kubernetes Cluster Plan

Role Hostname Specs IP Address Installed Software
ansible ansible-server.example.local 2C2G 172.31.3.100 ansible
master1 k8s-master01.example.local 2C4G 172.31.3.101 chrony-client, docker, kubeadm, kubelet, kubectl
master2 k8s-master02.example.local 2C4G 172.31.3.102 chrony-client, docker, kubeadm, kubelet, kubectl
master3 k8s-master03.example.local 2C4G 172.31.3.103 chrony-client, docker, kubeadm, kubelet, kubectl
ha1 k8s-ha01.example.local 2C2G 172.31.3.104 chrony-server, haproxy, keepalived
ha2 k8s-ha02.example.local 2C2G 172.31.3.105 chrony-server, haproxy, keepalived
harbor1 k8s-harbor01.example.local 2C2G 172.31.3.106 chrony-client, docker, docker-compose, harbor
harbor2 k8s-harbor02.example.local 2C2G 172.31.3.107 chrony-client, docker, docker-compose, harbor
node1 k8s-node01.example.local 2C4G 172.31.3.108 chrony-client, docker, kubeadm, kubelet
node2 k8s-node02.example.local 2C4G 172.31.3.109 chrony-client, docker, kubeadm, kubelet
node3 k8s-node03.example.local 2C4G 172.31.3.110 chrony-client, docker, kubeadm, kubelet

Software versions and Pod/Service CIDR plan:

Configuration Notes
Supported OS versions CentOS 7.9/Stream 8, Rocky 8, Ubuntu 18.04/20.04
Docker version 20.10.14
kubeadm version 1.22.8
Pod CIDR 192.168.0.0/12
Service CIDR 10.96.0.0/12
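
These Pod and Service CIDRs are consumed later when the cluster is initialized with kubeadm. As a rough illustration only (the actual init configuration is covered in a later part of this series and may differ), they map to kubeadm flags such as:

kubeadm init --control-plane-endpoint="172.31.3.188:6443" --pod-network-cidr=192.168.0.0/12 --service-cidr=10.96.0.0/12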

2. Install and Configure Ansible

2.1 Install ansible

#CentOS
[root@ansible-server ~]# yum -y install ansible

[root@ansible-server ~]# ansible --version
ansible 2.9.25
  config file = /data/ansible/ansible.cfg
  configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/lib/python2.7/site-packages/ansible
  executable location = /usr/bin/ansible
  python version = 2.7.5 (default, Oct 14 2020, 14:45:30) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]

#Install the latest ansible on Ubuntu 18.04
root@ubuntu1804:~# apt update

root@ubuntu1804:~# apt -y install software-properties-common

root@ubuntu1804:~# apt-add-repository --yes --update ppa:ansible/ansible

root@ubuntu1804:~# apt -y install ansible
root@ubuntu1804:~# ansible --version
ansible 2.9.27
  config file = /etc/ansible/ansible.cfg
  configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/lib/python2.7/dist-packages/ansible
  executable location = /usr/bin/ansible
  python version = 2.7.17 (default, Feb 27 2021, 15:10:58) [GCC 7.5.0]

#Install ansible on Ubuntu 20.04
[root@ubuntu ~]# apt -y install ansible

2.2 Configure ansible

[root@ansible-server ~]# mkdir /data/ansible
[root@ansible-server ~]# cd /data/ansible

[root@ansible-server ansible]# vim ansible.cfg
[defaults]
inventory      = ./inventory
forks          = 10
roles_path    = ./roles
remote_user = root

#Set the IPs below according to your own k8s cluster host plan
[root@ansible-server ansible]# vim inventory 
[master]
172.31.3.101 hname=k8s-master01
172.31.3.102 hname=k8s-master02
172.31.3.103 hname=k8s-master03

[ha]
172.31.3.104 hname=k8s-ha01
172.31.3.105 hname=k8s-ha02

[harbor]
172.31.3.106 hname=k8s-harbor01
172.31.3.107 hname=k8s-harbor02

[node]
172.31.3.108 hname=k8s-node01
172.31.3.109 hname=k8s-node02
172.31.3.110 hname=k8s-node03

[all:vars]
domain=example.local

[k8s_cluster:children]
master
node

[chrony_server:children]
ha

[chrony_client:children]
master
node
harbor

[keepalives_master]
172.31.3.104

[keepalives_backup]
172.31.3.105

[haproxy:children]
ha

[master01]
172.31.3.101
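
Optionally, the inventory and group layout can be sanity-checked before going any further, for example:

[root@ansible-server ansible]# ansible-inventory --graph
[root@ansible-server ansible]# ansible all --list-hosts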

3. Set Client NIC Names and IPs

#Rocky 8 and CentOS setup
[root@172 ~]# bash reset.sh 

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux           12.Change IP and gateway     *
* 2.Disable firewall          13.Set hostname              *
* 3.Optimize SSH              14.Set PS1 and env variables *
* 4.Set system aliases        15.Disable swap              *
* 5.Apply options 1-4         16.Tune kernel parameters    *
* 6.Set vimrc config file     17.Tune resource limits      *
* 7.Set package repositories  18.Enable Ubuntu root login  *
* 8.Minimal-install packages  19.Remove unused Ubuntu pkgs *
* 9.Install mail service      20.Reboot system             *
* 10.Change SSH port          21.Exit                      *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 11
Rocky 8.5 network interface name changed successfully; reboot the system for it to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux           12.Change IP and gateway     *
* 2.Disable firewall          13.Set hostname              *
* 3.Optimize SSH              14.Set PS1 and env variables *
* 4.Set system aliases        15.Disable swap              *
* 5.Apply options 1-4         16.Tune kernel parameters    *
* 6.Set vimrc config file     17.Tune resource limits      *
* 7.Set package repositories  18.Enable Ubuntu root login  *
* 8.Minimal-install packages  19.Remove unused Ubuntu pkgs *
* 9.Install mail service      20.Reboot system             *
* 10.Change SSH port          21.Exit                      *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 12
Please enter the IP address: 172.31.0.101
IP 172.31.0.101  available!
Please enter the subnet mask length: 21
Please enter the gateway address: 172.31.0.2
IP 172.31.0.2  available!
Rocky 8.5 IP address and gateway changed successfully; reboot the system for the change to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux           12.Change IP and gateway     *
* 2.Disable firewall          13.Set hostname              *
* 3.Optimize SSH              14.Set PS1 and env variables *
* 4.Set system aliases        15.Disable swap              *
* 5.Apply options 1-4         16.Tune kernel parameters    *
* 6.Set vimrc config file     17.Tune resource limits      *
* 7.Set package repositories  18.Enable Ubuntu root login  *
* 8.Minimal-install packages  19.Remove unused Ubuntu pkgs *
* 9.Install mail service      20.Reboot system             *
* 10.Change SSH port          21.Exit                      *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 21

#Ubuntu setup
[C:\~]$ ssh [email protected]


Connecting to 172.31.7.3:22...
Connection established.
To escape to local shell, press 'Ctrl+Alt+]'.

Welcome to Ubuntu 18.04.6 LTS (GNU/Linux 4.15.0-156-generic x86_64)

 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/advantage

  System information as of Mon Dec 27 13:56:42 CST 2021

  System load:  0.17              Processes:            193
  Usage of /:   2.1% of 91.17GB   Users logged in:      1
  Memory usage: 10%               IP address for ens33: 172.31.7.3
  Swap usage:   0%

 * Super-optimized for small spaces - read how we shrank the memory
   footprint of MicroK8s to make it the smallest full K8s around.

   https://ubuntu.com/blog/microk8s-memory-optimisation

19 updates can be applied immediately.
18 of these updates are standard security updates.
To see these additional updates run: apt list --upgradable

New release '20.04.3 LTS' available.
Run 'do-release-upgrade' to upgrade to it.


Last login: Mon Dec 27 13:56:31 2021
/usr/bin/xauth:  file /home/raymond/.Xauthority does not exist
To run a command as administrator (user "root"), use "sudo <command>".
See "man sudo_root" for details.

raymond@ubuntu1804:~$ bash reset.sh 

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux           12.Change IP and gateway     *
* 2.Disable firewall          13.Set hostname              *
* 3.Optimize SSH              14.Set PS1 and env variables *
* 4.Set system aliases        15.Disable swap              *
* 5.Apply options 1-4         16.Tune kernel parameters    *
* 6.Set vimrc config file     17.Tune resource limits      *
* 7.Set package repositories  18.Enable Ubuntu root login  *
* 8.Minimal-install packages  19.Remove unused Ubuntu pkgs *
* 9.Install mail service      20.Reboot system             *
* 10.Change SSH port          21.Exit                      *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 18
Please enter the password: 123456
[sudo] password for raymond: Enter new UNIX password: Retype new UNIX password: passwd: password updated successfully
Ubuntu 18.04 root login has been configured; log in again for it to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux           12.Change IP and gateway     *
* 2.Disable firewall          13.Set hostname              *
* 3.Optimize SSH              14.Set PS1 and env variables *
* 4.Set system aliases        15.Disable swap              *
* 5.Apply options 1-4         16.Tune kernel parameters    *
* 6.Set vimrc config file     17.Tune resource limits      *
* 7.Set package repositories  18.Enable Ubuntu root login  *
* 8.Minimal-install packages  19.Remove unused Ubuntu pkgs *
* 9.Install mail service      20.Reboot system             *
* 10.Change SSH port          21.Exit                      *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 21
raymond@ubuntu1804:~$ exit
logout

Connection closed.

Disconnected from remote host(172.31.7.3:22) at 13:57:16.

Type `help' to learn how to use Xshell prompt.

[C:\~]$ ssh [email protected]


Connecting to 172.31.7.3:22...
Connection established.
To escape to local shell, press 'Ctrl+Alt+]'.

Welcome to Ubuntu 18.04.6 LTS (GNU/Linux 4.15.0-156-generic x86_64)

 * Documentation:  https://help.ubuntu.com
 * Management:     https://landscape.canonical.com
 * Support:        https://ubuntu.com/advantage

  System information as of Mon Dec 27 13:57:47 CST 2021

  System load:  0.06              Processes:            199
  Usage of /:   2.1% of 91.17GB   Users logged in:      1
  Memory usage: 11%               IP address for ens33: 172.31.7.3
  Swap usage:   0%

 * Super-optimized for small spaces - read how we shrank the memory
   footprint of MicroK8s to make it the smallest full K8s around.

   https://ubuntu.com/blog/microk8s-memory-optimisation

19 updates can be applied immediately.
18 of these updates are standard security updates.
To see these additional updates run: apt list --upgradable

New release '20.04.3 LTS' available.
Run 'do-release-upgrade' to upgrade to it.



The programs included with the Ubuntu system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.

Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by
applicable law.

/usr/bin/xauth:  file /root/.Xauthority does not exist
root@ubuntu1804:~# mv /home/raymond/reset.sh .
root@ubuntu1804:~# bash reset.sh 

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux           12.Change IP and gateway     *
* 2.Disable firewall          13.Set hostname              *
* 3.Optimize SSH              14.Set PS1 and env variables *
* 4.Set system aliases        15.Disable swap              *
* 5.Apply options 1-4         16.Tune kernel parameters    *
* 6.Set vimrc config file     17.Tune resource limits      *
* 7.Set package repositories  18.Enable Ubuntu root login  *
* 8.Minimal-install packages  19.Remove unused Ubuntu pkgs *
* 9.Install mail service      20.Reboot system             *
* 10.Change SSH port          21.Exit                      *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 11
Ubuntu 18.04 network interface name changed successfully; reboot the system for it to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux           12.Change IP and gateway     *
* 2.Disable firewall          13.Set hostname              *
* 3.Optimize SSH              14.Set PS1 and env variables *
* 4.Set system aliases        15.Disable swap              *
* 5.Apply options 1-4         16.Tune kernel parameters    *
* 6.Set vimrc config file     17.Tune resource limits      *
* 7.Set package repositories  18.Enable Ubuntu root login  *
* 8.Minimal-install packages  19.Remove unused Ubuntu pkgs *
* 9.Install mail service      20.Reboot system             *
* 10.Change SSH port          21.Exit                      *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 12
Please enter the IP address: 172.31.0.103
IP 172.31.0.103  available!
Please enter the subnet mask length: 21
Please enter the gateway address: 172.31.0.2
IP 172.31.0.2  available!
Ubuntu 18.04 IP address and gateway changed successfully; reboot the system for the change to take effect!

************************************************************
*                Initialization Script Menu                *
* 1.Disable SELinux           12.Change IP and gateway     *
* 2.Disable firewall          13.Set hostname              *
* 3.Optimize SSH              14.Set PS1 and env variables *
* 4.Set system aliases        15.Disable swap              *
* 5.Apply options 1-4         16.Tune kernel parameters    *
* 6.Set vimrc config file     17.Tune resource limits      *
* 7.Set package repositories  18.Enable Ubuntu root login  *
* 8.Minimal-install packages  19.Remove unused Ubuntu pkgs *
* 9.Install mail service      20.Reboot system             *
* 10.Change SSH port          21.Exit                      *
* 11.Rename network interface                              *
************************************************************

Please select an option (1-21): 21

4. Script for Key-Based SSH Authentication

#Set the IPs below according to your own k8s cluster host plan
[root@ansible-server ansible]# cat ssh_key.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2021-12-20
#FileName:      ssh_key.sh
#URL:           raymond.blog.csdn.net
#Description:   ssh_key for CentOS 7/8 & Ubuntu 18.04/24.04 & Rocky 8
#Copyright (C): 2021 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

NET_NAME=`ip addr |awk -F"[: ]" '/^2: e.*/{print $3}'`
IP=`ip addr show ${NET_NAME}| awk -F" +|/" '/global/{print $3}'`
export SSHPASS=123456
HOSTS="
172.31.3.101
172.31.3.102
172.31.3.103
172.31.3.104
172.31.3.105
172.31.3.106
172.31.3.107
172.31.3.108
172.31.3.109
172.31.3.110
172.31.3.188"

os(){
    OS_ID=`sed -rn '/^NAME=/s@.*="([[:alpha:]]+).*"$@\1@p' /etc/os-release`
}

ssh_key_push(){
    rm -f ~/.ssh/id_rsa*
    ssh-keygen -f /root/.ssh/id_rsa -P '' &> /dev/null
    if [ ${OS_ID} == "CentOS" -o ${OS_ID} == "Rocky" ] &> /dev/null;then
        rpm -q sshpass &> /dev/null || { ${COLOR}"Installing the sshpass package"${END};yum -y install sshpass &> /dev/null; }
    else
        dpkg -S sshpass &> /dev/null || { ${COLOR}"Installing the sshpass package"${END};apt -y install sshpass &> /dev/null; }
    fi
    sshpass -e ssh-copy-id -o StrictHostKeyChecking=no ${IP} &> /dev/null
    [ $? -eq 0 ] && echo ${IP} is finished || echo ${IP} is false

    for i in ${HOSTS};do
        sshpass -e scp -o StrictHostKeyChecking=no -r /root/.ssh root@${i}: &> /dev/null
        [ $? -eq 0 ] && echo ${i} is finished || echo ${i} is false
    done

    for i in ${HOSTS};do
        scp /root/.ssh/known_hosts ${i}:.ssh/ &> /dev/null
        [ $? -eq 0 ] && echo ${i} is finished || echo ${i} is false
    done
}

main(){
    os
    ssh_key_push
}

main

[root@ansible-server ansible]# bash ssh_key.sh 
172.31.3.100 is finished
172.31.3.101 is finished
172.31.3.102 is finished
172.31.3.103 is finished
172.31.3.104 is finished
172.31.3.105 is finished
172.31.3.106 is finished
172.31.3.107 is finished
172.31.3.108 is finished
172.31.3.109 is finished
172.31.3.110 is finished
172.31.3.101 is finished
172.31.3.102 is finished
172.31.3.103 is finished
172.31.3.104 is finished
172.31.3.105 is finished
172.31.3.106 is finished
172.31.3.107 is finished
172.31.3.108 is finished
172.31.3.109 is finished
172.31.3.110 is finished
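
With the keys pushed, password-less SSH from the ansible server can be verified against the whole inventory, for example:

[root@ansible-server ansible]# ansible all -m ping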

5. System Initialization and Package Installation

5.1 System Initialization

[root@ansible-server ansible]# mkdir -p roles/reset/{tasks,templates,vars}

[root@ansible-server ansible]# cd roles/reset/
[root@ansible-server reset]# ls
tasks  templates  vars

[root@ansible-server reset]# vim templates/yum8.repo.j2 
[BaseOS]
name=BaseOS
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/BaseOS/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/BaseOS/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[AppStream]
name=AppStream
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/AppStream/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/AppStream/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[extras]
name=extras
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/extras/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/extras/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

{% if ansible_distribution =="Rocky" %}
[plus]
{% elif ansible_distribution=="CentOS" %}
[centosplus]
{% endif %}
{% if ansible_distribution =="Rocky" %}
name=plus
{% elif ansible_distribution=="CentOS" %}
name=centosplus
{% endif %}
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/plus/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/centosplus/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[PowerTools]
name=PowerTools
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/PowerTools/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever-stream/PowerTools/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[epel]
name=epel
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/fedora/epel/$releasever/Everything/$basearch/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/epel/$releasever/Everything/$basearch/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=https://{{ ROCKY_URL }}/fedora/epel/RPM-GPG-KEY-EPEL-$releasever
{% elif ansible_distribution=="CentOS" %}
gpgkey=https://{{ URL }}/epel/RPM-GPG-KEY-EPEL-$releasever
{% endif %}

[root@ansible-server reset]# vim templates/yum7.repo.j2 
[base]
name=base
baseurl=https://{{ URL }}/centos/$releasever/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-$releasever

[extras]
name=extras
baseurl=https://{{ URL }}/centos/$releasever/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-$releasever

[updates]
name=updates
baseurl=https://{{ URL }}/centos/$releasever/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-$releasever

[centosplus]
name=centosplus
baseurl=https://{{ URL }}/centos/$releasever/centosplus/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-$releasever

[epel]
name=epel
baseurl=https://{{ URL }}/epel/$releasever/$basearch/
gpgcheck=1
gpgkey=https://{{ URL }}/epel/RPM-GPG-KEY-EPEL-$releasever

[root@ansible-server reset]#  vim templates/apt.list.j2 
deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }} main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }} main restricted universe multiverse

deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-security main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-security main restricted universe multiverse

deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-updates main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-updates main restricted universe multiverse

deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-proposed main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-proposed main restricted universe multiverse

deb http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-backports main restricted universe multiverse
deb-src http://{{ URL }}/ubuntu/ {{ ansible_distribution_release }}-backports main restricted universe multiverse

#Set VIP below to your keepalived VIP (virtual IP) address, and set HARBOR_DOMAIN to your own harbor domain name
[root@ansible-server reset]# vim vars/main.yml
VIP: 172.31.3.188
HARBOR_DOMAIN: harbor.raymonds.cc
ROCKY_URL: mirrors.ustc.edu.cn
URL: mirrors.cloud.tencent.com

[root@ansible-server reset]# vim tasks/set_hostname.yml
- name: set hostname
  hostname:
    name: "{{ hname }}.{{ domain }}"

[root@ansible-server reset]# vim tasks/set_hosts.yml
- name: set hosts file
  lineinfile:
    path: "/etc/hosts"
    line: "{{ item }} {{hostvars[item].ansible_hostname}}.{{ domain }} {{hostvars[item].ansible_hostname}}"
  loop:
    "{{ play_hosts }}"
- name: set hosts file2
  lineinfile:
    path: "/etc/hosts"
    line: "{{ item }}"
  loop:
    - "{{ VIP }} k8s-lb"
    - "{{ VIP }} {{ HARBOR_DOMAIN }}"
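
For reference, with the inventory above these two tasks are intended to produce /etc/hosts entries of roughly the following shape on every node (shown here only as an illustration; hostnames and IPs follow your own plan):

172.31.3.101 k8s-master01.example.local k8s-master01
172.31.3.108 k8s-node01.example.local k8s-node01
172.31.3.188 k8s-lb
172.31.3.188 harbor.raymonds.cc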

[root@ansible-server reset]# vim tasks/disable_selinux.yml
- name: disable selinux
  replace:
    path: /etc/sysconfig/selinux
    regexp: '^(SELINUX=).*'
    replace: '\1disabled'
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")

[root@ansible-server reset]# vim tasks/disable_firewall.yml
- name: disable firewall
  systemd:
    name: firewalld
    state: stopped
    enabled: no
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: disable ufw
  systemd:
    name: ufw
    state: stopped
    enabled: no
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset]# vim tasks/disable_networkmanager.yml
- name: disable NetworkManager
  systemd:
    name: NetworkManager
    state: stopped
    enabled: no
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"

[root@ansible-server reset]# vim tasks/disable_swap.yml
- name: disable swap
  replace:
    path: /etc/fstab
    regexp: '^(.*swap.*)'
    replace: '#\1'
- name: get sd number
  shell:
    cmd: lsblk|awk -F"[ └─]" '/SWAP/{printf $3}'
  register: SD_NAME
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="20"
- name: disable swap for ubuntu20
  shell:
    cmd: systemctl mask dev-{{ SD_NAME.stdout }}.swap
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="20"

[root@ansible-server reset]# vim tasks/set_limits.yml
- name: set limit
  shell:
    cmd: ulimit -SHn 65535
- name: set limits.conf file
  lineinfile:
    path: "/etc/security/limits.conf"
    line: "{{ item }}"
  loop:
    - "* soft nofile 655360"
    - "* hard nofile 131072"
    - "* soft nproc 655350"
    - "* hard nproc 655350"
    - "* soft memlock unlimited"
    - "* hard memlock unlimited" 
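
Note that the ulimit shell task only affects the transient shell it runs in; the persistent limits come from the limits.conf entries once sessions are restarted (the nodes are rebooted in section 5.2 anyway). An optional spot check afterwards could look like:

[root@ansible-server ansible]# ansible k8s_cluster -m shell -a 'ulimit -n'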

[root@ansible-server reset]# vim tasks/optimization_sshd.yml
- name: optimization sshd disable UseDNS
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^#(UseDNS).*'
    replace: '\1 no'
- name: optimization sshd disable CentOS or Rocky GSSAPIAuthentication
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^(GSSAPIAuthentication).*'
    replace: '\1 no'
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: optimization sshd disable Ubuntu GSSAPIAuthentication
  replace:
    path: /etc/ssh/sshd_config
    regexp: '^#(GSSAPIAuthentication).*'
    replace: '\1 no'
  notify:
    - restart sshd
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset]# vim tasks/set_alias.yml
- name: set CentOS or Rocky alias
  lineinfile:
    path: ~/.bashrc
    line: "{{ item }}"
  loop:
    - "alias cdnet=\"cd /etc/sysconfig/network-scripts\""
    - "alias vie0=\"vim /etc/sysconfig/network-scripts/ifcfg-eth0\""
    - "alias vie1=\"vim /etc/sysconfig/network-scripts/ifcfg-eth1\""
    - "alias scandisk=\"echo '- - -' > /sys/class/scsi_host/host0/scan;echo '- - -' > /sys/class/scsi_host/host1/scan;echo '- - -' > /sys/class/scsi_host/host2/scan\""
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: set Ubuntu alias
  lineinfile:
    path: ~/.bashrc
    line: "{{ item }}"
  loop:
    - "alias cdnet=\"cd /etc/netplan\""
    - "alias scandisk=\"echo '- - -' > /sys/class/scsi_host/host0/scan;echo '- - -' > /sys/class/scsi_host/host1/scan;echo '- - -' > /sys/class/scsi_host/host2/scan\""
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset]# vim tasks/set_mirror.yml
- name: find CentOS or Rocky repo files
  find:
    paths: /etc/yum.repos.d/
    patterns: "*.repo"
  register: FILENAME
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: delete CentOS or Rocky repo files
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ FILENAME.files }}"
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: set CentOS8 or Rocky8 Mirror warehouse
  template:
    src: yum8.repo.j2
    dest: /etc/yum.repos.d/base.repo
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: set CentOS7 Mirror warehouse
  template:
    src: yum7.repo.j2
    dest: /etc/yum.repos.d/base.repo
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: set Ubuntu Mirror warehouse
  template:
    src: apt.list.j2
    dest: /etc/apt/sources.list
  when:
    - ansible_distribution=="Ubuntu"
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset]# vim tasks/main.yml
- include: set_hostname.yml
- include: set_hosts.yml
- include: disable_selinux.yml
- include: disable_firewall.yml
- include: disable_networkmanager.yml
- include: disable_swap.yml
- include: set_limits.yml
- include: optimization_sshd.yml
- include: set_alias.yml
- include: set_mirror.yml

[root@ansible-server reset]# cd ../../
[root@ansible-server ansible]# tree roles/reset/
roles/reset/
├── tasks
│   ├── disable_firewall.yml
│   ├── disable_networkmanager.yml
│   ├── disable_selinux.yml
│   ├── disable_swap.yml
│   ├── main.yml
│   ├── optimization_sshd.yml
│   ├── set_alias.yml
│   ├── set_hostname.yml
│   ├── set_hosts.yml
│   ├── set_limits.yml
│   └── set_mirror.yml
├── templates
│   ├── apt.list.j2
│   ├── yum7.repo.j2
│   └── yum8.repo.j2
└── vars
    └── main.yml

3 directories, 15 files

[root@ansible-server ansible]# vim reset_role.yml
---
- hosts: all

  roles:
    - role: reset

[root@ansible-server ansible]# ansible-playbook reset_role.yml 
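
Optionally, a few ad-hoc checks can confirm the role did what was expected before moving on, for example:

[root@ansible-server ansible]# ansible all -m shell -a 'hostname'
[root@ansible-server ansible]# ansible all -m shell -a 'tail -n 5 /etc/hosts'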

5.2 Install Packages

[root@ansible-server ansible]# mkdir -p roles/reset-installpackage/{files,tasks}

[root@ansible-server ansible]# cd roles/reset-installpackage/
[root@ansible-server reset-installpackage]# ls
files  tasks

[root@ansible-server reset-installpackage]# wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm -P files/

[root@ansible-server reset-installpackage]# wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm -P files/

[root@ansible-server reset-installpackage]# vim files/ge4.18_ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

[root@ansible-server reset-installpackage]# vim files/lt4.18_ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack_ipv4
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip

[root@ansible-server reset-installpackage]# vim files/k8s.conf 
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384


[root@ansible-server reset-installpackage]# vim tasks/install_package.yml
- name: install CentOS or Rocky package
  yum:
    name: vim,tree,lrzsz,wget,jq,psmisc,net-tools,telnet,git
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: install CentOS8 or Rocky8 package
  yum:
    name: rsync
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: install Ubuntu package
  apt:
    name: tree,lrzsz,jq
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server reset-installpackage]# vim tasks/set_centos7_kernel.yml
- name: update CentOS7
  yum:
    name: '*'
    state: latest
    exclude: kernel*
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: copy CentOS7 kernel files
  copy: 
    src: "{{ item }}"
    dest: /tmp
  loop:
    - kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
    - kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: Finding RPM files 
  find: 
    paths: "/tmp" 
    patterns: "*.rpm" 
  register: RPM_RESULT
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: Install RPM 
  yum: 
    name: "{{ item.path }}"
  with_items: "{{ RPM_RESULT.files }}"
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: delete kernel files
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ RPM_RESULT.files }}"
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: set grub
  shell:
    cmd: grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg; grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"

[root@ansible-server reset-installpackage]# vim tasks/install_ipvsadm.yml
- name: install CentOS or Rocky ipvsadm
  yum:
    name: ipvsadm,ipset,sysstat,conntrack,libseccomp
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.k8s_cluster
- name: install Ubuntu ipvsadm
  apt:
    name: ipvsadm,ipset,sysstat,conntrack,libseccomp-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.k8s_cluster

[root@ansible-server reset-installpackage]# vim tasks/set_ipvs.yml
- name: configuration load_mod
  shell:
    cmd: |
      modprobe -- ip_vs
      modprobe -- ip_vs_rr
      modprobe -- ip_vs_wrr
      modprobe -- ip_vs_sh
  when:
    - inventory_hostname in groups.k8s_cluster
- name: configuration load_mod kernel ge4.18
  shell:
    cmd: modprobe -- nf_conntrack
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky") or (ansible_distribution=="Ubuntu" and ansible_distribution_major_version=="20")
    - inventory_hostname in groups.k8s_cluster
- name: configuration load_mod kernel lt4.18
  shell:
    cmd: modprobe -- nf_conntrack_ipv4
  when:
    - (ansible_distribution=="Ubuntu" and ansible_distribution_major_version=="18")
    - inventory_hostname in groups.k8s_cluster
- name: Copy ge4.18_ipvs.conf file
  copy: 
    src: ge4.18_ipvs.conf
    dest: /etc/modules-load.d/ipvs.conf
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky") or (ansible_distribution=="Ubuntu" and ansible_distribution_major_version=="20")
    - inventory_hostname in groups.k8s_cluster
- name: Copy lt4.18_ipvs.conf file
  copy: 
    src: lt4.18_ipvs.conf
    dest: /etc/modules-load.d/ipvs.conf
  when:
    - (ansible_distribution=="Ubuntu" and ansible_distribution_major_version=="18")
    - inventory_hostname in groups.k8s_cluster
- name: start systemd-modules-load service 
  systemd:
    name: systemd-modules-load
    state: started
    enabled: yes
  when:
    - inventory_hostname in groups.k8s_cluster

[root@ansible-server reset-installpackage]# vim tasks/set_k8s_kernel.yml
- name: copy k8s.conf file
  copy: 
    src: k8s.conf
    dest: /etc/sysctl.d/
- name: Load kernel config
  shell:
    cmd: "sysctl --system"

[root@ansible-server reset-installpackage]# vim tasks/reboot_system.yml
- name: reboot system
  reboot:

[root@ansible-server reset-installpackage]# vim tasks/main.yml
- include: install_package.yml
- include: set_centos7_kernel.yml
- include: install_ipvsadm.yml
- include: set_ipvs.yml
- include: set_k8s_kernel.yml
- include: reboot_system.yml

[root@ansible-server reset-installpackage]# cd ../../
[root@ansible-server ansible]# tree roles/reset-installpackage/
roles/reset-installpackage/
├── files
│   ├── ge4.18_ipvs.conf
│   ├── k8s.conf
│   ├── kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
│   ├── kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
│   └── lt4.18_ipvs.conf
└── tasks
    ├── install_ipvsadm.yml
    ├── install_package.yml
    ├── main.yml
    ├── reboot_system.yml
    ├── set_centos7_kernel.yml
    ├── set_ipvs.yml
    └── set_k8s_kernel.yml

2 directories, 12 files

[root@ansible-server ansible]# vim reset_installpackage_role.yml 
---
- hosts: all
  serial: 3

  roles:
    - role: reset-installpackage

[root@ansible-server ansible]# ansible-playbook reset_installpackage_role.yml 
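
Because this role updates the CentOS 7 kernel, loads the IPVS modules, applies the sysctl settings and reboots every host, it is worth spot-checking the result afterwards, for example:

[root@ansible-server ansible]# ansible k8s_cluster -m shell -a 'uname -r'
[root@ansible-server ansible]# ansible k8s_cluster -m shell -a 'lsmod | grep ip_vs'
[root@ansible-server ansible]# ansible k8s_cluster -m shell -a 'sysctl net.ipv4.ip_forward'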

6. Install chrony

6.1 Install chrony-server

[root@ansible-server ansible]# mkdir -p roles/chrony-server/{tasks,handlers}

[root@ansible-server ansible]# cd roles/chrony-server/
[root@ansible-server chrony-server]# ls
handlers  tasks

[root@ansible-server chrony-server]# vim tasks/install_chrony_yum.yml
- name: install CentOS or Rocky chrony
  yum:
    name: chrony
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: delete CentOS or Rocky /etc/chrony.conf file contains '^pool.*' string line
  lineinfile:
    path: /etc/chrony.conf
    regexp: '^pool.*'
    state: absent
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: delete CentOS or Rocky /etc/chrony.conf file contains '^server.*' string line
  lineinfile:
    path: /etc/chrony.conf
    regexp: '^server.*'
    state: absent
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: add Time server for CentOS or Rocky /etc/chrony.conf file
  lineinfile:
    path: /etc/chrony.conf
    insertafter: '^# Please consider .*'
    line: "server ntp.aliyun.com iburst\nserver time1.cloud.tencent.com iburst\nserver ntp.tuna.tsinghua.edu.cn iburst"
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: Substitution '^#(allow).*' string for CentOS or Rocky /etc/chrony.conf file
  replace:
    path: /etc/chrony.conf
    regexp: '^#(allow).*'
    replace: '\1 0.0.0.0/0'
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: Substitution '^#(local).*' string for CentOS or Rocky /etc/chrony.conf file
  replace:
    path: /etc/chrony.conf
    regexp: '^#(local).*'
    replace: '\1 stratum 10'
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd

[root@ansible-server chrony-server]# vim tasks/install_chrony_apt.yml
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu chrony
  apt:
    name: chrony
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: delete Ubuntu /etc/chrony/chrony.conf file contains '^pool.*' string line
  lineinfile:
    path: /etc/chrony/chrony.conf
    regexp: '^pool.*'
    state: absent
  when:
    - ansible_distribution=="Ubuntu"
  notify:
    - restart chronyd
- name: add Time server for Ubuntu /etc/chrony/chrony.conf file
  lineinfile:
    path: /etc/chrony/chrony.conf
    insertafter: '^# See http:.*'
    line: "server ntp.aliyun.com iburst\nserver time1.cloud.tencent.com iburst\nserver ntp.tuna.tsinghua.edu.cn iburst"
  when:
    - ansible_distribution=="Ubuntu"
- name: add 'allow 0.0.0.0/0' string and 'local stratum 10' string for Ubuntu /etc/chrony/chrony.conf file
  lineinfile:
    path: /etc/chrony/chrony.conf
    line: "{{ item }}"
  loop:
    - "allow 0.0.0.0/0"
    - "local stratum 10"
  when:
    - ansible_distribution=="Ubuntu"
  notify:
    - restart chronyd

[root@ansible-server chrony-server]# vim tasks/service.yml
- name: start chronyd
  systemd:
    name: chronyd
    state: started
    enabled: yes

[root@ansible-server chrony-server]# vim tasks/main.yml
- include: install_chrony_yum.yml
- include: install_chrony_apt.yml
- include: service.yml

[root@ansible-server chrony-server]# vim handlers/main.yml
- name: restart chronyd
  systemd:
    name: chronyd
    state: restarted

[root@ansible-server chrony-server]# cd ../../
[root@ansible-server ansible]# tree roles/chrony-server/
roles/chrony-server/
├── handlers
│   └── main.yml
└── tasks
    ├── install_chrony_apt.yml
    ├── install_chrony_yum.yml
    ├── main.yml
    └── service.yml

2 directories, 5 files

[root@ansible-server ansible]# vim chrony_server_role.yml 
---
- hosts: chrony_server

  roles:
    - role: chrony-server

[root@ansible-server ansible]# ansible-playbook chrony_server_role.yml

[root@k8s-ha01 ~]# chronyc sources -nv
210 Number of sources = 3
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^- 203.107.6.88                  2   6    37    62    -15ms[  -15ms] +/-   35ms
^* 139.199.215.251               2   6    37    62    -10us[+1488us] +/-   37ms
^? 101.6.6.172                   0   7     0     -     +0ns[   +0ns] +/-    0ns

[root@k8s-ha02 ~]# chronyc sources -nv
210 Number of sources = 3
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* 203.107.6.88                  2   6    77     3  -4058us[+2582us] +/-   31ms
^+ 139.199.215.251               2   6    77     2  +6881us[+6881us] +/-   33ms
^? 101.6.6.172                   0   7     0     -     +0ns[   +0ns] +/-    0ns

6.2 Install chrony-client

[root@ansible-server ansible]# mkdir -p roles/chrony-client/{tasks,handlers,vars}
[root@ansible-server ansible]# cd roles/chrony-client/
[root@ansible-server chrony-client]# ls
handlers  tasks  vars

#Set the IPs below to the chrony-server addresses: SERVER1 is ha1's IP and SERVER2 is ha2's IP
[root@ansible-server chrony-client]# vim vars/main.yml
SERVER1: 172.31.3.104
SERVER2: 172.31.3.105

[root@ansible-server chrony-client]# vim tasks/install_chrony_yum.yml
- name: install CentOS or Rocky chrony
  yum:
    name: chrony
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: delete CentOS or Rocky /etc/chrony.conf file contains '^pool.*' string line
  lineinfile:
    path: /etc/chrony.conf
    regexp: '^pool.*'
    state: absent
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: delete CentOS or Rocky /etc/chrony.conf file contains '^server.*' string line
  lineinfile:
    path: /etc/chrony.conf
    regexp: '^server.*'
    state: absent
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd
- name: add Time server for CentOS or Rocky /etc/chrony.conf file
  lineinfile:
    path: /etc/chrony.conf
    insertafter: '^# Please consider .*'
    line: "server {{ SERVER1 }} iburst\nserver {{ SERVER2 }} iburst"
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
  notify:
    - restart chronyd

[root@ansible-server chrony-client]# vim tasks/install_chrony_apt.yml
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu chrony
  apt:
    name: chrony
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: delete Ubuntu /etc/chrony/chrony.conf file contains '^pool.*' string line
  lineinfile:
    path: /etc/chrony/chrony.conf
    regexp: '^pool.*'
    state: absent
  when:
    - ansible_distribution=="Ubuntu"
  notify:
    - restart chronyd
- name: add Time server for Ubuntu /etc/chrony/chrony.conf file
  lineinfile:
    path: /etc/chrony/chrony.conf
    insertafter: '^# See http:.*'
    line: "server {{ SERVER1 }} iburst\nserver {{ SERVER2 }} iburst"
  when:
    - ansible_distribution=="Ubuntu"
  notify:
    - restart chronyd

[root@ansible-server chrony-client]# vim tasks/service.yml
- name: start chronyd
  systemd:
    name: chronyd
    state: started
    enabled: yes

[root@ansible-server chrony-client]# vim tasks/main.yml
- include: install_chrony_yum.yml
- include: install_chrony_apt.yml
- include: service.yml

[root@ansible-server chrony-client]# vim handlers/main.yml
- name: restart chronyd
  systemd:
    name: chronyd
    state: restarted

[root@ansible-server chrony-client]# cd ../../
[root@ansible-server ansible]# tree roles/chrony-client/
roles/chrony-client/
├── handlers
│   └── main.yml
├── tasks
│   ├── install_chrony_apt.yml
│   ├── install_chrony_yum.yml
│   ├── main.yml
│   └── service.yml
└── vars
    └── main.yml

3 directories, 6 files

[root@ansible-server ansible]# vim chrony_client_role.yml
---
- hosts: chrony_client

  roles:
    - role: chrony-client

[root@ansible-server ansible]# ansible-playbook chrony_client_role.yml

[root@k8s-master01 ~]# chronyc sources -nv
210 Number of sources = 2
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* k8s-ha01                      3   6    17    28    -57us[  -29us] +/-   31ms
^+ k8s-ha02                      3   6    17    29   +204us[ +231us] +/-   34ms

7. Install haproxy

[root@ansible-server ansible]# mkdir -p roles/haproxy/{tasks,vars,files,templates}
[root@ansible-server ansible]# cd roles/haproxy/
[root@ansible-server haproxy]# ls
files  tasks  templates  vars

[root@ansible-server haproxy]# wget http://www.lua.org/ftp/lua-5.4.4.tar.gz -P files/
[root@ansible-server haproxy]# wget https://www.haproxy.org/download/2.4/src/haproxy-2.4.15.tar.gz -P files/

[root@ansible-server haproxy]# vim files/haproxy.service
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid
ExecReload=/bin/kill -USR2 $MAINPID

[Install]
WantedBy=multi-user.target

#Set VIP below to your keepalived VIP (virtual IP) address
[root@ansible-server haproxy]# vim vars/main.yml
SRC_DIR: /usr/local/src
LUA_FILE: lua-5.4.4.tar.gz
HAPROXY_FILE: haproxy-2.4.15.tar.gz
HAPROXY_INSTALL_DIR: /apps/haproxy
STATS_AUTH_USER: admin
STATS_AUTH_PASSWORD: 123456
VIP: 172.31.3.188

[root@ansible-server haproxy]# vim templates/haproxy.cfg.j2
global
maxconn 100000
chroot {{ HAPROXY_INSTALL_DIR }}
stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
uid 99
gid 99
daemon
pidfile /var/lib/haproxy/haproxy.pid
log 127.0.0.1 local3 info

defaults
option http-keep-alive
option forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client 300000ms
timeout server 300000ms

listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth {{ STATS_AUTH_USER }}:{{ STATS_AUTH_PASSWORD }}

listen kubernetes-6443
    bind {{ VIP }}:6443
    mode tcp
    log global
    {% for i in groups.master %}
    server {{ i }} {{ i }}:6443 check inter 3s fall 2 rise 5
    {% endfor %}

listen harbor-80
    bind {{ VIP }}:80
    mode http
    log global
    balance source
    {% for i in groups.harbor %}
    server {{ i }} {{ i }}:80 check inter 3s fall 2 rise 5
    {% endfor %}

[root@ansible-server haproxy]# vim tasks/install_package.yml
- name: install CentOS or Rocky depend on the package
  yum:
    name: gcc,make,gcc-c++,glibc,glibc-devel,pcre,pcre-devel,openssl,openssl-devel,systemd-devel,libtermcap-devel,ncurses-devel,libevent-devel,readline-devel
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.haproxy
- name: delete lock files
  file:
    path: "{{ item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.haproxy
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.haproxy
- name: install Ubuntu depend on the package
  apt:
    name: gcc,make,openssl,libssl-dev,libpcre3,libpcre3-dev,zlib1g-dev,libreadline-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/build_lua.yml
- name: unarchive lua package
  unarchive:
    src: "{{ LUA_FILE }}"
    dest: "{{ SRC_DIR }}"
  when:
    - inventory_hostname in groups.haproxy
- name: get LUA_DIR directory
  shell:
    cmd: echo {{ LUA_FILE }} | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
  register: LUA_DIR
  when:
    - inventory_hostname in groups.haproxy
- name: Build and install lua
  shell: 
    chdir: "{{ SRC_DIR }}/{{ LUA_DIR.stdout }}"
    cmd: make all test
  when:
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/build_haproxy.yml
- name: unarchive haproxy package
  unarchive:
    src: "{{ HAPROXY_FILE }}"
    dest: "{{ SRC_DIR }}"
  when:
    - inventory_hostname in groups.haproxy
- name: get HAPROXY_DIR directory
  shell:
    cmd: echo {{ HAPROXY_FILE }} | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
  register: HAPROXY_DIR
  when:
    - inventory_hostname in groups.haproxy
- name: make Haproxy
  shell: 
    chdir: "{{ SRC_DIR }}/{{ HAPROXY_DIR.stdout }}"
    cmd: make -j {{ ansible_processor_vcpus }} ARCH=x86_64 TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 USE_SYSTEMD=1 USE_CPU_AFFINITY=1 USE_LUA=1 LUA_INC={{ SRC_DIR }}/{{ LUA_DIR.stdout }}/src/ LUA_LIB={{ SRC_DIR }}/{{ LUA_DIR.stdout }}/src/ PREFIX={{ HAPROXY_INSTALL_DIR }}
  when:
    - inventory_hostname in groups.haproxy
- name: make install Haproxy
  shell: 
    chdir: "{{ SRC_DIR }}/{{ HAPROXY_DIR.stdout }}"
    cmd: make install PREFIX={{ HAPROXY_INSTALL_DIR }}
  when:
    - inventory_hostname in groups.haproxy
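
The sed expression used above to derive the source directory name simply strips the .tar.gz suffix, for example:

[root@ansible-server ansible]# echo haproxy-2.4.15.tar.gz | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
haproxy-2.4.15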

[root@ansible-server haproxy]# vim tasks/config.yml
- name: copy haproxy.service file
  copy:
    src: haproxy.service
    dest: /lib/systemd/system
  when:
    - inventory_hostname in groups.haproxy
- name: create haproxy link
  file:
    src: "../..{{ HAPROXY_INSTALL_DIR }}/sbin/{{ item.src }}"
    dest: "/usr/sbin/{{ item.src }}"
    state: link
    owner: root
    group: root
    mode: 755
    force: yes   
  with_items:
    - src: haproxy
  when:
    - inventory_hostname in groups.haproxy
- name: create /etc/haproxy directory
  file:
    path: /etc/haproxy
    state: directory
  when:
    - inventory_hostname in groups.haproxy
- name: create /var/lib/haproxy/ directory
  file:
    path: /var/lib/haproxy/
    state: directory
  when:
    - inventory_hostname in groups.haproxy
- name: copy haproxy.cfg file
  template:
    src: haproxy.cfg.j2
    dest: /etc/haproxy/haproxy.cfg
  when:
    - inventory_hostname in groups.haproxy
- name: Add the kernel
  sysctl:
    name: net.ipv4.ip_nonlocal_bind
    value: "1"
  when:
    - inventory_hostname in groups.haproxy
- name: PATH variable
  copy:
    content: 'PATH={{ HAPROXY_INSTALL_DIR }}/sbin:$PATH'
    dest: /etc/profile.d/haproxy.sh
  when:
    - inventory_hostname in groups.haproxy
- name: PATH variable entry
  shell:
    cmd: . /etc/profile.d/haproxy.sh
  when:
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/service.yml
- name: start haproxy
  systemd:
    name: haproxy
    state: started
    enabled: yes
    daemon_reload: yes
  when:
    - inventory_hostname in groups.haproxy

[root@ansible-server haproxy]# vim tasks/main.yml
- include: install_package.yml
- include: build_lua.yml
- include: build_haproxy.yml
- include: config.yml
- include: service.yml

[root@ansible-server haproxy]# cd ../../
[root@ansible-server ansible]# tree roles/haproxy/
roles/haproxy/
├── files
│   ├── haproxy-2.4.15.tar.gz
│   ├── haproxy.service
│   └── lua-5.4.4.tar.gz
├── tasks
│   ├── build_haproxy.yml
│   ├── build_lua.yml
│   ├── config.yml
│   ├── install_package.yml
│   ├── main.yml
│   └── service.yml
├── templates
│   └── haproxy.cfg.j2
└── vars
    └── main.yml

4 directories, 11 files

[root@ansible-server ansible]# vim haproxy_role.yml
---
- hosts: haproxy:master:harbor

  roles:
    - role: haproxy

[root@ansible-server ansible]# ansible-playbook haproxy_role.yml
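
After the playbook finishes, you can check on the ha nodes that haproxy is running and listening on the VIP ports (the VIP itself only becomes reachable once keepalived is configured in the next section), for example:

[root@k8s-ha01 ~]# systemctl status haproxy
[root@k8s-ha01 ~]# ss -ntl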

8. Install keepalived

8.1 Install keepalived-master

[root@ansible-server ansible]# mkdir -p roles/keepalived-master/{tasks,files,vars,templates}
[root@ansible-server ansible]# cd roles/keepalived-master/
[root@ansible-server keepalived-master]# ls
files  tasks  templates  vars

[root@ansible-server keepalived-master]#  wget https://keepalived.org/software/keepalived-2.2.7.tar.gz -P files/

[root@ansible-server keepalived-master]# vim files/check_haproxy.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-09
#FileName:      check_haproxy.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
err=0
for k in $(seq 1 3);do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
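
The script exits 0 while a haproxy process exists and stops keepalived after three failed checks; once everything is deployed it can be exercised by hand on an ha node, for example:

[root@k8s-ha01 ~]# bash /etc/keepalived/check_haproxy.sh; echo $?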

#Set VIP below to your keepalived VIP (virtual IP) address
[root@ansible-server keepalived-master]# vim vars/main.yml
URL: mirrors.cloud.tencent.com
ROCKY_URL: mirrors.sjtug.sjtu.edu.cn
KEEPALIVED_FILE: keepalived-2.2.7.tar.gz
SRC_DIR: /usr/local/src
KEEPALIVED_INSTALL_DIR: /apps/keepalived
STATE: MASTER
PRIORITY: 100
VIP: 172.31.3.188

[root@ansible-server keepalived-master]# vim templates/PowerTools.repo.j2 
[PowerTools]
name=PowerTools
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/PowerTools/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever/PowerTools/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[root@ansible-server keepalived-master]# vim templates/keepalived.conf.j2
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}

vrrp_instance VI_1 {
    state {{ STATE }}
    interface {{ ansible_default_ipv4.interface }}
    virtual_router_id 51
    priority {{ PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        {{ VIP }} dev {{ ansible_default_ipv4.interface }} label {{ ansible_default_ipv4.interface }}:1
    }
    track_script {
       check_haproxy
    }
}

[root@ansible-server keepalived-master]# vim tasks/install_package.yml
- name: find "[PowerTools]" mirror warehouse
  find:
    path: /etc/yum.repos.d/
    contains: '\[PowerTools\]'
  register: RETURN
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: copy repo file
  template:
    src: PowerTools.repo.j2
    dest: /etc/yum.repos.d/PowerTools.repo
  when: 
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky") and (ansible_distribution_major_version=="8") 
    - RETURN.matched == 0
- name: install CentOS8 or Rocky8 depend on the package
  yum:
    name: make,gcc,ipvsadm,autoconf,automake,openssl-devel,libnl3-devel,iptables-devel,ipset-devel,file-devel,net-snmp-devel,glib2-devel,pcre2-devel,libnftnl-devel,libmnl-devel,systemd-devel
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: install CentOS7 depend on the package
  yum:
    name: make,gcc,libnfnetlink-devel,libnfnetlink,ipvsadm,libnl,libnl-devel,libnl3,libnl3-devel,lm_sensors-libs,net-snmp-agent-libs,net-snmp-libs,openssh-server,openssh-clients,openssl,openssl-devel,automake,iproute
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: delete lock files
  file:
    path: "{
    
    { item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu 20.04 depend on the package
  apt:
    name: make,gcc,ipvsadm,build-essential,pkg-config,automake,autoconf,libipset-dev,libnl-3-dev,libnl-genl-3-dev,libssl-dev,libxtables-dev,libip4tc-dev,libip6tc-dev,libipset-dev,libmagic-dev,libsnmp-dev,libglib2.0-dev,libpcre2-dev,libnftnl-dev,libmnl-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="20"
- name: install Ubuntu 18.04 depend on the package
  apt:
    name: make,gcc,ipvsadm,build-essential,pkg-config,automake,autoconf,iptables-dev,libipset-dev,libnl-3-dev,libnl-genl-3-dev,libssl-dev,libxtables-dev,libip4tc-dev,libip6tc-dev,libipset-dev,libmagic-dev,libsnmp-dev,libglib2.0-dev,libpcre2-dev,libnftnl-dev,libmnl-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="18"

[root@ansible-server keepalived-master]# vim tasks/keepalived_file.yml
- name: unarchive  keepalived package
  unarchive:
    src: "{
    
    { KEEPALIVED_FILE }}"
    dest: "{
    
    { SRC_DIR }}"

[root@ansible-server keepalived-master]# vim tasks/build.yml
- name: get KEEPALIVED_DIR directory
  shell:
    cmd: echo {{ KEEPALIVED_FILE }} | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
  register: KEEPALIVED_DIR
- name: Build and install Keepalived
  shell: 
    chdir: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}"
    cmd: ./configure --prefix={{ KEEPALIVED_INSTALL_DIR }} --disable-fwmark
- name: make && make install
  shell:
    chdir: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}"
    cmd: make -j {{ ansible_processor_vcpus }} && make install

[root@ansible-server keepalived-master]# vim tasks/config.yml
- name: create /etc/keepalived directory
  file:
    path: /etc/keepalived
    state: directory
- name: copy keepalived.conf file
  template:
    src: keepalived.conf.j2
    dest: /etc/keepalived/keepalived.conf
- name: copy check_haproxy.sh file
  copy:
    src: check_haproxy.sh
    dest: /etc/keepalived/
    mode: 0755
- name: copy keepalived.service file
  copy:
    remote_src: True
    src: "{
    
    { SRC_DIR }}/{
    
    { KEEPALIVED_DIR.stdout }}/keepalived/keepalived.service"
    dest: /lib/systemd/system/
- name: PATH variable
  copy:
    content: 'PATH={{ KEEPALIVED_INSTALL_DIR }}/sbin:$PATH'
    dest: /etc/profile.d/keepalived.sh
- name: PATH variable entry
  shell:
    cmd: . /etc/profile.d/keepalived.sh

[root@ansible-server keepalived-master]# vim tasks/service.yml
- name: start keepalived
  systemd:
    name: keepalived
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server keepalived-master]# vim tasks/main.yml
- include: install_package.yml
- include: keepalived_file.yml
- include: build.yml
- include: config.yml
- include: service.yml

[root@ansible-server keepalived-master]# cd ../../
[root@ansible-server ansible]# tree roles/keepalived-master/
roles/keepalived-master/
├── files
│   ├── check_haproxy.sh
│   └── keepalived-2.2.7.tar.gz
├── tasks
│   ├── build.yml
│   ├── config.yml
│   ├── install_package.yml
│   ├── keepalived_file.yml
│   ├── main.yml
│   └── service.yml
├── templates
│   ├── keepalived.conf.j2
│   └── PowerTools.repo.j2
└── vars
    └── main.yml

4 directories, 11 files

[root@ansible-server ansible]# vim keepalived_master_role.yml 
---
- hosts: keepalives_master

  roles:
    - role: keepalived-master

[root@ansible-server ansible]# ansible-playbook keepalived_master_role.yml 
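
After the play completes, it is worth confirming that the VIP actually came up on ha1 before moving on. A minimal ad-hoc check from the ansible server (a sketch; the inventory group and the 172.31.3.188 VIP are the ones defined earlier):

[root@ansible-server ansible]# ansible keepalives_master -m shell -a 'ip address show | grep 172.31.3.188'
[root@ansible-server ansible]# ansible keepalives_master -m shell -a 'pgrep -a keepalived'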

8.2 Install keepalived-backup

[root@ansible-server ansible]# mkdir -p roles/keepalived-backup/{tasks,files,vars,templates}
[root@ansible-server ansible]# cd roles/keepalived-backup/
[root@ansible-server keepalived-backup]# ls
files  tasks  templates  vars

[root@ansible-server keepalived-backup]#  wget https://keepalived.org/software/keepalived-2.2.7.tar.gz -P files/

[root@ansible-server keepalived-backup]# vim files/check_haproxy.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-09
#FileName:      check_haproxy.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
err=0
for k in $(seq 1 3);do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

#Set VIP below to the VIP (virtual IP) address defined in your keepalived configuration
[root@ansible-server keepalived-backup]# vim vars/main.yml
URL: mirrors.cloud.tencent.com
ROCKY_URL: mirrors.sjtug.sjtu.edu.cn
KEEPALIVED_FILE: keepalived-2.2.7.tar.gz
SRC_DIR: /usr/local/src
KEEPALIVED_INSTALL_DIR: /apps/keepalived
STATE: BACKUP
PRIORITY: 90
VIP: 172.31.3.188

[root@ansible-server keepalived-backup]# vim templates/PowerTools.repo.j2 
[PowerTools]
name=PowerTools
{% if ansible_distribution =="Rocky" %}
baseurl=https://{{ ROCKY_URL }}/rocky/$releasever/PowerTools/$basearch/os/
{% elif ansible_distribution=="CentOS" %}
baseurl=https://{{ URL }}/centos/$releasever/PowerTools/$basearch/os/
{% endif %}
gpgcheck=1
{% if ansible_distribution =="Rocky" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial
{% elif ansible_distribution=="CentOS" %}
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
{% endif %}

[root@ansible-server keepalived-backup]# vim templates/keepalived.conf.j2
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}

vrrp_instance VI_1 {
    state {{ STATE }}
    interface {{ ansible_default_ipv4.interface }}
    virtual_router_id 51
    priority {{ PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        {{ VIP }} dev {{ ansible_default_ipv4.interface }} label {{ ansible_default_ipv4.interface }}:1
    }
    track_script {
       check_haproxy
    }
}

[root@ansible-server keepalived-backup]# vim tasks/install_package.yml
- name: find "[PowerTools]" mirror warehouse
  find:
    path: /etc/yum.repos.d/
    contains: '\[PowerTools\]'
  register: RETURN
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: copy repo file
  template:
    src: PowerTools.repo.j2
    dest: /etc/yum.repos.d/PowerTools.repo
  when: 
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky") and (ansible_distribution_major_version=="8") 
    - RETURN.matched == 0
- name: install CentOS8 or Rocky8 depend on the package
  yum:
    name: make,gcc,ipvsadm,autoconf,automake,openssl-devel,libnl3-devel,iptables-devel,ipset-devel,file-devel,net-snmp-devel,glib2-devel,pcre2-devel,libnftnl-devel,libmnl-devel,systemd-devel
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - ansible_distribution_major_version=="8"
- name: install CentOS7 depend on the package
  yum:
    name: make,gcc,libnfnetlink-devel,libnfnetlink,ipvsadm,libnl,libnl-devel,libnl3,libnl3-devel,lm_sensors-libs,net-snmp-agent-libs,net-snmp-libs,openssh-server,openssh-clients,openssl,openssl-devel,automake,iproute
  when:
    - ansible_distribution=="CentOS"
    - ansible_distribution_major_version=="7"
- name: delete lock files
  file:
    path: "{
    
    { item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu 20.04 depend on the package
  apt:
    name: make,gcc,ipvsadm,build-essential,pkg-config,automake,autoconf,libipset-dev,libnl-3-dev,libnl-genl-3-dev,libssl-dev,libxtables-dev,libip4tc-dev,libip6tc-dev,libipset-dev,libmagic-dev,libsnmp-dev,libglib2.0-dev,libpcre2-dev,libnftnl-dev,libmnl-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="20"
- name: install Ubuntu 18.04 depend on the package
  apt:
    name: make,gcc,ipvsadm,build-essential,pkg-config,automake,autoconf,iptables-dev,libipset-dev,libnl-3-dev,libnl-genl-3-dev,libssl-dev,libxtables-dev,libip4tc-dev,libip6tc-dev,libipset-dev,libmagic-dev,libsnmp-dev,libglib2.0-dev,libpcre2-dev,libnftnl-dev,libmnl-dev,libsystemd-dev
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
    - ansible_distribution_major_version=="18"

[root@ansible-server keepalived-backup]# vim tasks/keepalived_file.yml
- name: unarchive  keepalived package
  unarchive:
    src: "{
    
    { KEEPALIVED_FILE }}"
    dest: "{
    
    { SRC_DIR }}"

[root@ansible-server keepalived-backup]# vim tasks/build.yml
- name: get KEEPALIVED_DIR directory
  shell:
    cmd: echo {{ KEEPALIVED_FILE }} | sed -nr 's/^(.*[0-9]).([[:lower:]]).*/\1/p'
  register: KEEPALIVED_DIR
- name: Build and install Keepalived
  shell: 
    chdir: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}"
    cmd: ./configure --prefix={{ KEEPALIVED_INSTALL_DIR }} --disable-fwmark
- name: make && make install
  shell:
    chdir: "{{ SRC_DIR }}/{{ KEEPALIVED_DIR.stdout }}"
    cmd: make -j {{ ansible_processor_vcpus }} && make install

[root@ansible-server keepalived-backup]# vim tasks/config.yml
- name: create /etc/keepalived directory
  file:
    path: /etc/keepalived
    state: directory
- name: copy keepalived.conf file
  template:
    src: keepalived.conf.j2
    dest: /etc/keepalived/keepalived.conf
- name: copy check_haproxy.sh file
  copy:
    src: check_haproxy.sh
    dest: /etc/keepalived/
    mode: 0755
- name: copy keepalived.service file
  copy:
    remote_src: True
    src: "{
    
    { SRC_DIR }}/{
    
    { KEEPALIVED_DIR.stdout }}/keepalived/keepalived.service"
    dest: /lib/systemd/system/
- name: PATH variable
  copy:
    content: 'PATH={{ KEEPALIVED_INSTALL_DIR }}/sbin:$PATH'
    dest: /etc/profile.d/keepalived.sh
- name: PATH variable entry
  shell:
    cmd: . /etc/profile.d/keepalived.sh

[root@ansible-server keepalived-backup]# vim tasks/service.yml
- name: start keepalived
  systemd:
    name: keepalived
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server keepalived-backup]# vim tasks/main.yml
- include: install_package.yml
- include: keepalived_file.yml
- include: build.yml
- include: config.yml
- include: service.yml

[root@ansible-server keepalived-backup]# cd ../../
[root@ansible-server ansible]# tree roles/keepalived-backup/
roles/keepalived-backup/
├── files
│   ├── check_haproxy.sh
│   └── keepalived-2.2.7.tar.gz
├── tasks
│   ├── build.yml
│   ├── config.yml
│   ├── install_package.yml
│   ├── keepalived_file.yml
│   ├── main.yml
│   └── service.yml
├── templates
│   ├── keepalived.conf.j2
│   └── PowerTools.repo.j2
└── vars
    └── main.yml

4 directories, 11 files

[root@ansible-server ansible]# vim keepalived_backup_role.yml 
---
- hosts: keepalives_backup

  roles:
    - role: keepalived-backup

[root@ansible-server ansible]# ansible-playbook keepalived_backup_role.yml 
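
With both instances running, a quick manual failover test shows the health check doing its job (a sketch, assuming the haproxy role installed a systemd unit named haproxy): stop haproxy on ha1, wait a few seconds for check_haproxy.sh to fail and stop keepalived, confirm the VIP has moved to ha2, then restore ha1.

[root@k8s-ha01 ~]# systemctl stop haproxy
#wait 10-15s for the vrrp_script to fail and keepalived to be stopped
[root@k8s-ha02 ~]# ip address show | grep 172.31.3.188
[root@k8s-ha01 ~]# systemctl start haproxy keepalived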

9. Install harbor

9.1 docker (installed from package repositories)

[root@ansible-server ansible]# mkdir -p roles/docker/{tasks,vars,templates}

[root@ansible-server ansible]# cd roles/docker/
[root@ansible-server docker]# ls
tasks  templates  vars

[root@ansible-server docker]# vim templates/daemon.json.j2 
{
    "registry-mirrors": [
        "https://registry.docker-cn.com",
        "http://hub-mirror.c.163.com",
        "https://docker.mirrors.ustc.edu.cn"
    ],
    "insecure-registries": ["{{ HARBOR_DOMAIN }}"],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 5,
    "log-opts": {
        "max-size": "300m",
        "max-file": "2"
    },
    "live-restore": true
}

#Set HARBOR_DOMAIN below to your own harbor domain name
[root@ansible-server docker]# vim vars/main.yml
DOCKER_VERSION: 20.10.14
DOCKER_MIRRORS: mirrors.cloud.tencent.com
HARBOR_DOMAIN: harbor.raymonds.cc

[root@ansible-server docker]# vim tasks/install_docker_yum.yml
- name: add CentOS or Rocky docker mirror warehouse
  yum_repository:
    name: docker-ce
    description: docker-ce
    file: docker-ce
    baseurl: https://{{ DOCKER_MIRRORS }}/docker-ce/linux/centos/{{ ansible_distribution_major_version }}/x86_64/stable/
    gpgkey: https://{{ DOCKER_MIRRORS }}/docker-ce/linux/centos/gpg
    gpgcheck: yes
  when: 
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: install CentOS or Rocky docker
  yum: 
    name: docker-ce-{{ DOCKER_VERSION }},docker-ce-cli-{{ DOCKER_VERSION }}
  when: 
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")

[root@ansible-server docker]# vim tasks/install_docker_apt.yml
- name: delete lock files
  file:
    path: "{
    
    { item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu docker depend on the package
  apt:
    name: apt-transport-https,ca-certificates,curl,software-properties-common
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: import Ubuntu docker key
  apt_key:
    url: https://{{ DOCKER_MIRRORS }}/docker-ce/linux/ubuntu/gpg
  when:
    - ansible_distribution=="Ubuntu"
- name: import Ubuntu docker installation source
  apt_repository:
    repo: "deb [arch=amd64] https://{
    
    { DOCKER_MIRRORS }}/docker-ce/linux/ubuntu {
    
    { ansible_distribution_release }} stable"
    filename: docker-ce
  when:
    - ansible_distribution=="Ubuntu"
- name: delete /var/lib/dpkg/lock file
  file:
    path: /var/lib/dpkg/lock
    state: absent
  when:
    - ansible_distribution=="Ubuntu"
- name: delete /var/lib/apt/lists/lock file
  file:
    path: /var/lib/apt/lists/lock
    state: absent
  when:
    - ansible_distribution=="Ubuntu"
- name: delete /var/cache/apt/archives/lock file
  file:
    path: /var/cache/apt/archives/lock
    state: absent
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu docker
  apt:
    name: docker-ce=5:{{ DOCKER_VERSION }}~3-0~ubuntu-{{ ansible_distribution_release }},docker-ce-cli=5:{{ DOCKER_VERSION }}~3-0~ubuntu-{{ ansible_distribution_release }}
    force: yes
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server docker]# vim tasks/set_mirror_accelerator.yml
- name: mkdir /etc/docker
  file:
    path: /etc/docker
    state: directory
- name: set mirror_accelerator
  template:
    src: daemon.json.j2
    dest: /etc/docker/daemon.json

[root@ansible-server docker]# vim tasks/service.yml
- name: start docker
  systemd:
    name: docker
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server docker]# vim tasks/set_alias.yml
- name: set docker alias
  lineinfile:
    path: ~/.bashrc
    line: "{
    
    { item }}"
  loop:
    - "alias rmi=\"docker images -qa|xargs docker rmi -f\""
    - "alias rmc=\"docker ps -qa|xargs docker rm -f\""

[root@ansible-server docker]# vim tasks/set_swap.yml
- name: set WARNING No swap limit support
  replace:
    path: /etc/default/grub
    regexp: '^(GRUB_CMDLINE_LINUX=.*)\"$'
    replace: '\1 swapaccount=1"'
  when:
    - ansible_distribution=="Ubuntu"
- name: update-grub
  shell:
    cmd: update-grub
  when:
    - ansible_distribution=="Ubuntu"
- name: reboot Ubuntu system
  reboot:
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server docker]# vim tasks/main.yml
- include: install_docker_yum.yml
- include: install_docker_apt.yml
- include: set_mirror_accelerator.yml
- include: service.yml
- include: set_alias.yml
- include: set_swap.yml

[root@ansible-server docker]# cd ../../
[root@ansible-server ansible]# tree roles/docker
roles/docker
├── tasks
│   ├── install_docker_apt.yml
│   ├── install_docker_yum.yml
│   ├── main.yml
│   ├── service.yml
│   ├── set_alias.yml
│   ├── set_mirror_accelerator.yml
│   └── set_swap.yml
├── templates
│   └── daemon.json.j2
└── vars
    └── main.yml

3 directories, 9 files

9.2 docker-compose

[root@ansible-server ansible]# mkdir -p roles/docker-compose/{tasks,files}
[root@ansible-server ansible]# cd roles/docker-compose/
[root@ansible-server docker-compose]# ls
files  tasks

[root@ansible-server docker-compose]# wget https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64 -P files

[root@ansible-server docker-compose]# vim tasks/install_docker_compose.yml
- name: copy docker compose file
  copy:
    src: docker-compose-Linux-x86_64
    dest: /usr/bin/docker-compose
    mode: 0755

[root@ansible-server docker-compose]# vim tasks/main.yml
- include: install_docker_compose.yml

[root@ansible-server ansible]# tree roles/docker-compose/
roles/docker-compose/
├── files
│   └── docker-compose-Linux-x86_64
└── tasks
    ├── install_docker_compose.yml
    └── main.yml

2 directories, 3 files

9.3 harbor

[root@ansible-server ansible]# mkdir -p roles/harbor/{tasks,files,templates,vars,meta}

[root@ansible-server ansible]# cd roles/harbor/
[root@ansible-server harbor]# ls
files  meta  tasks  templates  vars

[root@ansible-server harbor]# wget https://github.com/goharbor/harbor/releases/download/v2.5.0/harbor-offline-installer-v2.5.0.tgz -P files/

[root@ansible-server harbor]# vim templates/harbor.service.j2
[Unit]
Description=Harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
Documentation=http://github.com/vmware/harbor

[Service]
Type=simple
Restart=on-failure
RestartSec=5
ExecStart=/usr/bin/docker-compose -f {{ HARBOR_INSTALL_DIR }}/harbor/docker-compose.yml up
ExecStop=/usr/bin/docker-compose -f {{ HARBOR_INSTALL_DIR }}/harbor/docker-compose.yml down

[Install]
WantedBy=multi-user.target

[root@ansible-server harbor]# vim vars/main.yml
HARBOR_INSTALL_DIR: /apps
HARBOR_VERSION: 2.5.0
HARBOR_ADMIN_PASSWORD: 123456

[root@ansible-server harbor]# vim tasks/harbor_files.yml
- name: create HARBOR_INSTALL_DIR directory
  file:
    path: "{
    
    { HARBOR_INSTALL_DIR }}"
    state: directory
- name: unarchive  harbor package
  unarchive:
    src: "harbor-offline-installer-v{
    
    { HARBOR_VERSION }}.tgz"
    dest: "{
    
    { HARBOR_INSTALL_DIR }}/"
    creates: "{
    
    { HARBOR_INSTALL_DIR }}/harbor"

[root@ansible-server harbor]# vim tasks/config.yml
- name: mv harbor.yml
  shell: 
    cmd: mv {{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml.tmpl {{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml
    creates: "{{ HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
- name: set harbor.yml file 'hostname' string line
  replace: 
    path: "{
    
    { HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '^(hostname:) .*'
    replace: '\1 {
    
    { ansible_default_ipv4.address }}'
- name: set harbor.yml file 'harbor_admin_password' string line
  replace: 
    path: "{
    
    { HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '^(harbor_admin_password:) .*'
    replace: '\1 {
    
    { HARBOR_ADMIN_PASSWORD }}'
- name: set harbor.yml file 'https' string line
  replace:
    path: "{
    
    { HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '^(https:)'
    replace: '#\1'
- name: set harbor.yml file 'port' string line
  replace: 
    path: "{
    
    { HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '  (port: 443)'
    replace: '#  \1'
- name: set harbor.yml file 'certificate' string line
  replace: 
    path: "{
    
    { HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '  (certificate: .*)'
    replace: '#  \1'
- name: set harbor.yml file 'private_key' string line
  replace: 
    path: "{
    
    { HARBOR_INSTALL_DIR }}/harbor/harbor.yml"
    regexp: '  (private_key: .*)'
    replace: '#  \1'

[root@ansible-server harbor]# vim tasks/install_python.yml
- name: install CentOS or Rocky python
  yum:
    name: python3
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: delete lock files
  file:
    path: "{
    
    { item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu python
  apt:
    name: python3
  when:
    - ansible_distribution=="Ubuntu"

[root@ansible-server harbor]# vim tasks/install_harbor.yml
- name: install harbor
  shell:
    cmd: "{
    
    { HARBOR_INSTALL_DIR }}/harbor/install.sh"

[root@ansible-server harbor]# vim tasks/service_file.yml
- name: copy harbor.service
  template:
    src: harbor.service.j2
    dest: /lib/systemd/system/harbor.service

[root@ansible-server harbor]# vim tasks/service.yml
- name: service enable
  systemd:
    name: harbor
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server harbor]# vim tasks/main.yml
- include: harbor_files.yml
- include: config.yml
- include: install_python.yml
- include: install_harbor.yml
- include: service_file.yml
- include: service.yml

#These are the roles that harbor depends on; docker-binary would be the binary-install variant of docker instead, adjust as needed
[root@ansible-server harbor]# vim meta/main.yml
dependencies:
  - role: docker
  - role: docker-compose

[root@ansible-server harbor]# cd ../../
[root@ansible-server ansible]# tree roles/harbor/
roles/harbor/
├── files
│   └── harbor-offline-installer-v2.5.0.tgz
├── meta
│   └── main.yml
├── tasks
│   ├── config.yml
│   ├── harbor_files.yml
│   ├── install_harbor.yml
│   ├── install_python.yml
│   ├── main.yml
│   ├── service_file.yml
│   └── service.yml
├── templates
│   └── harbor.service.j2
└── vars
    └── main.yml

5 directories, 11 files

[root@ansible-server ansible]# vim harbor_role.yml
---
- hosts: harbor

  roles:
    - role: harbor

[root@ansible-server ansible]# ansible-playbook harbor_role.yml
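
Once the play finishes, a quick sanity check confirms harbor is up on both hosts (a sketch; the path comes from HARBOR_INSTALL_DIR in vars/main.yml):

[root@ansible-server ansible]# ansible harbor -m shell -a 'systemctl is-active harbor'
[root@ansible-server ansible]# ansible harbor -m shell -a 'docker-compose -f /apps/harbor/docker-compose.yml ps'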

9.4 Create the harbor project

This step is mandatory; without it the images downloaded later cannot be pushed to harbor and the ansible run will fail. (A scripted alternative using the Harbor API is sketched after the steps below.)

Create a project named google_containers on harbor01
Create a project named google_containers on harbor02
Create a replication endpoint (target) on harbor02
Create a replication rule on harbor02
Create a replication endpoint (target) on harbor01
Create a replication rule on harbor01
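
If you prefer scripting over clicking through the UI, the project itself can also be created through the Harbor v2 REST API. A minimal sketch (it assumes harbor.raymonds.cc resolves to the harbor host, uses the admin password from vars/main.yml, and only creates the project; the replication endpoints and rules above still need to be configured):

#Create the google_containers project; repeat against the other harbor host
curl -u admin:123456 -X POST "http://harbor.raymonds.cc/api/v2.0/projects" \
     -H "Content-Type: application/json" \
     -d '{"project_name": "google_containers", "metadata": {"public": "true"}}'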

10. Install docker

#Only this playbook file needs to be created; the docker role from 9.1 is reused
[root@ansible-server ansible]# vim docker_role.yml 
---
- hosts: k8s_cluster

  roles:
    - role: docker

[root@ansible-server ansible]# ansible-playbook docker_role.yml
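
A quick check that the pinned Docker version landed on every cluster node (a sketch using the k8s_cluster inventory group):

[root@ansible-server ansible]# ansible k8s_cluster -m shell -a 'docker --version'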

11. Install kubeadm

[root@ansible-server ansible]# mkdir -p roles/kubeadm/{templates,vars,tasks}
[root@ansible-server ansible]# cd roles/kubeadm/
[root@ansible-server kubeadm]# ls
tasks  templates  vars

[root@ansible-server kubeadm]# vim templates/kubernetes.repo.j2
[kubernetes]
name=Kubernetes
baseurl=https://{{ KUBEADM_MIRRORS }}/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://{{ KUBEADM_MIRRORS }}/kubernetes/yum/doc/yum-key.gpg https://{{ KUBEADM_MIRRORS }}/kubernetes/yum/doc/rpm-package-key.gpg

[root@ansible-server kubeadm]# vim templates/kubeadm-config.yaml.j2 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: {{ ansible_default_ipv4.address }}
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01.example.local
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - {{ VIP }}
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: {{ VIP }}:6443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: {{ HARBOR_DOMAIN }}/google_containers
kind: ClusterConfiguration
kubernetesVersion: v{{ KUBEADM_VERSION }}
networking:
  dnsDomain: {{ domain }}
  podSubnet: {{ POD_SUBNET }}
  serviceSubnet: {{ SERVICE_SUBNET }}
scheduler: {}

#Set HARBOR_DOMAIN below to your own harbor domain, VIP to the VIP (virtual IP) from your keepalived configuration, POD_SUBNET to your planned Pod network, and SERVICE_SUBNET to your planned Service network
[root@ansible-server kubeadm]# vim vars/main.yml
KUBEADM_MIRRORS: mirrors.aliyun.com
KUBEADM_VERSION: 1.22.8
HARBOR_DOMAIN: harbor.raymonds.cc
USERNAME: admin
PASSWORD: 123456
VIP: 172.31.3.188
POD_SUBNET: 192.168.0.0/12
SERVICE_SUBNET: 10.96.0.0/12

[root@ansible-server kubeadm]# vim tasks/install_kubeadm_yum.yml
- name: set CentOS or Rocky kubernetes mirror warehouse
  template:
    src: kubernetes.repo.j2
    dest: /etc/yum.repos.d/kubernetes.repo
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: install CentOS or Rocky kubeadm for master
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }},kubectl-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.master
- name: install CentOS or Rocky kubeadm for node
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.node

[root@ansible-server kubeadm]# vim tasks/install_kubeadm_apt.yml
- name: delete lock files
  file:
    path: "{
    
    { item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu kubernetes depend on the package
  apt:
    name: apt-transport-https
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: import Ubuntu kubernetes key
  apt_key:
    url: https://{{ KUBEADM_MIRRORS }}/kubernetes/apt/doc/apt-key.gpg
  when:
    - ansible_distribution=="Ubuntu"
- name: import Ubuntu kubernetes installation source
  apt_repository:
    repo: "deb https://{
    
    { KUBEADM_MIRRORS }}/kubernetes/apt kubernetes-xenial main"
    filename: kubernetes
  when:
    - ansible_distribution=="Ubuntu"
- name: delete lock files
  file:
    path: "{
    
    { item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu kubeadm for master
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00,kubectl={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.master
- name: install Ubuntu kubeadm for node
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.node

[root@ansible-server kubeadm]# vim tasks/service.yml
- name: start kubelet
  systemd:
    name: kubelet
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server kubeadm]# vim tasks/docker_login.yml
- name: docker login
  shell:
    cmd: docker login -u {{ USERNAME }} -p {{ PASSWORD }} {{ HARBOR_DOMAIN }}
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm]# vim tasks/download_images.yml
- name: get kubeadm version
  shell:
    cmd: kubeadm config images list --kubernetes-version=v{{ KUBEADM_VERSION }} | awk -F "/"  '{print $NF}'
  register: KUBEADM_IMAGES_VERSION
  when:
    - ansible_hostname=="k8s-master01"
- name: download kubeadm image
  shell: |
    {% for i in KUBEADM_IMAGES_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm]# vim tasks/copy_kubeadm_config.yml
- name: copy kubeadm_config.yml file
  template:
    src: kubeadm-config.yaml.j2
    dest: /root/kubeadm-config.yaml
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm]# vim tasks/main.yml
- include: install_kubeadm_yum.yml
- include: install_kubeadm_apt.yml
- include: service.yml
- include: docker_login.yml
- include: download_images.yml
- include: copy_kubeadm_config.yml

[root@ansible-server kubeadm]# cd ../../
[root@ansible-server ansible]# tree roles/kubeadm/
roles/kubeadm/
├── tasks
│   ├── copy_kubeadm_config.yml
│   ├── docker_login.yml
│   ├── download_images.yml
│   ├── install_kubeadm_apt.yml
│   ├── install_kubeadm_yum.yml
│   ├── main.yml
│   └── service.yml
├── templates
│   ├── kubeadm-config.yaml.j2
│   └── kubernetes.repo.j2
└── vars
    └── main.yml

3 directories, 10 files

[root@ansible-server ansible]# vim kubeadm_role.yml
---
- hosts: master:node

  roles:
    - role: kubeadm

[root@ansible-server ansible]# ansible-playbook kubeadm_role.yml
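
Before initializing the cluster, confirm that the packages and the generated config landed where expected (a sketch; the group names and file path are the ones used above):

[root@ansible-server ansible]# ansible k8s_cluster -m shell -a 'kubeadm version -o short'
[root@ansible-server ansible]# ansible master01 -m shell -a 'ls -l /root/kubeadm-config.yaml'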

12. Cluster initialization

[root@k8s-master01 ~]# kubeadm init --config /root/kubeadm-config.yaml  --upload-certs
[init] Using Kubernetes version: v1.22.8
[preflight] Running pre-flight checks
	[WARNING FileExisting-tc]: tc not found in system path
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.example.local] and IPs [10.96.0.1 172.31.3.101 172.31.3.188]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [172.31.3.101 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [172.31.3.101 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 15.546267 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.22" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
b3202c8538092768b83473fc065d2f58450cf284d84d380cc44c033c7bc057f8
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 7t2weq.bjbawausm0jaxury
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:c5c3516af6157181f5d6b922b96d05461a13b4abbc8a5210f68bc3aebac729b1 \
	--control-plane --certificate-key b3202c8538092768b83473fc065d2f58450cf284d84d380cc44c033c7bc057f8

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:c5c3516af6157181f5d6b922b96d05461a13b4abbc8a5210f68bc3aebac729b1 

Configure the environment variable on the Master01 node so kubectl can access the Kubernetes cluster:

[root@k8s-master01 ~]# cat >> /root/.bashrc <<EOF
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF

[root@k8s-master01 ~]# source .bashrc 

#Kubectl auto-completion
#CentOS
[root@k8s-master01 ~]# yum -y install bash-completion

#Ubuntu
[root@k8s-master01 ~]# apt -y install bash-completion

[root@k8s-master01 ~]# source <(kubectl completion bash)
[root@k8s-master01 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc 
root@k8s-master01:~# exit
logout

Check the node status:

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES                  AGE   VERSION
k8s-master01   NotReady   control-plane,master   71s   v1.22.8

Because the cluster was installed with kubeadm init, all system components run as containers in the kube-system namespace. The Pod status can be checked now; note that the coredns Pods stay Pending until a CNI plugin (Calico, installed in a later step) is deployed:

[root@k8s-master01 ~]# kubectl get pods -n kube-system -o wide
NAME                                   READY   STATUS    RESTARTS   AGE   IP             NODE           NOMINATED NODE   READINESS GATES
coredns-566c4546cf-w4ltd               0/1     Pending   0          86s   <none>         <none>         <none>           <none>
coredns-566c4546cf-wq78f               0/1     Pending   0          86s   <none>         <none>         <none>           <none>
etcd-k8s-master01                      1/1     Running   0          86s   172.31.3.101   k8s-master01   <none>           <none>
kube-apiserver-k8s-master01            1/1     Running   0          86s   172.31.3.101   k8s-master01   <none>           <none>
kube-controller-manager-k8s-master01   1/1     Running   0          86s   172.31.3.101   k8s-master01   <none>           <none>
kube-proxy-f85bf                       1/1     Running   0          86s   172.31.3.101   k8s-master01   <none>           <none>
kube-scheduler-k8s-master01            1/1     Running   0          86s   172.31.3.101   k8s-master01   <none>           <none>

13. Highly available Masters

Add master02 and master03 by running the control-plane join command printed by kubeadm init (it can also be pushed from the ansible server, see the sketch below):

kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:c5c3516af6157181f5d6b922b96d05461a13b4abbc8a5210f68bc3aebac729b1 \
	--control-plane --certificate-key b3202c8538092768b83473fc065d2f58450cf284d84d380cc44c033c7bc057f8
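
Instead of logging in to each control-plane node, the same join command can be pushed from the ansible server, one host at a time (a sketch; the host pattern uses the master and master01 groups from the inventory, and the token/hash/certificate-key are the ones printed by kubeadm init above):

[root@ansible-server ansible]# ansible 'master:!master01' -f 1 -m shell -a "kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury --discovery-token-ca-cert-hash sha256:c5c3516af6157181f5d6b922b96d05461a13b4abbc8a5210f68bc3aebac729b1 --control-plane --certificate-key b3202c8538092768b83473fc065d2f58450cf284d84d380cc44c033c7bc057f8"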

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS     ROLES                  AGE     VERSION
k8s-master01                 NotReady   control-plane,master   3m29s   v1.22.8
k8s-master02.example.local   NotReady   control-plane,master   58s     v1.22.8
k8s-master03.example.local   NotReady   control-plane,master   6s      v1.22.8

14. Node configuration

Add node01, node02 and node03 with the worker join command (again, an Ansible ad-hoc sketch follows):

kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury \
	--discovery-token-ca-cert-hash sha256:c5c3516af6157181f5d6b922b96d05461a13b4abbc8a5210f68bc3aebac729b1
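
The worker join can be distributed the same way (a sketch using the node inventory group):

[root@ansible-server ansible]# ansible node -m shell -a "kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury --discovery-token-ca-cert-hash sha256:c5c3516af6157181f5d6b922b96d05461a13b4abbc8a5210f68bc3aebac729b1"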

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS     ROLES                  AGE     VERSION
k8s-master01                 NotReady   control-plane,master   4m36s   v1.22.8
k8s-master02.example.local   NotReady   control-plane,master   2m5s    v1.22.8
k8s-master03.example.local   NotReady   control-plane,master   73s     v1.22.8
k8s-node01.example.local     NotReady   <none>                 36s     v1.22.8
k8s-node02.example.local     NotReady   <none>                 24s     v1.22.8
k8s-node03.example.local     NotReady   <none>                 12s     v1.22.8

15. Install Calico

15.1 Install calico

[root@ansible-server ansible]# mkdir -p roles/calico/{tasks,vars,templates}
[root@ansible-server ansible]# cd roles/calico
[root@ansible-server calico]# ls
tasks  templates  vars

#Set HARBOR_DOMAIN below to your own harbor domain name
[root@ansible-server calico]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc

[root@ansible-server calico]# cat templates/calico-etcd.yaml.j2
---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # Populate the following with etcd TLS configuration if desired, but leave blank if
  # not using TLS for etcd.
  # The keys below should be uncommented and the values populated with the base64
  # encoded contents of each file that would be associated with the TLS data.
  # Example command for encoding a file contents: cat <file> | base64 -w 0
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
  # If you're using TLS enabled etcd uncomment the following.
  # You must also populate the Secret below with these files.
  etcd_ca: ""   # "/calico-secrets/etcd-ca"
  etcd_cert: "" # "/calico-secrets/etcd-cert"
  etcd_key: ""  # "/calico-secrets/etcd-key"
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"

  # Configure the MTU to use for workload interfaces and tunnels.
  # By default, MTU is auto-detected, and explicitly setting this field should not be required.
  # You can override auto-detection by providing a non-zero value.
  veth_mtu: "0"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "log_file_path": "/var/log/calico/cni/cni.log",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "etcd_key_file": "__ETCD_KEY_FILE__",
          "etcd_cert_file": "__ETCD_CERT_FILE__",
          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
# Source: calico/templates/calico-kube-controllers-rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Pods are monitored for changing labels.
  # The node controller monitors Kubernetes nodes.
  # Namespace and serviceaccount labels are used for policy.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - serviceaccounts
    verbs:
      - watch
      - list
      - get
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---

---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  # EndpointSlices are used for Service-based network policy rule
  # enforcement.
  - apiGroups: ["discovery.k8s.io"]
    resources:
      - endpointslices
    verbs:
      - watch 
      - list
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
  # Pod CIDR auto-detection on kubeadm needs access to config maps.
  - apiGroups: [""]
    resources:
      - configmaps
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: docker.io/calico/cni:v3.21.4
          command: ["/opt/cni/bin/install"]
          envFrom:
          - configMapRef:
              # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
              name: kubernetes-services-endpoint
              optional: true
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: docker.io/calico/pod2daemon-flexvol:v3.21.4
          volumeMounts:
          - name: flexvol-driver-host
            mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs calico-node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: docker.io/calico/node:v3.21.4
          envFrom:
          - configMapRef:
              # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
              name: kubernetes-services-endpoint
              optional: true
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Enable or Disable VXLAN on the default IP pool.
            - name: CALICO_IPV4POOL_VXLAN
              value: "Never"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the VXLAN tunnel device.
            - name: FELIX_VXLANMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          lifecycle:
            preStop:
              exec:
                command:
                - /bin/calico-node
                - -shutdown
          livenessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-live
              - -bird-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
            timeoutSeconds: 10
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-ready
              - -bird-ready
            periodSeconds: 10
            timeoutSeconds: 10
          volumeMounts:
            # For maintaining CNI plugin API credentials.
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
              readOnly: false
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
            - name: policysync
              mountPath: /var/run/nodeagent
            # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
            # parent directory.
            - name: sysfs
              mountPath: /sys/fs/
              # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
              # If the host is known to mount that filesystem already then Bidirectional can be omitted.
              mountPropagation: Bidirectional
            - name: cni-log-dir
              mountPath: /var/log/calico/cni
              readOnly: true
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        - name: sysfs
          hostPath:
            path: /sys/fs/
            type: DirectoryOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Used to access CNI logs.
        - name: cni-log-dir
          hostPath:
            path: /var/log/calico/cni
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      containers:
        - name: calico-kube-controllers
          image: docker.io/calico/kube-controllers:v3.21.4
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
          livenessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -l
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
            timeoutSeconds: 10
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r
            periodSeconds: 10
      volumes:
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0440

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system

---

# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict

apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers

---
# Source: calico/templates/calico-typha.yaml

---
# Source: calico/templates/configure-canal.yaml

---
# Source: calico/templates/kdd-crds.yaml

#Modify the following content
[root@ansible-server calico]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2 
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"

[root@ansible-server calico]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "{% for i in groups.master %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"#g' templates/calico-etcd.yaml.j2

[root@ansible-server calico]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2
  etcd_endpoints: "{% for i in groups.master %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
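
#The Jinja2 expression above renders to one etcd endpoint per master host; for the three masters used in this deployment it would expand roughly to the line below (shown only as an illustration, the actual IPs come from your inventory):
  etcd_endpoints: "https://172.31.3.101:2379,https://172.31.3.102:2379,https://172.31.3.103:2379"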

[root@ansible-server calico]# vim tasks/calico_file.yml
- name: copy calico-etcd.yaml file
  template:
    src: calico-etcd.yaml.j2
    dest: /root/calico-etcd.yaml
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico]# vim tasks/config.yml
- name: get ETCD_KEY key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'
  register: ETCD_KEY
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-key:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-key:) null'
    replace: '\1 {{ ETCD_KEY.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: get ETCD_CERT key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'
  register: ETCD_CERT
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-cert:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-cert:) null'
    replace: '\1 {{ ETCD_CERT.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: get ETCD_CA key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'
  when:
    - ansible_hostname=="k8s-master01"
  register: ETCD_CA
- name: Modify the ".*etcd-ca:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-ca:) null'
    replace: '\1 {{ ETCD_CA.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_ca:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_ca:) ""'
    replace: '\1 "/calico-secrets/etcd-ca"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_cert:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_cert:) ""'
    replace: '\1 "/calico-secrets/etcd-cert"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_key:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_key:) ""'
    replace: '\1 "/calico-secrets/etcd-key"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*CALICO_IPV4POOL_CIDR.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (- name: CALICO_IPV4POOL_CIDR)'
    replace: '\1'
  when:
    - ansible_hostname=="k8s-master01"
- name: get POD_SUBNET
  shell:
    cmd: cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'
  register: POD_SUBNET
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*192.168.0.0.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '#   (value:) "192.168.0.0/16"'
    replace: '  \1 "{
    
    { POD_SUBNET.stdout }}"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the "image:" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(.*image:) docker.io/calico(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico]# vim tasks/download_images.yml
- name: get calico version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' calico-etcd.yaml
  register: CALICO_VERSION
  when:
    - ansible_hostname=="k8s-master01"
- name: download calico image
  shell: |
    {% for i in CALICO_VERSION.stdout_lines %}
      docker pull registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
      docker tag registry.cn-beijing.aliyuncs.com/raymond9/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico]# vim tasks/install_calico.yml
- name: install calico
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f calico-etcd.yaml"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico]# vim tasks/main.yml
- include: calico_file.yml
- include: config.yml
- include: download_images.yml
- include: install_calico.yml

[root@ansible-server calico]# cd ../../
[root@ansible-server ansible]# tree roles/calico
roles/calico
├── tasks
│   ├── calico_file.yml
│   ├── config.yml
│   ├── download_images.yml
│   ├── install_calico.yml
│   └── main.yml
├── templates
│   └── calico-etcd.yaml.j2
└── vars
    └── main.yml

3 directories, 7 files

[root@ansible-server ansible]# vim calico_role.yml 
---
- hosts: master

  roles:
    - role: calico

[root@ansible-server ansible]# ansible-playbook calico_role.yml 
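
#Optional sanity check (assumption: the playbook has already run against k8s-master01): confirm on that node that the template was rendered and the etcd certificate paths were filled in before checking the pods.
[root@k8s-master01 ~]# grep -E "etcd_endpoints|etcd_ca|etcd_cert|CALICO_IPV4POOL_CIDR" /root/calico-etcd.yaml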

15.2 Verify calico

[root@k8s-master01 ~]# kubectl get pod -A |grep calico
kube-system   calico-kube-controllers-7dd7f59c79-qnqpq             1/1     Running   0             89s
kube-system   calico-node-8v2wj                                    1/1     Running   0             89s
kube-system   calico-node-c7lx5                                    1/1     Running   0             89s
kube-system   calico-node-n2kqq                                    1/1     Running   0             89s
kube-system   calico-node-plbsk                                    1/1     Running   0             89s
kube-system   calico-node-pz2nw                                    1/1     Running   0             89s
kube-system   calico-node-xlpm4                                    1/1     Running   0             89s

[root@k8s-master01 ~]# kubectl get nodes 
NAME                         STATUS   ROLES                  AGE   VERSION
k8s-master01                 Ready    control-plane,master   37m   v1.22.8
k8s-master02.example.local   Ready    control-plane,master   35m   v1.22.8
k8s-master03.example.local   Ready    control-plane,master   34m   v1.22.8
k8s-node01.example.local     Ready    <none>                 33m   v1.22.8
k8s-node02.example.local     Ready    <none>                 33m   v1.22.8
k8s-node03.example.local     Ready    <none>                 33m   v1.22.8
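
#Optional smoke test of the Pod network (a minimal sketch; it assumes the busybox image can be pulled on the nodes and that <another Pod's IP> is replaced with a real Pod IP on a different node):
[root@k8s-master01 ~]# kubectl run net-test --image=busybox --restart=Never -- sleep 3600
[root@k8s-master01 ~]# kubectl get pod -o wide
[root@k8s-master01 ~]# kubectl exec net-test -- ping -c 2 <another Pod's IP>
[root@k8s-master01 ~]# kubectl delete pod net-test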

16. Install Metrics

16.1 Install metrics

[root@ansible-server ansible]# mkdir -p roles/metrics/{files,vars,tasks}
[root@ansible-server ansible]# cd roles/metrics/
[root@ansible-server metrics]# ls
files  tasks  vars

#Set the NODE variable below to your own node IP addresses, and set HARBOR_DOMAIN to your own harbor domain name
[root@ansible-server metrics]# vim vars/main.yml
NODE:                                                                                                                                          
 - 172.31.3.108
 - 172.31.3.109
 - 172.31.3.110

HARBOR_DOMAIN: harbor.raymonds.cc

[root@ansible-server metrics]# cat files/components.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100

[root@ansible-server metrics]# vim files/components.yaml 
...
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
#Add the following content
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra- 
...
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
#Add the following content
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki 
...
      volumes:
      - emptyDir: {}
        name: tmp-dir
#Add the following content
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki 
...

[root@ansible-server metrics]# vim tasks/metrics_file.yml
- name: copy components.yaml file
  copy:
    src: components.yaml
    dest: /root/components.yaml

[root@ansible-server metrics]# vim tasks/config.yml
- name: transfer front-proxy-ca.crt file from master01 to node
  synchronize:
    src: /etc/kubernetes/pki/front-proxy-ca.crt
    dest: /etc/kubernetes/pki/front-proxy-ca.crt
    mode: pull
  delegate_to: "{
    
    { item }}"
  loop: 
    "{
    
    { NODE }}"
- name: Modify the "image:" line
  replace:
    path: /root/components.yaml
    regexp: '(.*image:) k8s.gcr.io/metrics-server(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server metrics]# vim tasks/download_images.yml
- name: get metrics version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' components.yaml
  register: METRICS_VERSION
- name: download metrics image
  shell: |
    {% for i in METRICS_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server metrics]# vim tasks/install_metrics.yml
- name: install metrics
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f components.yaml"

[root@ansible-server metrics]# vim tasks/main.yml 
- include: metrics_file.yml
- include: config.yml
- include: download_images.yml
- include: install_metrics.yml

[root@ansible-server metrics]# cd ../../
[root@ansible-server ansible]# tree roles/metrics/
roles/metrics/
├── files
│   └── components.yaml
├── tasks
│   ├── config.yml
│   ├── download_images.yml
│   ├── install_metrics.yml
│   ├── main.yml
│   └── metrics_file.yml
└── vars
    └── main.yml

3 directories, 7 files

[root@ansible-server ansible]# vim metrics_role.yml
---
- hosts: master01

  roles:
    - role: metrics

[root@ansible-server ansible]# ansible-playbook metrics_role.yml

16.2 Verify metrics

[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-9787b55bd-x75nn                       1/1     Running   0          18s
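
#If kubectl top reports errors, it can help to confirm that the aggregated metrics API is registered and shows AVAILABLE as True (standard kubectl command; the expected value describes a healthy install):
[root@k8s-master01 ~]# kubectl get apiservice v1beta1.metrics.k8s.io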

[root@k8s-master01 ~]# kubectl top node
NAME                         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01                 159m         7%     1542Mi          40%       
k8s-master02.example.local   133m         6%     1225Mi          32%       
k8s-master03.example.local   141m         7%     1182Mi          30%       
k8s-node01.example.local     63m          3%     768Mi           20%       
k8s-node02.example.local     62m          3%     762Mi           19%       
k8s-node03.example.local     68m          3%     799Mi           20% 
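
#Per-Pod metrics can be checked the same way; --sort-by is a standard kubectl top flag:
[root@k8s-master01 ~]# kubectl top pod -A --sort-by=cpu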

17. Install dashboard

17.1 Install dashboard

[root@ansible-server ansible]# mkdir -p roles/dashboard/{tasks,vars,files,templates}
[root@ansible-server ansible]# cd roles/dashboard/
[root@ansible-server dashboard]# ls
files  tasks  templates  vars

#Set HARBOR_DOMAIN below to your own harbor domain name
[root@ansible-server dashboard]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
NODEPORT: 30005

[root@ansible-server dashboard]# cat templates/recommended.yaml.j2
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.3.1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.6
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

[root@ansible-server dashboard]# vim templates/recommended.yaml.j2
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort # add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: {{ NODEPORT }} # add this line
  selector:
    k8s-app: kubernetes-dashboard
...

[root@ansible-server dashboard]# vim files/admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@ansible-server dashboard]# vim tasks/dashboard_file.yml
- name: copy recommended.yaml file
  template:
    src: recommended.yaml.j2
    dest: /root/recommended.yaml
- name: copy admin.yaml file
  copy:
    src: admin.yaml
    dest: /root/admin.yaml

[root@ansible-server dashboard]# vim tasks/config.yml
- name: Modify the "image:" line
  replace:
    path: /root/recommended.yaml
    regexp: '(.*image:) kubernetesui(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server dashboard]# vim tasks/download_images.yml
- name: get dashboard version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' recommended.yaml
  register: DASHBOARD_VERSION
- name: download dashboard image
  shell: |
    {% for i in DASHBOARD_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server dashboard]# vim tasks/install_dashboard.yml
- name: install dashboard
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f recommended.yaml -f admin.yaml"

[root@ansible-server dashboard]# vim tasks/main.yml
- include: dashboard_file.yml
- include: config.yml
- include: download_images.yml
- include: install_dashboard.yml

[root@ansible-server dashboard]# cd ../../
[root@ansible-server ansible]# tree roles/dashboard/
roles/dashboard/
├── files
│   └── admin.yaml
├── tasks
│   ├── config.yml
│   ├── dashboard_file.yml
│   ├── download_images.yml
│   ├── install_dashboard.yml
│   └── main.yml
├── templates
│   └── recommended.yaml.j2
└── vars
    └── main.yml

4 directories, 8 files

[root@ansible-server ansible]# vim dashboard_role.yml
---
- hosts: master01

  roles:
    - role: dashboard

[root@ansible-server ansible]# ansible-playbook dashboard_role.yml

17.2 Log in to the dashboard

https://172.31.3.101:30005
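
#Before opening the browser, you can confirm that the dashboard Pod is running and the Service is exposed on the expected NodePort (standard kubectl command):
[root@k8s-master01 ~]# kubectl get pod,svc -n kubernetes-dashboard
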
View the token value:

[root@k8s-master01 ~]#  kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-4cgk2
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 8ea51eb9-5c85-47f8-afef-d087162d6168

Type:  kubernetes.io/service-account-token

Data
====
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImhlemE5UEhFc0w2VWZabzlUV2k5c1RaQzZZbmxzMThmZ05ldkpnbWZGeUkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTRjZ2syIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI4ZWE1MWViOS01Yzg1LTQ3ZjgtYWZlZi1kMDg3MTYyZDYxNjgiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.YQVpRA-laaOkOPKnKcHRuOsTj7bNiu-1woJlOC-OWu6mrwOvHGnxC7ru5ugwXQxYDbJXgVX3CzQDbZ1j_6RI4SdvC2H28E2GNtUxVLfKaRMBIiQ1aWv6OmYlQMINqfjd6ZR7m9gwO0iLKkMSrwxZw19ydE0ZlymkImeboSjST0z4nXZvSsRcuZjR52mOLru75E3bYCUiPWAypRRI3z0ZHl_JZ9Lurq3jWlxD_ooYQ17_qVcK_ymc3FxkiGQ9NcMPo3WdQEa-YVOQa7K1_SWMkRBYnOEB-NNvi9SQerWmAglndd9DsBGB3n63BlI05gtfUiH0lUBuj7FIyP7tYYMPdA
ca.crt:     1066 bytes
namespace:  11 bytes
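
#If only the token string is needed, it can also be extracted directly (a sketch using standard kubectl jsonpath; it assumes the token secret is still auto-created for the admin-user ServiceAccount, which is the case on v1.22):
[root@k8s-master01 ~]# kubectl -n kube-system get secret $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d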


18. Some required configuration changes

Change kube-proxy to ipvs mode. Because the ipvs configuration was commented out when the cluster was initialized, it has to be modified manually:

Run the following on the master01 node

[root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
iptables

[root@k8s-master01 ~]# kubectl edit cm kube-proxy -n kube-system
    mode: "ipvs"

Update the kube-proxy Pods:

[root@k8s-master01 ~]# kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system
daemonset.apps/kube-proxy patched
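
#Optionally wait for the restarted kube-proxy Pods before re-testing the proxy mode:
[root@k8s-master01 ~]# kubectl -n kube-system rollout status daemonset kube-proxy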

Verify the kube-proxy mode

[root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
ipvs

[root@k8s-master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.17.0.1:30005 rr
  -> 192.169.111.130:8443         Masq    1      0          0         
TCP  172.31.3.101:30005 rr
  -> 192.169.111.130:8443         Masq    1      0          0         
TCP  192.162.55.64:30005 rr
  -> 192.169.111.130:8443         Masq    1      0          0         
TCP  10.96.0.1:443 rr
  -> 172.31.3.101:6443            Masq    1      0          0         
  -> 172.31.3.102:6443            Masq    1      0          0         
  -> 172.31.3.103:6443            Masq    1      0          0         
TCP  10.96.0.10:53 rr
  -> 192.167.195.129:53           Masq    1      0          0         
  -> 192.169.111.129:53           Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 192.167.195.129:9153         Masq    1      0          0         
  -> 192.169.111.129:9153         Masq    1      0          0         
TCP  10.99.1.2:8000 rr
  -> 192.167.195.130:8000         Masq    1      0          0         
TCP  10.101.227.108:443 rr
  -> 192.169.111.130:8443         Masq    1      0          0         
TCP  10.109.223.212:443 rr
  -> 192.170.21.193:4443          Masq    1      0          0         
TCP  127.0.0.1:30005 rr
  -> 192.169.111.130:8443         Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 192.167.195.129:53           Masq    1      0          0         
  -> 192.169.111.129:53           Masq    1      0          0 


Reposted from blog.csdn.net/qq_25599925/article/details/123387713