| IP | HOSTNAME | ROLE |
| --- | --- | --- |
| 192.168.72.39 | k8s-01 | master&node |
| 192.168.72.40 | k8s-02 | master&node |
| 192.168.72.41 | k8s-03 | master&node |
| 192.168.72.42 | k8s-04 | node |
| 192.168.72.43 | k8s-05 | node |
| SERVER | VERSION |
| --- | --- |
| Kubernetes | 1.19.7 |
| Etcd | 3.4.12 |
| Docker | 19.03.9 |
| Flannel | 0.12.0 |
Linux:~
NAME="SLES"
VERSION="12-SP3"
VERSION_ID="12.3"
PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"
ID="sles"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:12:sp3"
Linux:~
4.4.73-5-default
- 注:suse 12 sp3 默认的内核是 4.4.73;如内核版本小于 4.x,则需要升级内核,因为 Docker overlay2 需要使用 kernel 4.x 版本
- k8s-master 的配置不能小于 2c2g,k8s-node 可以给 1c1g;master 数量需为奇数,且不能少于 3 个;master 和 node 节点可以复用。8G 内存玩起来会很憋屈,玩过的都知道(哭唧唧~~~)
- 本次部署用到的安装包,都已经上传至百度云了(合计379MB),如果有担心不安全,可以在github上下载,就是会很慢
- 如果没有百度会员的话,还是直接github下载吧,利用迅雷,会快一点
- 链接:https://pan.baidu.com/s/1r5v1czYb1Bk2t8mDbWB4Bw
- 提取码:rwuh
0、环境准备
0.0、修改主机名
Linux:~
0.1、添加hosts解析
k8s-01:~
192.168.72.39 k8s-01
192.168.72.40 k8s-02
192.168.72.41 k8s-03
192.168.72.42 k8s-04
192.168.72.43 k8s-05
EOF
0.2、配置ssh免密
k8s-01:~
expect-5.45-18.56.x86_64
"如果没有expect,则执行如下操作(注:需要网络或者本地源)"
k8s-01:~
# Generate a passphrase-less RSA key pair and push the public key to every
# node with ssh-copy-id; expect answers the host-key confirmation and the
# password prompt non-interactively.
# NOTE: replace 123.com with the real root password of your hosts.
ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa -q
for host in k8s-01 k8s-02 k8s-03 k8s-04 k8s-05
do
expect -c "
spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@${host}
expect {
    \"*yes/no*\" {send \"yes\r\"; exp_continue}
    \"*Password*\" {send \"123.com\r\"; exp_continue}
}"
# FIX: the original had a second identical \"*Password*\" branch below the
# first; expect matches patterns in listed order, so with exp_continue on the
# first branch the duplicate could never fire — removed as dead code.
done
- 注:我本机的密码是 123.com,注意修改成自己本机的密码
- 后续的操作,最后有脚本;除了 0.10、配置脚本参数文件 需要自己创建。切记,需要先完成免密和 hosts 文件的创建
0.3、发送hosts解析文件到其他节点,并修改hostname
# Copy the local /etc/hosts to every node and pin each node's static
# hostname so it matches the entry in that file.
for node in k8s-01 k8s-02 k8s-03 k8s-04 k8s-05; do
  printf "\e[1;34m${node}\e[0m\n"
  scp /etc/hosts "${node}":/etc/hosts
  ssh "root@${node}" "hostnamectl set-hostname --static ${node}"
done
0.4、更新PATH变量
"所有的k8s安装相关的文件,都存放在/opt/k8s目录下"
k8s-01:~
k8s-01:~
k8s-01:~
/opt/k8s/bin:/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin:/usr/games
0.5、安装依赖包
k8s-01:~
0.6、关闭防火墙以及swap分区
k8s-01:~
k8s-01:~
k8s-01:~
"切记,所有节点都要关闭swap分区,否则kubelet会启动失败"
k8s-01:~
k8s-01:~
- 注:suse默认安装的时候是没有开启
selinux
的,如果有开启,记得关闭
一下
0.7、开启内核模块
k8s-01:~
# Load the IPVS scheduler modules (rr/wrr/sh) plus nf_conntrack_ipv4 and
# br_netfilter, needed for kube-proxy IPVS mode and bridged pod traffic.
# NOTE(review): the chain appears truncated — a leading "modprobe -- ip_vs && \"
# was likely lost from the prompt line above; confirm ip_vs is loaded first.
modprobe -- ip_vs_rr && \
modprobe -- ip_vs_wrr && \
modprobe -- ip_vs_sh && \
modprobe -- nf_conntrack_ipv4 && \
modprobe -- br_netfilter
k8s-01:~
0.8、内核优化
k8s-01:~
# Kernel parameters required by Kubernetes (heredoc body written to
# /etc/sysctl.d/kubernetes.conf; '#' lines below are valid sysctl comments).
# Make bridged (pod) traffic visible to iptables, and enable IP forwarding.
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
# Raise inotify/file-handle limits for the many watches kubelet/containers use.
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
k8s-01:~
vm.swappiness=0
禁止使用 swap
空间,只有当系统 OOM
时才允许使用它
vm.overcommit_memory=1
不检查物理内存是否够用
vm.panic_on_oom=0
开启 OOM
net.ipv4.tcp_tw_recycle=0
关闭tcp_tw_recycle
,否则和NAT冲突,会导致服务不通
net.ipv6.conf.all.disable_ipv6=1
关闭 IPV6
,防止触发Docker BUG
0.9、创建安装所需目录
"每个节点都需要创建"
for host in k8s-01 k8s-02 k8s-03 k8s-04 k8s-05
do
printf "\e[1;34m${host}\e[0m\n"
ssh root@${host} "mkdir -p /opt/k8s/{bin,packages,ssl,conf,server} /etc/{kubernetes,etcd}/cert"
done
"最终的目录结构"
k8s-01:~
/opt/k8s/
├── bin
├── conf
├── packages
├── server
└── ssl
5 directories, 0 files
k8s-01:~
/etc/kubernetes/
└── cert
1 directory, 0 files
k8s-01:~
/etc/etcd/
└── cert
1 directory, 0 files
0.10、配置脚本参数文件
- 后续的部署,将直接使用变量进行代替,以减少出错的概率,相关的信息,请修改成自己的环境信息,并分发到所有节点的
/opt/k8s/bin
目录下,文件名称为 k8s-env.sh
(可自行定义文件名称,后续的部署记得修改
即可)
# Cluster-wide environment file (/opt/k8s/bin/k8s-env.sh). Source it before
# running any deployment step; adjust IPs/interface to your own environment.

# All cluster node IPs and hostnames.
export NODE_IPS=( 192.168.72.39 192.168.72.40 192.168.72.41 192.168.72.42 192.168.72.43 )
export NODE_NAMES=( k8s-01 k8s-02 k8s-03 k8s-04 k8s-05 )
# Control-plane (master) subset.
export MASTER_IPS=( 192.168.72.39 192.168.72.40 192.168.72.41 )
export MASTER_NAMES=( k8s-01 k8s-02 k8s-03 )
# etcd client endpoints (2379) and peer URLs (2380).
export ETCD_ENDPOINTS="https://192.168.72.39:2379,https://192.168.72.40:2379,https://192.168.72.41:2379"
export ETCD_NODES="k8s-01=https://192.168.72.39:2380,k8s-02=https://192.168.72.40:2380,k8s-03=https://192.168.72.41:2380"
export ETCD_IPS=( 192.168.72.39 192.168.72.40 192.168.72.41 )
# Network interface flannel binds to.
export IFACE="eth0"
export ETCD_DATA_DIR="/opt/k8s/server/etcd/data"
export ETCD_WAL_DIR="/opt/k8s/server/etcd/wal"
# kube-apiserver endpoint (load-balanced port in front of the masters).
export KUBE_APISERVER="https://192.168.72.39:8443"
export K8S_DIR="/opt/k8s/server/k8s"
# FIX: SERVICE_CIDR and CLUSTER_CIDR were the only variables not exported,
# so child processes (the deployment scripts) could not see them.
export SERVICE_CIDR="10.254.0.0/16"
export CLUSTER_CIDR="172.30.0.0/16"
export NODE_PORT_RANGE="30000-32767"
export FLANNEL_ETCD_PREFIX="/kubernetes/network"
# First service IP (kubernetes svc) and the cluster DNS service IP.
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"
export CLUSTER_DNS_SVC_IP="10.254.0.2"
export CLUSTER_DNS_DOMAIN="cluster.local"
export PATH=$PATH:/opt/k8s/bin
0.11、环境配置脚本
# Per-node bootstrap: distribute hosts/sysctl/rc.local, install packages,
# disable swap and the firewall, load kernel modules and create the k8s
# directory tree on every node. Requires the ssh trust from step 0.2.

# Kubernetes-required kernel parameters.
cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

# Persist module loading across reboots.
# FIX: write with > instead of >> — appending re-added the shebang mid-file
# and duplicated the modprobe lines every time this script was re-run.
cat > /etc/rc.d/rc.local <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
modprobe -- br_netfilter
EOF

for host in k8s-01 k8s-02 k8s-03 k8s-04 k8s-05
do
printf "\e[1;34m${host}\e[0m\n"
scp /etc/hosts ${host}:/etc/hosts
scp /etc/sysctl.d/kubernetes.conf ${host}:/etc/sysctl.d/kubernetes.conf
scp /etc/rc.d/rc.local ${host}:/etc/rc.d/rc.local
ssh root@${host} "hostnamectl set-hostname --static ${host}"
ssh root@${host} "zypper in -y ntp ipset iptables curl sysstat wget"
# Disable swap now and on future boots — kubelet refuses to start with swap on.
ssh root@${host} "swapoff -a"
ssh root@${host} "sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab"
ssh root@${host} "modprobe -- ip_vs && \
modprobe -- ip_vs_rr && \
modprobe -- ip_vs_wrr && \
modprobe -- ip_vs_sh && \
modprobe -- nf_conntrack_ipv4 && \
modprobe -- br_netfilter"
ssh root@${host} "chmod +x /etc/rc.d/rc.local"
ssh root@${host} "sysctl -p /etc/sysctl.d/kubernetes.conf"
ssh root@${host} "systemctl disable SuSEfirewall2.service --now"
ssh root@${host} "iptables -F && iptables -X && \
iptables -F -t nat && iptables -X -t nat && \
iptables -P FORWARD ACCEPT"
# FIX: escape \$PATH so the remote /etc/profile gets the literal string
# 'PATH=$PATH:/opt/k8s/bin' — unescaped, this machine's fully-expanded PATH
# was baked into every node. The grep guard keeps re-runs from appending
# duplicate lines.
ssh root@${host} "grep -q '/opt/k8s/bin' /etc/profile || echo 'PATH=\$PATH:/opt/k8s/bin' >> /etc/profile"
# NOTE: the former 'ssh ... "source /etc/profile"' only affected that single
# non-interactive ssh session, so it was removed; new logins read
# /etc/profile automatically.
ssh root@${host} "mkdir -p /opt/k8s/{bin,packages,ssl,conf,server} /etc/{kubernetes,etcd}/cert"
done