Kubernetes v1.14.0: kubelet Deployment

kubelet Preparation

1. Server Configuration

Public IP       Container IP    CPU   Memory(GB)   Disk   Hostname
192.168.4.1     172.172.2.1     8     16           200G   k8s-vip-01
192.168.4.2     172.172.2.2     8     16           200G   k8s-vip-02
192.168.4.3     172.172.2.3     64    256          4T     k8s-node-01
192.168.4.4     172.172.2.4     64    256          4T     k8s-node-02
192.168.4.5     172.172.2.5     64    256          4T     k8s-node-03
192.168.31.1    172.172.31.1    8     16           200G   k8s-ingress-01
192.168.32.2    172.172.31.2    8     16           200G   k8s-ingress-02

2. kubelet Binary Preparation

cd /apps/work/k8s/node
wget https://storage.googleapis.com/kubernetes-release/release/v1.14.0/kubernetes-node-linux-amd64.tar.gz
tar -xvf kubernetes-node-linux-amd64.tar.gz 
cd kubernetes/node
mkdir conf kubelet-plugins  log  ssl

3. Generate the Bootstrap Token

cd /apps/work/k8s/node/kubernetes
# Generate the bootstrap token (format: <6-char id>.<16-char secret>)
echo "$(head -c 6 /dev/urandom | md5sum | head -c 6)"."$(head -c 16 /dev/urandom | md5sum | head -c 16)"
8a7988.f77fde53170b9d91
### Create the Bootstrap Token Secret
vi bootstrap.secret.yaml
apiVersion: v1
kind: Secret
metadata:
  # Name MUST be of form "bootstrap-token-<token id>"
  name: bootstrap-token-8a7988
  namespace: kube-system

# Type MUST be 'bootstrap.kubernetes.io/token'
type: bootstrap.kubernetes.io/token
stringData:
  # Human readable description. Optional.
  description: "The default bootstrap token generated by 'kubelet'."

  # Token ID and secret. Required.
  token-id: 8a7988
  token-secret: f77fde53170b9d91

  # Expiration. Optional. Can be set far in the future (e.g. 999 years) so it effectively never expires.
  expiration: "2019-09-10T00:00:11Z"

  # Allowed usages.
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"

  # Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress

### Create the Kubernetes resource
kubectl create -f bootstrap.secret.yaml
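### To confirm the Secret landed where expected (the name is derived from the token id generated above):
kubectl -n kube-system get secret bootstrap-token-8a7988 -o yaml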
### Create bootstrap.clusterrole.yaml
vi bootstrap.clusterrole.yaml
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]

kubectl create -f bootstrap.clusterrole.yaml
### Create apiserver-to-kubelet.yaml
vi apiserver-to-kubelet.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kubernetes-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kubernetes
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubernetes-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
kubectl create -f apiserver-to-kubelet.yaml
### List the created tokens
kubeadm token list
# Allow users in the system:bootstrappers group to create CSRs
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
# Auto-approve CSRs from the system:bootstrappers group requesting their initial TLS bootstrapping client certificates
kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:bootstrappers
# Auto-approve CSRs from the system:nodes group renewing the kubelet client certificate used to talk to the apiserver
kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes

# Auto-approve CSRs from the system:nodes group renewing the kubelet serving certificate for the 10250 API port
kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
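# A quick way to verify that all four bindings were created:
kubectl get clusterrolebinding kubelet-bootstrap node-client-auto-approve-csr node-client-auto-renew-crt node-server-auto-renew-crt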

4. Create bootstrap.kubeconfig

cd /apps/work/k8s/node/kubernetes/node/conf
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=https://api.k8s.niuke.local:6443 \
  --kubeconfig=bootstrap.kubeconfig
# Set client authentication parameters
kubectl config set-credentials system:bootstrap:8a7988 \
  --token=8a7988.f77fde53170b9d91 \
  --kubeconfig=bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:bootstrap:8a7988 \
  --kubeconfig=bootstrap.kubeconfig
# Use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
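# A quick sanity check of the generated file (kubectl masks embedded credentials in the output):
kubectl config view --kubeconfig=bootstrap.kubeconfig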

5. Notes on Special kubelet Flags

--rotate-server-certificates=true: serving-certificate CSRs are not auto-approved and must be approved manually, e.g.:
kubectl get csr | grep system:node | grep Pending | while read name number; do kubectl certificate approve $name; done
--node-labels=node-role.kubernetes.io/k8s-vip=true: change the label to match each node's role in the cluster.

6. Create the kubelet Configuration (example for k8s-vip-01; adapt it for the other nodes)

cd /apps/work/k8s/node/kubernetes/node/conf
vi kubelet
KUBELET_OPTS="--bootstrap-kubeconfig=/apps/kubernetes/conf/bootstrap.kubeconfig \
              --fail-swap-on=false \
              --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/apps/cni/bin \
              --kubeconfig=/apps/kubernetes/conf/kubelet.kubeconfig \
              --address=172.172.2.1 \
              --node-ip=172.172.2.1 \
              --hostname-override=k8s-vip-01 \
              --cluster-dns=10.64.0.2 \
              --cluster-domain=niuke.local \
              --authorization-mode=Webhook \
              --authentication-token-webhook=true \
              --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
              --rotate-certificates=true \
              --rotate-server-certificates=true \
              --cgroup-driver=cgroupfs \
              --allow-privileged=true \
              --healthz-port=10248 \
              --healthz-bind-address=172.172.2.1 \
              --cert-dir=/apps/kubernetes/ssl \
              --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
              --node-labels=node-role.kubernetes.io/k8s-vip=true \
              --serialize-image-pulls=false \
              --enforce-node-allocatable=pods,kube-reserved,system-reserved \
              --pod-manifest-path=/apps/work/kubernetes/manifests \
              --runtime-cgroups=/systemd/system.slice/kubelet.service \
              --kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
              --system-reserved-cgroup=/systemd/system.slice \
              --root-dir=/apps/work/kubernetes/kubelet \
              --log-dir=/apps/kubernetes/log \
              --alsologtostderr=true \
              --logtostderr=false \
              --anonymous-auth=true \
              --image-gc-high-threshold=70 \
              --image-gc-low-threshold=50 \
              --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
              --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
              --eviction-hard=memory.available<500Mi,nodefs.available<10% \
              --sync-frequency=30s \
              --resolv-conf=/etc/resolv.conf \
              --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
              --v=2 \
              --image-pull-progress-deadline=30 \
              --event-burst=30 \
              --event-qps=15 \
              --kube-api-burst=30 \
              --kube-api-qps=15 \
              --max-pods=200 \
              --pods-per-core=10 \
              --read-only-port=0 \
              --volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume"
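Since --address, --node-ip, --hostname-override, and --healthz-bind-address differ per node, a minimal sketch for stamping out per-node copies of this file with sed (NODE_IP and NODE_NAME are illustrative placeholders, not part of the original setup):
# Substitute this template's IP and hostname with the target node's values
NODE_IP=172.172.2.2 NODE_NAME=k8s-vip-02
sed -e "s/172.172.2.1/${NODE_IP}/g" -e "s/k8s-vip-01/${NODE_NAME}/g" kubelet > kubelet.${NODE_NAME}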

7. Create kubelet.service

cd /apps/work/k8s/node/kubernetes/
vi kubelet.service 
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/apps/work/kubernetes
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity

EnvironmentFile=-/apps/kubernetes/conf/kubelet
ExecStart=/apps/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
RestartSec=5
KillMode=process
[Install]
WantedBy=multi-user.target

8. CNI Plugin Preparation

mkdir /apps/work/k8s/cni
cd /apps/work/k8s/cni
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
tar -xvf cni-plugins-amd64-v0.7.1.tgz
rm -rf cni-plugins-amd64-v0.7.1.tgz
mkdir bin
mv * bin
### Create the 10-kuberouter.conf template
mkdir -p cni/net.d
cd cni/net.d
vi 10-kuberouter.conf
{
  "name":"kubernetes",
  "type":"bridge",
  "bridge":"kube-bridge",
  "isDefaultGateway":true,
  "ipam": {
    "type":"host-local"
  }
}
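A quick way to validate the JSON before distributing it (jq is installed with the dependencies in step 10):
jq . 10-kuberouter.conf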

9. lxcfs (already compiled in the installation-preparation article)

Here we only distribute the files.

10. Install Dependencies (run on the node machines)

yum install -y epel-release
yum install -y yum-utils ipvsadm telnet wget net-tools conntrack ipset jq iptables curl sysstat libseccomp socat nfs-utils fuse fuse-devel ceph-common
ansible -i host 'node:ingress:vip' -m shell -a "yum install -y epel-release"
ansible -i host 'node:ingress:vip' -m shell -a "yum install -y yum-utils ipvsadm telnet wget net-tools conntrack ipset jq iptables curl sysstat libseccomp socat nfs-utils fuse fuse-devel ceph-common"

11. Copy k8s-ca.pem

cd /apps/work/k8s/node/kubernetes/node/ssl
mkdir k8s
cp -pdr /apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem ./k8s/

12. Distribute and Start lxcfs

cd /apps/work/k8s/binlxfs
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m copy -a "src=lxcfs dest=/usr/local/bin/lxcfs owner=root group=root mode=755"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m copy -a "src=lxcfs.service dest=/usr/lib/systemd/system/lxcfs.service"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m copy -a "src=lib dest=/usr/local/"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "mkdir -p /var/lib/lxcfs/"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "systemctl daemon-reload && systemctl start lxcfs && systemctl enable lxcfs"

13. Install Docker (run on the node machines)

Use the Aliyun mirror:
cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge]
name=Docker CE Edge - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-debuginfo]
name=Docker CE Edge - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-source]
name=Docker CE Edge - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF

### Install Docker dependencies
yum install -y python-pip python-devel yum-utils device-mapper-persistent-data lvm2
## Install Docker
yum install -y docker-ce
### Modify /lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd -H fd:// --graph /apps/docker -H unix:///var/run/docker.sock  --max-concurrent-downloads=20
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
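After restarting, it's worth confirming Docker picked up the new data root (output wording varies by Docker version):
docker info | grep -i "docker root dir"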

14. Distribute CNI to the Node Machines

cd /apps/work/k8s/cni
# Create the remote cni directory
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "mkdir -p /apps/cni"
# Distribute the cni bin files
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m copy -a "src=bin dest=/apps/cni/ owner=root group=root mode=755"
# Distribute the config template
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m copy -a "src=cni dest=/etc/"

15. Distribute kubelet to the Node Machines

cd /apps/work/k8s/node/kubernetes
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "mkdir -p /apps/kubernetes/kubelet-plugins/volume"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m copy -a "src=node/ dest=/apps/kubernetes/"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "mkdir -p /apps/work/kubernetes/{kubelet,manifests}"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "chmod u+x /apps/kubernetes/bin/*"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m copy -a "src=kubelet.service dest=/usr/lib/systemd/system/"
Note: the IPs in /apps/kubernetes/conf/kubelet differ per node; edit the file on each node or distribute it with templated variables.

16. Start kubelet

ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "systemctl daemon-reload"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "systemctl enable kubelet"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "systemctl start kubelet"
ansible -i /apps/work/k8s/host 'node:ingress:vip' -m shell -a "systemctl status kubelet"

17. Verify the Nodes and Manually Approve kubelet Serving Certificates

kubectl get csr
Because the kubelet is configured with --rotate-server-certificates=true, the serving-certificate CSRs must be approved manually:
kubectl get csr | grep system:node | grep Pending | while read name number; do kubectl certificate approve $name; done
# Check node status
kubectl get node
[root@jenkins tasks]# kubectl get node
NAME             STATUS   ROLES         AGE   VERSION
k8s-vip-01       Ready    k8s-vip       26d   v1.14.0
k8s-vip-02       Ready    k8s-vip       26d   v1.14.0
k8s-node-01      Ready    k8s-node      26d   v1.14.0
k8s-node-02      Ready    k8s-node      26d   v1.14.0
k8s-node-03      Ready    k8s-node      26d   v1.14.0
k8s-ingress-01   Ready    k8s-ingress   26d   v1.14.0
k8s-ingress-02   Ready    k8s-ingress   26d   v1.14.0
### Inspect the issued node certificates
cd /apps/kubernetes/ssl
[root@k8s-vip-01 ssl]# ll
total 12
drwxr-xr-x 2 k8s  root 4096 Apr 23 09:22 k8s
-rw------- 1 root root 1273 May  5 14:25 kubelet-client-2019-05-05-14-25-10.pem
lrwxrwxrwx 1 root root   59 May  5 14:25 kubelet-client-current.pem -> /apps/kubernetes/ssl/kubelet-client-2019-05-05-14-25-10.pem
-rw------- 1 root root 1309 May  5 15:06 kubelet-server-2019-05-05-15-06-57.pem
lrwxrwxrwx 1 root root   59 May  5 15:06 kubelet-server-current.pem -> /apps/kubernetes/ssl/kubelet-server-2019-05-05-15-06-57.pem
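To check the validity window of a rotated certificate (openssl is assumed to be present):
openssl x509 -in kubelet-server-current.pem -noout -dates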

18. Node Playbook

.
├── cni
│   ├── defaults
│   ├── files
│   │   ├── 10-kuberouter.conf
│   │   └── bin
│   │       ├── bridge
│   │       ├── dhcp
│   │       ├── flannel
│   │       ├── host-device
│   │       ├── host-local
│   │       ├── ipvlan
│   │       ├── loopback
│   │       ├── macvlan
│   │       ├── portmap
│   │       ├── ptp
│   │       ├── sample
│   │       ├── tuning
│   │       └── vlan
│   ├── handlers
│   ├── meta
│   ├── tasks
│   │   └── main.yml
│   ├── templates
│   └── vars
├── docker_client
│   ├── defaults
│   ├── files
│   │   ├── docker-compose
│   │   ├── docker-enter
│   │   └── docker-enter.old
│   ├── handlers
│   ├── meta
│   ├── tasks
│   │   └── main.yml
│   ├── templates
│   │   └── docker.repo
│   └── vars
├── kubelet
│   ├── defaults
│   ├── files
│   │   ├── bin
│   │   │   ├── kubeadm
│   │   │   ├── kubectl
│   │   │   ├── kubelet
│   │   │   └── kube-proxy
│   │   └── ssl
│   │       └── k8s
│   │           └── k8s-ca.pem
│   ├── handlers
│   ├── meta
│   ├── tasks
│   │   └── main.yml
│   ├── templates
│   │   ├── conf
│   │   │   ├── bootstrap.kubeconfig
│   │   │   └── kubelet
│   │   └── kubelet.service
│   └── vars
├── lxcfs
│   ├── defaults
│   ├── files
│   │   ├── lib
│   │   │   └── lxcfs
│   │   │       ├── liblxcfs.la
│   │   │       └── liblxcfs.so
│   │   ├── lxcfs
│   │   └── lxcfs.service
│   ├── handlers
│   ├── meta
│   ├── tasks
│   │   └── main.yml
│   ├── templates
│   └── vars
└── rpm
    ├── defaults
    ├── files
    ├── handlers
    ├── meta
    ├── tasks
    │   └── main.yml
    ├── templates
    └── vars

18.1 rpm

- name: Yum Install
  yum: name="{{ item }}" state=latest
  with_items:
      - yum-plugin-fastestmirror
      - epel-release
  become: yes
  become_method: su
- name: rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
  raw: rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
- name: rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
  raw: rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
- name: yum -y --enablerepo=elrepo-kernel install kernel-ml
  shell: yum -y --enablerepo=elrepo-kernel install kernel-ml
- name: grub2-set-default 0
  shell: grub2-set-default 0
- name: grub2-mkconfig -o /boot/grub2/grub.cfg
  shell: grub2-mkconfig -o /boot/grub2/grub.cfg
- name: Yum Install
  yum: name="{{ item }}" state=latest
  with_items:
      - yum-utils
      - ipvsadm
      - telnet
      - wget
      - net-tools
      - conntrack
      - ipset
      - jq
      - iptables
      - curl
      - sysstat
      - libseccomp
      - socat
      - nfs-utils
      - fuse
      - fuse-devel
      - ceph-common
  become: yes
  become_method: su

18.2 lxcfs

- name: copy /usr/local/lib/lxcfs
  copy: src=lib dest=/usr/local/
- name: up lxcfs
  copy: src=lxcfs dest=/usr/local/bin/lxcfs owner=root group=root mode=755
- name: up lxcfs.service
  copy: src=lxcfs.service dest=/usr/lib/systemd/system/lxcfs.service
- name: create /var/lib/lxcfs
  shell: mkdir -p /var/lib/lxcfs
- name: systemctl daemon-reload
  shell: systemctl daemon-reload
- name: systemctl enable lxcfs
  shell: systemctl enable lxcfs
- name: systemctl start lxcfs 
  shell: systemctl start lxcfs

18.3 cni

- name: create cni
  shell: mkdir -p {{ k8s_path }}/cni
- name: copy to cni
  copy: src=bin dest={{ k8s_path }}/cni/ owner=root group=root mode=755
- name: create /etc/cni/net.d
  shell: mkdir -p /etc/cni/net.d
- name: copy 10-kuberouter.conf
  copy: src=10-kuberouter.conf dest=/etc/cni/net.d 

18.4 docker

- name: yum epel-release
  yum: name=epel-release state=present
- name: yum python-pip
  yum: name={{ item }} state=present
  with_items:
      - python-pip
      - python-devel
      - yum-utils
      - device-mapper-persistent-data
      - lvm2
- pip: name={{ item }}
  with_items:
      - docker-py
- stat: path=/usr/bin/docker
  register: docker_path_register
- name: yum old docker
  yum: name=docker* state=removed
  when: docker_path_register.stat.exists == True
- name: cp docker.repo client
  template: src=docker.repo dest=/etc/yum.repos.d/docker.repo
- name: yum install docker
  yum: name=docker-ce state=present
- lineinfile:
    dest: /lib/systemd/system/docker.service
    regexp: '^ExecStart='
    line: 'ExecStart=/usr/bin/dockerd -H fd:// --graph {{ graph }} -H unix:///var/run/docker.sock --max-concurrent-downloads=20'
- name: systemctl daemon-reload
  shell: systemctl daemon-reload
- name: enabled service docker.service
  service: name=docker.service enabled=yes
- name: start service docker.service
  service: name=docker state=started
- name: cp docker-compose
  copy: src=docker-compose dest=/usr/bin/docker-compose owner=root group=root mode=755
- name: restart service docker.service
  service: name=docker state=restarted

18.5 kubelet

- name: create {{ k8s_path }}/kubernetes/{log,kubelet-plugins,conf}
  shell: mkdir -p {{ k8s_path }}/kubernetes/{log,kubelet-plugins,conf} && mkdir -p {{ k8s_path }}/work/kubernetes/manifests 
- name: copy kubelet to {{ k8s_path }}/kubernetes
  copy: src=bin dest={{ k8s_path }}/kubernetes/ owner=root group=root mode=755
- name: copy kubelet ssl
  copy: src=ssl dest={{ k8s_path }}/kubernetes/
- name: copy to kubelet config
  template: src=conf/{{ item }} dest={{ k8s_path }}/kubernetes/conf
  with_items:
      - kubelet
      - bootstrap.kubeconfig
- name:  copy to kubelet service
  template: src={{ item }} dest=/usr/lib/systemd/system/
  with_items:
      - kubelet.service
- name: systemctl daemon-reload 
  shell: systemctl daemon-reload
- name: systemctl enable kubelet
  shell: systemctl enable kubelet &&  systemctl start kubelet

###### kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-{{ k8s_path }}/kubernetes/conf/kubelet
ExecStart={{ k8s_path }}/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
####### kubelet
KUBELET_OPTS="--bootstrap-kubeconfig={{ k8s_path }}/kubernetes/conf/bootstrap.kubeconfig \
              --fail-swap-on=false \
              --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir={{ k8s_path }}/cni/bin \
              --kubeconfig={{ k8s_path }}/kubernetes/conf/kubelet.kubeconfig \
              --address={{ ansible_eth1.ipv4.address }} \
              --node-ip={{ ansible_eth1.ipv4.address }} \
              --hostname-override={{ ansible_hostname }} \
              --cluster-dns={{ k8s_dns }} \
              --cluster-domain={{ k8s_domain }} \
              --authorization-mode=Webhook \
              --authentication-token-webhook=true \
              --client-ca-file={{ k8s_path }}/kubernetes/ssl/k8s/k8s-ca.pem \
              --rotate-certificates=true \
              --rotate-server-certificates=true \
              --cgroup-driver=cgroupfs \
              --allow-privileged=true \
              --healthz-port=10248 \
              --healthz-bind-address={{ ansible_eth1.ipv4.address }} \
              --cert-dir={{ k8s_path }}/kubernetes/ssl \
              --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
              --node-labels=node-role.kubernetes.io/{{ k8s_node }}=true \
              --serialize-image-pulls=false \
              --enforce-node-allocatable=pods,kube-reserved,system-reserved \
              --pod-manifest-path={{ k8s_path }}/work/kubernetes/manifests \
              --runtime-cgroups=/systemd/system.slice/kubelet.service \
              --kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
              --system-reserved-cgroup=/systemd/system.slice \
              --root-dir={{ k8s_path }}/work/kubernetes/kubelet \
              --log-dir={{ k8s_path }}/kubernetes/log \
              --alsologtostderr=true \
              --logtostderr=false \
              --anonymous-auth=true \
              --image-gc-high-threshold=70 \
              --image-gc-low-threshold=50 \
              --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
              --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
              --eviction-hard=memory.available<500Mi,nodefs.available<10% \
              --sync-frequency=30s \
              --resolv-conf=/etc/resolv.conf \
              --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
              --image-pull-progress-deadline=30 \
              --v={{ level_log }} \
              --event-burst=30 \
              --event-qps=15 \
              --kube-api-burst=30 \
              --kube-api-qps=15 \
              --max-pods=200 \
              --pods-per-core=10 \
              --read-only-port=0 \
              --volume-plugin-dir={{ k8s_path }}/kubernetes/kubelet-plugins/volume"

18.6 site.yml

cd /apps/work/k8s

cat site.yml
- hosts: all
  user: root
  vars:
    k8s_path: /apps
    k8s_dns: 10.64.0.2
    k8s_domain: niuke.local
    cluster_cidr: 10.48.0.0/12
    level_log: 2
    graph: "/apps/docker"
  roles:
    - cni
    - lxcfs
    - docker_client
    - kubelet

18.7 host

[vip]
192.168.4.1
192.168.4.2
[node]
192.168.4.3
192.168.4.4
192.168.4.5
[ingress]
192.168.31.1
192.168.32.2
[vip:vars]
k8s_node=k8s-vip
[node:vars]
k8s_node=k8s-node
[ingress:vars]
k8s_node=k8s-ingress
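With the inventory and site.yml in place, the whole node setup can be applied in one pass (run from /apps/work/k8s):
ansible-playbook -i host site.yml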

19. Create a PodPreset to Set the Time Zone and Mount lxcfs

cd /apps/work/k8s
vi allow-lxcfs-tz-env.yaml
apiVersion: settings.k8s.io/v1alpha1
kind: PodPreset
metadata:
  name: allow-lxcfs-tz-env
spec:
  selector:
    matchLabels:
  volumeMounts:
    - mountPath: /proc/cpuinfo
      name: proc-cpuinfo
    - mountPath: /proc/diskstats
      name: proc-diskstats
    - mountPath: /proc/meminfo
      name: proc-meminfo
    - mountPath: /proc/stat
      name: proc-stat
    - mountPath: /proc/swaps
      name: proc-swaps
    - mountPath: /proc/uptime
      name: proc-uptime
    - mountPath: /etc/localtime
      name: allow-tz-env

  volumes:
    - name: proc-cpuinfo
      hostPath:
        path: /var/lib/lxcfs/proc/cpuinfo
    - name: proc-diskstats
      hostPath:
        path: /var/lib/lxcfs/proc/diskstats
    - name: proc-meminfo
      hostPath:
        path: /var/lib/lxcfs/proc/meminfo
    - name: proc-stat
      hostPath:
        path: /var/lib/lxcfs/proc/stat
    - name: proc-swaps
      hostPath:
        path: /var/lib/lxcfs/proc/swaps
    - name: proc-uptime
      hostPath:
        path: /var/lib/lxcfs/proc/uptime
    - name: allow-tz-env
      hostPath:
        path: /usr/share/zoneinfo/Asia/Shanghai
### Apply allow-lxcfs-tz-env.yaml
kubectl apply -f allow-lxcfs-tz-env.yaml
#### Note: the PodPreset is namespaced, so apply it in every namespace where it should take effect, e.g.:
kubectl apply -f allow-lxcfs-tz-env.yaml -n kube-system
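PodPreset is an alpha API (settings.k8s.io/v1alpha1); the apiserver must enable it via --runtime-config=settings.k8s.io/v1alpha1=true and the PodPreset admission plugin, otherwise the apply above will fail. To confirm the object exists:
kubectl get podpreset allow-lxcfs-tz-env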

Next: Kubernetes production installation and deployment, based on Kubernetes v1.14.0: kube-router deployment

Reposted from: https://blog.51cto.com/juestnow/2405807
