a26. Ansible Production Case -- Installing Kubernetes v1.20 with kubeadm -- Docker installation, kubeadm and other components, cluster initialization

10. Install Docker

# Only this playbook file needs to be created (the docker role was set up earlier in this series)
[root@ansible-server ansible]# vim docker_role.yml 
---
- hosts: k8s_cluster

  roles:
    - role: docker

[root@ansible-server ansible]# ansible-playbook docker_role.yml
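
Before moving on, it can be worth a quick sanity check that Docker actually came up on every node; an ad-hoc run against the same k8s_cluster group works (optional, not part of the original flow):

[root@ansible-server ansible]# ansible k8s_cluster -m command -a 'docker --version'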

11. Install kubeadm

[root@ansible-server ansible]# mkdir -p roles/kubeadm/{templates,vars,tasks}
[root@ansible-server ansible]# cd roles/kubeadm/
[root@ansible-server kubeadm]# ls
tasks  templates  vars

[root@ansible-server kubeadm]# vim templates/kubernetes.repo.j2
[kubernetes]
name=Kubernetes
baseurl=https://{{ KUBEADM_MIRRORS }}/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://{{ KUBEADM_MIRRORS }}/kubernetes/yum/doc/yum-key.gpg https://{{ KUBEADM_MIRRORS }}/kubernetes/yum/doc/rpm-package-key.gpg
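
With the variables defined in vars/main.yml below (KUBEADM_MIRRORS: mirrors.aliyun.com), the template should render roughly as follows; shown here only to make the Jinja2 substitution concrete:

[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg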

[root@ansible-server kubeadm]# vim templates/kubeadm-config.yaml.j2 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: {{ ansible_default_ipv4.address }}
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - {{ VIP }}
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: {{ VIP }}:6443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: {{ HARBOR_DOMAIN }}/google_containers
kind: ClusterConfiguration
kubernetesVersion: v{{ KUBEADM_VERSION }}
networking:
  dnsDomain: {{ domain }}
  podSubnet: {{ POD_SUBNET }}
  serviceSubnet: {{ SERVICE_SUBNET }}
scheduler: {}
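
The template carries two YAML documents: an InitConfiguration (bootstrap token, advertise address, node registration) and a ClusterConfiguration (control-plane endpoint, image repository, networking), both kubeadm.k8s.io/v1beta2. To compare against the upstream defaults, kubeadm can print them (optional):

[root@k8s-master01 ~]# kubeadm config print init-defaults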

[root@ansible-server kubeadm]# vim vars/main.yml
KUBEADM_MIRRORS: mirrors.aliyun.com
KUBEADM_VERSION: 1.20.14
HARBOR_DOMAIN: harbor.raymonds.cc
USERNAME: admin
PASSWORD: 123456
VIP: 172.31.3.188
POD_SUBNET: 192.168.0.0/12
SERVICE_SUBNET: 10.96.0.0/12
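
Any of these can also be overridden at run time without editing the file, since extra vars take precedence over role vars; for example, against the kubeadm_role.yml playbook created at the end of this step:

[root@ansible-server ansible]# ansible-playbook kubeadm_role.yml -e 'KUBEADM_VERSION=1.20.15'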

[root@ansible-server kubeadm]# vim tasks/install_kubeadm_yum.yml
- name: set CentOS or Rocky kubernetes mirror repository
  template:
    src: kubernetes.repo.j2
    dest: /etc/yum.repos.d/kubernetes.repo
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
- name: install CentOS or Rocky kubeadm for master
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }},kubectl-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.master
- name: install CentOS or Rocky kubeadm for node
  yum:
    name: kubelet-{{ KUBEADM_VERSION }},kubeadm-{{ KUBEADM_VERSION }}
  when:
    - (ansible_distribution=="CentOS" or ansible_distribution=="Rocky")
    - inventory_hostname in groups.node
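
If the yum install fails because the pinned version is missing from the mirror, the available builds can be listed first (a quick check, run on any CentOS/Rocky node):

[root@k8s-master01 ~]# yum list kubelet kubeadm kubectl --showduplicates | grep 1.20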

[root@ansible-server kubeadm]# vim tasks/install_kubeadm_apt.yml
- name: delete lock files
  file:
    path: "{
    
    { item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes 
    force: yes 
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu kubernetes dependency package
  apt:
    name: apt-transport-https
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: import Ubuntu kubernetes key
  apt_key:
    url: https://{{ KUBEADM_MIRRORS }}/kubernetes/apt/doc/apt-key.gpg
  when:
    - ansible_distribution=="Ubuntu"
- name: import Ubuntu kubernetes installation source
  apt_repository:
    repo: "deb https://{
    
    { KUBEADM_MIRRORS }}/kubernetes/apt kubernetes-xenial main"
    filename: kubernetes
  when:
    - ansible_distribution=="Ubuntu"
- name: delete lock files
  file:
    path: "{
    
    { item }}"
    state: absent
  loop:
    - /var/lib/dpkg/lock
    - /var/lib/apt/lists/lock
    - /var/cache/apt/archives/lock
  when:
    - ansible_distribution=="Ubuntu"
- name: apt update
  apt:
    update_cache: yes
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
- name: install Ubuntu kubeadm for master
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00,kubectl={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.master
- name: install Ubuntu kubeadm for node
  apt:
    name: kubelet={{ KUBEADM_VERSION }}-00,kubeadm={{ KUBEADM_VERSION }}-00
    force: yes
  when:
    - ansible_distribution=="Ubuntu"
    - inventory_hostname in groups.node
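
The -00 suffix is the Debian package revision used by the upstream Kubernetes apt repos; if the pinned version is unavailable, the candidates can be listed on any Ubuntu node with:

apt-cache madison kubeadm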

[root@ansible-server kubeadm]# vim tasks/service.yml
- name: start kubelet
  systemd:
    name: kubelet
    state: started
    enabled: yes
    daemon_reload: yes

[root@ansible-server kubeadm]# vim tasks/docker_login.yml
- name: docker login
  shell:
    cmd: docker login -u {{ USERNAME }} -p {{ PASSWORD }} {{ HARBOR_DOMAIN }}
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm]# vim tasks/download_images.yml
- name: get kubeadm image list
  shell:
    cmd: kubeadm config images list --kubernetes-version=v{{ KUBEADM_VERSION }} | awk -F "/" '{print $NF}'
  register: KUBEADM_IMAGES_VERSION
  when:
    - ansible_hostname=="k8s-master01"
- name: download kubeadm image
  shell: |
    {% for i in KUBEADM_IMAGES_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm]# vim tasks/copy_kubeadm_config.yml
- name: copy kubeadm_config.yml file
  template:
    src: kubeadm-config.yaml.j2
    dest: /root/kubeadm-config.yaml
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server kubeadm]# vim tasks/main.yml
- include: install_kubeadm_yum.yml
- include: install_kubeadm_apt.yml
- include: service.yml
- include: docker_login.yml
- include: download_images.yml
- include: copy_kubeadm_config.yml
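
Note that bare `- include:` has been deprecated since Ansible 2.4; on newer Ansible the same main.yml can be written with include_tasks (an equivalent sketch):

- include_tasks: install_kubeadm_yum.yml
- include_tasks: install_kubeadm_apt.yml
- include_tasks: service.yml
- include_tasks: docker_login.yml
- include_tasks: download_images.yml
- include_tasks: copy_kubeadm_config.yml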

[root@ansible-server kubeadm]# cd ../../
[root@ansible-server ansible]# tree roles/kubeadm/
roles/kubeadm/
├── tasks
│   ├── copy_kubeadm_config.yml
│   ├── docker_login.yml
│   ├── download_images.yml
│   ├── install_kubeadm_apt.yml
│   ├── install_kubeadm_yum.yml
│   ├── main.yml
│   └── service.yml
├── templates
│   ├── kubeadm-config.yaml.j2
│   └── kubernetes.repo.j2
└── vars
    └── main.yml

3 directories, 10 files

[root@ansible-server ansible]# vim kubeadm_role.yml
---
- hosts: master:node

  roles:
    - role: kubeadm

[root@ansible-server ansible]# ansible-playbook kubeadm_role.yml
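
Optionally, verify the binaries landed at the pinned version, and preview the upcoming initialization without changing anything (kubeadm init accepts --dry-run):

[root@ansible-server ansible]# ansible master -m command -a 'kubeadm version -o short'
[root@k8s-master01 ~]# kubeadm init --config /root/kubeadm-config.yaml --dry-run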

12. Cluster Initialization

[root@k8s-master01 ~]# kubeadm init --config /root/kubeadm-config.yaml  --upload-certs
[init] Using Kubernetes version: v1.20.14
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.example.local] and IPs [10.96.0.1 172.31.3.101 172.31.3.188]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [172.31.3.101 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [172.31.3.101 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 24.032819 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
5358baf292edb83228848d98ca88a2c5b18ee13f9a7dbd266280fa1660030b1b
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 7t2weq.bjbawausm0jaxury
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:607d3c3e1ffb53dd13650310fccf9d7d4a9ff829c30855161875ac5ddc20e8e6 \
    --control-plane --certificate-key 5358baf292edb83228848d98ca88a2c5b18ee13f9a7dbd266280fa1660030b1b

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:607d3c3e1ffb53dd13650310fccf9d7d4a9ff829c30855161875ac5ddc20e8e6 

Configure an environment variable on the Master01 node for access to the Kubernetes cluster:

[root@k8s-master01 ~]# cat >> /root/.bashrc <<EOF
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF

[root@k8s-master01 ~]# source .bashrc 

# kubectl auto-completion
#CentOS
[root@k8s-master01 ~]# yum -y install bash-completion

#Ubuntu
[root@k8s-master01 ~]# apt -y install bash-completion

[root@k8s-master01 ~]# source <(kubectl completion bash)
[root@k8s-master01 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc 

Check the node status:

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS     ROLES                  AGE     VERSION
k8s-master01.example.local   NotReady   control-plane,master   2m13s   v1.20.14

With this initialization-based installation, all system components run as containers in the kube-system namespace. Note that the CoreDNS Pods stay Pending (and the nodes NotReady) until a CNI network plugin is deployed. Check the Pod status:

[root@k8s-master01 ~]# kubectl get pods -n kube-system -o wide
NAME                                                 READY   STATUS    RESTARTS   AGE     IP             NODE                         NOMINATED NODE   READINESS GATES
coredns-5ffd5c4586-g8vzr                             0/1     Pending   0          2m34s   <none>         <none>                       <none>           <none>
coredns-5ffd5c4586-mh9ts                             0/1     Pending   0          2m34s   <none>         <none>                       <none>           <none>
etcd-k8s-master01.example.local                      1/1     Running   0          2m28s   172.31.3.101   k8s-master01.example.local   <none>           <none>
kube-apiserver-k8s-master01.example.local            1/1     Running   0          2m28s   172.31.3.101   k8s-master01.example.local   <none>           <none>
kube-controller-manager-k8s-master01.example.local   1/1     Running   0          2m28s   172.31.3.101   k8s-master01.example.local   <none>           <none>
kube-proxy-fzbgw                                     1/1     Running   0          2m34s   172.31.3.101   k8s-master01.example.local   <none>           <none>
kube-scheduler-k8s-master01.example.local            1/1     Running   0          2m28s   172.31.3.101   k8s-master01.example.local   <none>           <none>

13. High-Availability Masters

Add master02 and master03 by running the following on each as root:

kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:607d3c3e1ffb53dd13650310fccf9d7d4a9ff829c30855161875ac5ddc20e8e6 \
    --control-plane --certificate-key 5358baf292edb83228848d98ca88a2c5b18ee13f9a7dbd266280fa1660030b1b

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS     ROLES                  AGE     VERSION
k8s-master01.example.local   NotReady   control-plane,master   7m21s   v1.20.14
k8s-master02.example.local   NotReady   control-plane,master   65s     v1.20.14
k8s-master03.example.local   NotReady   control-plane,master   11s     v1.20.14

14. Node Configuration

Add node01, node02, and node03 by running the following on each as root:

kubeadm join 172.31.3.188:6443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:607d3c3e1ffb53dd13650310fccf9d7d4a9ff829c30855161875ac5ddc20e8e6
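
The bootstrap token was created with a 24h TTL (see the kubeadm-config template), so if it has expired by the time a node is added, a fresh join command can be printed on master01:

[root@k8s-master01 ~]# kubeadm token create --print-join-command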

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS     ROLES                  AGE     VERSION
k8s-master01.example.local   NotReady   control-plane,master   9m33s   v1.20.14
k8s-master02.example.local   NotReady   control-plane,master   3m17s   v1.20.14
k8s-master03.example.local   NotReady   control-plane,master   2m23s   v1.20.14
k8s-node01.example.local     NotReady   <none>                 69s     v1.20.14
k8s-node02.example.local     NotReady   <none>                 19s     v1.20.14
k8s-node03.example.local     NotReady   <none>                 3s      v1.20.14

Reposted from blog.csdn.net/qq_25599925/article/details/122504156