Deploying Kubernetes 1.27.4 with kubeadm

Environment preparation

Three machines:

k8s-master

k8s-node1

k8s-node2

Basic Kubernetes server configuration:

##### Run on all three machines #####
1. # Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
​
2. # Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config  # permanent
setenforce 0  # temporary
​
3. # Disable swap (kubelet requires swap to be turned off)
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent, takes effect after reboot
swapoff -a # temporary
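A quick sanity check that swap is actually off (the Swap line should show 0 total/used):

# Verify swap is disabled
free -m | grep -i swap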
​
4. # Set the hostnames and in-cluster name resolution
hostnamectl set-hostname k8s-master   # on the master
hostnamectl set-hostname k8s-node1    # on node1
hostnamectl set-hostname k8s-node2    # on node2
​
cat >> /etc/hosts << EOF
192.168.9.170 k8s-master
192.168.9.171 k8s-node1
192.168.9.172 k8s-node2
EOF
​
5. # Kernel parameters and IP forwarding
cat > /etc/sysctl.d/kubernetes.conf << EOF
# Enable bridge mode so bridged traffic is passed to the iptables chains
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
# Disable IPv6
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF
​
6. # Load the br_netfilter module and confirm it is loaded
[root@k8s-master ~]# modprobe br_netfilter && lsmod | grep br_netfilter
br_netfilter           22256  0 
bridge                151336  1 br_netfilter

# Apply the sysctl settings so they take effect
[root@k8s-master ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
​
7. # Configure the Aliyun yum mirror
[root@k8s-master ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s-master ~]# yum clean all && yum makecache
​
8. # Time synchronization: sync from the internal NTP server every 3 hours
crontab -e
15 */3 * * * /usr/sbin/ntpdate 192.168.9.253
​
9. # Enable IPVS support
[root@k8s-master ~]# yum -y install ipset ipvsadm
# List the kernel modules to load
[root@k8s-master ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Make the script executable and run it
[root@k8s-master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules
[root@k8s-master ~]# bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules are loaded
[root@k8s-master ~]# lsmod | grep -e ip_vs -e nf_conntrack
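On a systemd-based host the /etc/sysconfig/modules/*.modules mechanism is not guaranteed to run at boot; a minimal sketch of the systemd-native way to make these modules persistent (assuming systemd-modules-load is in use):

# Load the IPVS-related modules automatically at every boot
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF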

Installing Docker

##### Run on all three machines #####

# Install the required dependencies
yum install -y yum-utils
​
# Add the Docker CE repository (Aliyun mirror)
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
​
# Clean and rebuild the yum cache
yum clean all && yum makecache fast
​
# Install Docker CE (community edition)
yum -y install docker-ce
​
# Start Docker and enable it at boot
systemctl start docker && systemctl enable docker
​
# Check the Docker version
docker version

Configuring Docker registry mirrors

##### Run on all three machines #####

[root@k8s-master ~]# mkdir -p /etc/docker
[root@k8s-master ~]# cat > /etc/docker/daemon.json << EOF
{
        "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn",
                            "https://docker.m.daocloud.io",
                            "http://hub-mirrors.c.163.com"],
        "max-concurrent-downloads": 10,
        "log-driver": "json-file",
        "log-level": "warn",
        "data-root": "/var/lib/docker",
        "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
​
[root@k8s-master ~]# systemctl restart docker
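After the restart, it is worth confirming that Docker picked up the systemd cgroup driver, since kubelet expects it:

# Should print "Cgroup Driver: systemd"
docker info | grep -i "cgroup driver"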

Installing cri-dockerd

##### Run on all three machines #####

# Download the release tarball
[root@k8s-master ~]# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4.amd64.tgz
# Extract it
[root@k8s-master ~]# tar zxvf cri-dockerd-0.3.4.amd64.tgz -C /opt/
# Copy the binary into the PATH
[root@k8s-master ~]# cp /opt/cri-dockerd/* /usr/bin/
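A quick check that the binary is installed and executable:

# Print the installed cri-dockerd version
cri-dockerd --version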

##### Run on all three machines #####

# Create the systemd service unit for cri-dockerd
[root@k8s-master ~]# vim /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
 
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
 
[Install]
WantedBy=multi-user.target

##### Run on all three machines #####

[root@k8s-master ~]# vim /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
 
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
 
[Install]
WantedBy=sockets.target
 
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl enable --now cri-docker
Created symlink from /etc/systemd/system/multi-user.target.wants/cri-docker.service to /usr/lib/systemd/system/cri-docker.service.
[root@k8s-master ~]# systemctl status cri-docker
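Both the socket and the service should now be active; a quick check:

# Both units should report "active", and the socket file should exist
systemctl is-active cri-docker.service cri-docker.socket
ls -l /var/run/cri-dockerd.sock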

Configuring the Kubernetes yum repository

##### Run on all three machines #####

[root@k8s-master ~]# cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
​
# Rebuild the yum cache so the Kubernetes repo is loaded
[root@k8s-master ~]# yum clean all && yum makecache
​
# Install kubelet, kubeadm and kubectl
[root@k8s-master ~]# yum -y install kubelet-1.27.4-0 kubeadm-1.27.4-0 kubectl-1.27.4-0 --disableexcludes=kubernetes
​
## Enable kubelet at boot
[root@k8s-master ~]# systemctl enable kubelet.service
​
​
## kubelet cannot fully start before kubeadm init; checking its state as below shows it is still waiting for configuration
[root@k8s-master ~]# systemctl is-active kubelet
activating
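If kubelet shows failed instead of activating, its logs usually point at the cause (swap still enabled, cgroup driver mismatch, etc.); a quick way to look:

# Show the most recent kubelet log entries
journalctl -u kubelet -n 30 --no-pager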

kubeadm initialization

##### Run on all three machines #####


# List the images required for initialization
kubeadm config images list --kubernetes-version 1.27.4
​
# Pre-pull the images and re-tag them
[root@k8s-master ~]# vim dockerPulv1.27.4.sh
#!/bin/bash
docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.27.4
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.27.4
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.27.4
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.27.4
docker pull registry.aliyuncs.com/google_containers/pause:3.9
docker pull registry.aliyuncs.com/google_containers/etcd:3.5.7-0
docker pull registry.aliyuncs.com/google_containers/coredns:v1.10.1
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.27.4 registry.k8s.io/kube-apiserver:v1.27.4
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.27.4 registry.k8s.io/kube-controller-manager:v1.27.4
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.27.4 registry.k8s.io/kube-scheduler:v1.27.4
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.27.4 registry.k8s.io/kube-proxy:v1.27.4
docker tag registry.aliyuncs.com/google_containers/coredns:v1.10.1 registry.k8s.io/coredns/coredns:v1.10.1
docker tag registry.aliyuncs.com/google_containers/etcd:3.5.7-0 registry.k8s.io/etcd:3.5.7-0
docker tag registry.aliyuncs.com/google_containers/pause:3.9 registry.k8s.io/pause:3.9
​
# Reload systemd and restart Docker
systemctl daemon-reload
systemctl restart docker.service
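Run the script on all three nodes, then confirm the required images are present locally:

# Execute the pull/tag script and list the re-tagged images
bash dockerPulv1.27.4.sh
docker images | grep registry.k8s.io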

##### Run on the master node only #####

# Initialize the control plane with kubeadm init
​
[root@k8s-master ~]# kubeadm init \
--kubernetes-version v1.27.4 \
--pod-network-cidr=10.10.20.0/24 \
--cri-socket unix:///var/run/cri-dockerd.sock
​
​
# Explanation of the init parameters
--kubernetes-version=v1.27.4                    -- the exact Kubernetes version
--pod-network-cidr=10.10.20.0/24                -- the Pod network CIDR; must match the CIDR used in the CNI manifest deployed below
--cri-socket unix:///var/run/cri-dockerd.sock   -- path of the container runtime socket; the old default was dockershim.sock, here cri-dockerd's socket is used instead
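The same settings can also be expressed as a kubeadm configuration file, which is easier to keep under version control; a minimal sketch using the v1beta3 kubeadm API (adjust the values to your environment):

cat > kubeadm-config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.27.4
networking:
  podSubnet: 10.10.20.0/24
EOF
kubeadm init --config kubeadm-config.yaml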
​
​
​
Your Kubernetes control-plane has initialized successfully!
​
To start using your cluster, you need to run the following as a regular user:
​
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
​
Alternatively, if you are the root user, you can run:
​
  export KUBECONFIG=/etc/kubernetes/admin.conf
​
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
​
Then you can join any number of worker nodes by running the following on each as root:
​
kubeadm join 192.168.9.170:6443 --token g0xopu.i8bzke6fbvkpqw98 \ 
     --discovery-token-ca-cert-hash sha256:1faa21ba5a0f0b5ac82ec2bd3ac5467b190523035e65054401f994a3efc2a623
    

Joining the worker nodes to the cluster

kubeadm join 192.168.9.170:6443 --token g0xopu.i8bzke6fbvkpqw98 --discovery-token-ca-cert-hash sha256:1faa21ba5a0f0b5ac82ec2bd3ac5467b190523035e65054401f994a3efc2a623 --cri-socket unix:///var/run/cri-dockerd.sock

If the join fails, reset the node with kubeadm reset and then join again:

kubeadm reset --cri-socket unix:///var/run/cri-dockerd.sock
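The bootstrap token in the join command expires after 24 hours by default; a fresh join command can be printed on the master at any time (remember to append the --cri-socket flag when running it on a node):

# Generate a new token and print the matching join command
kubeadm token create --print-join-command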

Deploying the Calico network plugin

Calico is used as the CNI network plugin.

Calico documentation: Quickstart for Calico on Kubernetes

[root@k8s-master ~]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/tigera-operator.yaml
[root@k8s-master ~]# kubectl create -f tigera-operator.yaml

[root@k8s-master ~]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/custom-resources.yaml
[root@k8s-master ~]# vim custom-resources.yaml 
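The edit to custom-resources.yaml changes the default IP pool CIDR (192.168.0.0/16 in the upstream manifest) so that it matches the --pod-network-cidr passed to kubeadm init; a sed sketch of the change:

# Align Calico's IP pool with the pod-network-cidr used at kubeadm init
sed -i 's#cidr: 192.168.0.0/16#cidr: 10.10.20.0/24#' custom-resources.yaml
grep cidr custom-resources.yaml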

[root@k8s-master ~]# kubectl create -f custom-resources.yaml
[root@k8s-master ~]# kubectl get pod -n calico-system

Creating the network plugin pods is fairly slow (roughly ten minutes or so); wait until they are all up, then check again:

[root@k8s-master ~]# kubectl get pod -n calico-system
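To watch the rollout instead of re-running the command by hand:

# Refresh the pod list every 2 seconds until everything is Running
watch kubectl get pod -n calico-system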

Verifying cluster availability and health

# On the master, the worker nodes now show as joined
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES           AGE   VERSION
k8s-master   Ready    control-plane   18h   v1.27.4
k8s-node1    Ready    <none>          17h   v1.27.4
k8s-node2    Ready    <none>          17h   v1.27.4
[root@k8s-master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
controller-manager   Healthy   ok        
scheduler            Healthy   ok        
etcd-0               Healthy
[root@k8s-master ~]# kubectl get pod -n kube-system
NAME                                 READY   STATUS    RESTARTS        AGE
coredns-5d78c9869d-hkrng             1/1     Running   222 (12m ago)   18h
coredns-5d78c9869d-v2hfm             1/1     Running   235 (12m ago)   18h
etcd-k8s-master                      1/1     Running   3 (12m ago)     18h
kube-apiserver-k8s-master            1/1     Running   4 (11m ago)     18h
kube-controller-manager-k8s-master   1/1     Running   3 (12m ago)     18h
kube-proxy-jdnm6                     1/1     Running   3 (12m ago)     17h
kube-proxy-mmsdv                     1/1     Running   3 (12m ago)     18h
kube-proxy-mzwqj                     1/1     Running   3 (12m ago)     17h
kube-scheduler-k8s-master            1/1     Running   3 (12m ago)     18h

Deploying the Dashboard UI

[root@k8s-master ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v3.0.0-alpha0/charts/kubernetes-dashboard.yaml
[root@k8s-master ~]# vim kubernetes-dashboard.yaml

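The edit here typically adds type: NodePort to the kubernetes-dashboard Service so the UI is reachable from outside the cluster. If you prefer not to edit the manifest by hand, the same result can be achieved after the apply step below with kubectl patch (the node port, 32640 in the output further down, is assigned automatically):

# Switch the Dashboard Service to NodePort (equivalent to editing the manifest)
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'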
[root@k8s-master ~]# kubectl apply -f kubernetes-dashboard.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

[root@k8s-master ~]# kubectl get ns
NAME                   STATUS   AGE
calico-apiserver       Active   121m
calico-system          Active   122m
default                Active   19h
kube-flannel           Active   18h
kube-node-lease        Active   19h
kube-public            Active   19h
kube-system            Active   19h
kubernetes-dashboard   Active   65s   # this one
tigera-operator        Active   123m
​
[root@k8s-master ~]# kubectl get pod -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-5cb4f4bb9c-5f97s   1/1     Running   0          86s
kubernetes-dashboard-6967859bff-j54fn        1/1     Running   0          86s
​
[root@k8s-master ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.96.62.80      <none>        8000/TCP        109s
kubernetes-dashboard        NodePort    10.109.179.208   <none>        443:32640/TCP   110s
​
# The Dashboard Service is exposed on NodePort 32640; access it at https://<nodeIP>:32640
https://192.168.9.170:32640

[root@k8s-master ~]# vim dashboard-adminuser.yaml
# Create this file with the following content
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: dashboard-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
# Above: create the ServiceAccount and bind it to the built-in cluster-admin ClusterRole
# Below: manually create a Secret so that a long-lived token is issued for the ServiceAccount
apiVersion: v1
kind: Secret
metadata:
  name: secret-admin
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "dashboard-admin"
type: kubernetes.io/service-account-token

# Create the resources
[root@k8s-master ~]# kubectl apply -f dashboard-adminuser.yaml
serviceaccount/dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
secret/secret-admin created
​
# View the generated token
[root@k8s-master ~]# kubectl describe -nkube-system secret/secret-admin
Name:         secret-admin
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: 8cedd0ee-d3d6-429b-868c-caee839c953b
​
Type:  kubernetes.io/service-account-token
​
Data
====
ca.crt:     1107 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkNrblN2Ti1CX2Vrc09mZDVUSnE3Z0lHTzM1RFNhNnU3OHpCdDNESG5EOWMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJzZWNyZXQtYWRtaW4iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiOGNlZGQwZWUtZDNkNi00MjliLTg2OGMtY2FlZTgzOWM5NTNiIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.Nwb9UiFS9QMQRkYTdt-mx_rI_OGPIXHHYurYKEJfI0quhw-EJHZoDWAIdthFOKLXSSD8fnSw82wKPYN-J6xi9Aafiha2bEBm2Tslf8kFKiTUo1RpPoj2ka4WKo9Vw8VbG8scnM8U2l6bewOj2O1TvijRd67dYX-COR28N-zGTAaFdW6RT9dCin5rRdQIq5Ni92WGQtGXR_49fcWOPA9YVWL-32R07MdlToSlyzeQLuizNgypdbkGADQmjfP3cbsFRVwLXJcBmpUAUSOToSddsgneim6CuzGOEAxMtmS4mhbZTFR0NZdzZ6FwED_kz46BY8RBDTRIM_nJWs-E1e6kHg
​
# The token can be copied into a file such as admin.token so it is easy to look up later
[root@k8s-master dashboard]# vim admin.token 
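Instead of copying the token out of the describe output by hand, it can also be extracted directly:

# Decode the token from the Secret and save it to admin.token
kubectl -n kube-system get secret secret-admin -o jsonpath='{.data.token}' | base64 -d > admin.token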
​

Log in to the Dashboard with the generated token.

Source: blog.csdn.net/weixin_63125636/article/details/134577723