Quickly Deploying a k8s Cluster with kubeadm

Environment preparation. Perform the following steps on all nodes.

    # One master and two workers; each server is 2C4G (kubeadm reports an error with fewer than 2 CPUs)
    192.168.124.21 master
    192.168.124.20 node1
    192.168.124.19 node2

    # Docker is the latest version at the time of writing
    [root@node1 yum.repos.d]# rpm -q docker-ce
    docker-ce-19.03.12-3.el7.x86_64

    # Kubernetes is version 1.18 (the latest release, 1.19, was not tried)
    [root@master ~]# kubectl get node
    NAME     STATUS   ROLES    AGE   VERSION
    master   Ready    master   24h   v1.18.8
    node1    Ready    <none>   23h   v1.18.8
    node2    Ready    <none>   23h   v1.18.8

Operating system used for the demo

    [root@node1 ~]# cat /etc/redhat-release
    CentOS Linux release 7.7.1908 (Core)

Set the hostname

hostnamectl set-hostname master|node1|node2
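The machines can only reach each other by hostname if DNS or /etc/hosts resolves the names; a small addition (not part of the original write-up, simply mirroring the IP plan above) takes care of that on every node:

cat >> /etc/hosts << EOF
192.168.124.21 master
192.168.124.20 node1
192.168.124.19 node2
EOF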

Configure the IP address

[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33 
BOOTPROTO="static"
IPADDR="192.168.124.20"
NETMASK="255.255.255.0"
GATEWAY="192.168.124.1"
DNS1="223.5.5.5"
DNS2="114.114.114.114"

# Restart the network service after editing
[root@node1 ~]# systemctl restart network

Disable the firewall

systemctl stop firewalld && systemctl disable firewalld

Disable SELinux

sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config && setenforce 0

Disable swap

swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
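A quick check (assuming the two commands above ran cleanly) that swap is now fully off:

free -h | grep -i swap   # all three Swap columns should show 0B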

Kernel parameters

cat << EOF > /etc/sysctl.d/k8s.conf 
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
net.ipv4.ip_forward = 1
EOF

sysctl --system
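One caveat worth noting: the two bridge-nf-call settings only exist once the br_netfilter kernel module is loaded, so a conservative addition is to load it explicitly and make that persistent:

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # reload the module on boot
sysctl --system   # re-apply; the three values above should now all report 1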

Install Docker

Install prerequisite tools and storage drivers

yum install -y yum-utils device-mapper-persistent-data lvm2

Add the Aliyun repository

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Build the yum cache
yum makecache

Enable the nightly channel (optional: the stable channel is enabled by default in docker-ce.repo, and it is what provides the 19.03.12 build shown earlier)

yum-config-manager  --enable docker-ce-nightly

Install

yum -y install docker-ce docker-ce-cli containerd.io

Start Docker

systemctl enable --now docker

Configure a registry mirror (accelerator)

# Note: JSON does not allow comments, so the explanation lives outside the file.
# "exec-opts" puts containers under systemd cgroup management, which the kubelet expects.
cat > /etc/docker/daemon.json << EOF
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://wjp1ubjz.mirror.aliyuncs.com"]
}
EOF

# Restart Docker
systemctl restart docker

Check Docker: confirm that "Cgroup Driver" is systemd and that "Registry Mirrors" lists the Aliyun mirror

[root@node2 ~]# docker info
Client:
 Debug Mode: false

Server:
 Containers: 5
    Running: 4
    Paused: 0
    Stopped: 1
 Images: 4
 Server Version: 19.03.12
 Storage Driver: overlay2
    Backing Filesystem: xfs
    Supports d_type: true
    Native Overlay Diff: true
 Logging Driver: json-file
 Cgroup Driver: systemd
 Plugins:
    Volume: local
    Network: bridge host ipvlan macvlan null overlay
    Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
 Swarm: inactive
 Runtimes: runc
 Default Runtime: runc
 Init Binary: docker-init
 containerd version: 7ad184331fa3e55e52b890ea95e65ba581ae3429
 runc version: dc9208a3303feef5b3839f4323d9beb36df0a9dd
 init version: fec3683
 Security Options:
    seccomp
     Profile: default
 Kernel Version: 3.10.0-1062.el7.x86_64
 Operating System: CentOS Linux 7 (Core)
 OSType: linux
 Architecture: x86_64
 CPUs: 2
 Total Memory: 2.885GiB
 Name: node2
 ID: AB7R:ETA2:2NXL:JMLX:SISA:RH6Z:SIF5:R6YQ:TSEP:LB4W:ZA5Z:ZQT7
 Docker Root Dir: /var/lib/docker
 Debug Mode: false
 Registry: https://index.docker.io/v1/
 Labels:
 Experimental: false
 Insecure Registries:
    127.0.0.0/8
 Registry Mirrors:
    https://wjp1ubjz.mirror.aliyuncs.com/
 Live Restore Enabled: false

Test Docker; the output looks like this:

[root@node2 ~]# docker run --rm -it hello-world

Hello from Docker!
This message shows that your installation appears to be working correctly.

To generate this message, Docker took the following steps:
 1. The Docker client contacted the Docker daemon.
 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
        (amd64)
 3. The Docker daemon created a new container from that image which runs the
        executable that produces the output you are currently reading.
 4. The Docker daemon streamed that output to the Docker client, which sent it
        to your terminal.

To try something more ambitious, you can run an Ubuntu container with:
 $ docker run -it ubuntu bash

Share images, automate workflows, and more with a free Docker ID:
 https://hub.docker.com/

For more examples and ideas, visit:
 https://docs.docker.com/get-started/

failed to resize tty, using default size

Install kubeadm

Add the Aliyun Kubernetes repository

cat <<EOF >/etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install the tools

yum -y install kubeadm-1.18.8-0 kubectl-1.18.8-0 kubelet-1.18.8-0
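To confirm all three packages landed at the pinned release:

rpm -q kubeadm kubelet kubectl   # each should report 1.18.8-0
kubeadm version -o short         # should print v1.18.8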

Enable at boot

systemctl enable kubelet
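The kubelet has nothing to run until kubeadm generates its configuration, so seeing it cycle through restarts at this stage is normal rather than a failure:

systemctl status kubelet   # "activating (auto-restart)" is expected before kubeadm init/join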

Initialize the master node (run only on the master)

# Shell comments cannot follow a line-continuation backslash, so the flags are explained here:
#   --apiserver-advertise-address  IP address of the master node
#   --image-repository             pull control-plane images from the Aliyun mirror
#   --kubernetes-version           control-plane version to deploy
#   --service-cidr                 Service network CIDR (Kubernetes masks 192.168.120.0/16
#                                  to 192.168.0.0/16, which is why the log below shows 192.168.0.1)
#   --pod-network-cidr             best left as 10.244.0.0/16: it is the subnet in flannel's
#                                  default configuration, used as the network plugin later
kubeadm init \
  --apiserver-advertise-address=192.168.124.21 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.18.6 \
  --service-cidr=192.168.120.0/16 \
  --pod-network-cidr=10.244.0.0/16
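As the preflight message in the output below suggests, the control-plane images can also be pulled in advance so init itself runs faster:

kubeadm config images pull \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.18.6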

## The output is as follows:
    [root@master ~]# kubeadm init --apiserver-advertise-address=192.168.124.21 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.18.6 --service-cidr=192.168.120.0/16 --pod-network-cidr=10.244.0.0/16
    W0907 11:48:42.633243   23099 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
    [init] Using Kubernetes version: v1.18.6
    [preflight] Running pre-flight checks
    [WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'

    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Starting the kubelet
    [certs] Using certificateDir folder "/etc/kubernetes/pki"
    [certs] Generating "ca" certificate and key
    [certs] Generating "apiserver" certificate and key
    [certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [192.168.0.1 192.168.124.21]
    [certs] Generating "apiserver-kubelet-client" certificate and key
    [certs] Generating "front-proxy-ca" certificate and key
    [certs] Generating "front-proxy-client" certificate and key
    [certs] Generating "etcd/ca" certificate and key
    [certs] Generating "etcd/server" certificate and key
    [certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [192.168.124.21 127.0.0.1 ::1]
    [certs] Generating "etcd/peer" certificate and key
    [certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [192.168.124.21 127.0.0.1 ::1]
    [certs] Generating "etcd/healthcheck-client" certificate and key
    [certs] Generating "apiserver-etcd-client" certificate and key
    [certs] Generating "sa" key and public key
    [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
    [kubeconfig] Writing "admin.conf" kubeconfig file
    [kubeconfig] Writing "kubelet.conf" kubeconfig file
    [kubeconfig] Writing "controller-manager.conf" kubeconfig file
    [kubeconfig] Writing "scheduler.conf" kubeconfig file
    [control-plane] Using manifest folder "/etc/kubernetes/manifests"
    [control-plane] Creating static Pod manifest for "kube-apiserver"
    [control-plane] Creating static Pod manifest for "kube-controller-manager"
    W0907 11:55:13.393719   23099 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
    [control-plane] Creating static Pod manifest for "kube-scheduler"
    W0907 11:55:13.395162   23099 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
    [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
    [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
    [apiclient] All control plane components are healthy after 22.508034 seconds
    [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
    [kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
    [upload-certs] Skipping phase. Please see --upload-certs
    [mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
    [mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
    [bootstrap-token] Using token: 099kxp.q1hnybe4w8rc8g92
    [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
    [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
    [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
    [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
    [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
    [addons] Applied essential addon: CoreDNS
    [addons] Applied essential addon: kube-proxy

    Your Kubernetes control-plane has initialized successfully!

    To start using your cluster, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
        https://kubernetes.io/docs/concepts/cluster-administration/addons/

    Then you can join any number of worker nodes by running the following on each as root:

    kubeadm join 192.168.124.21:6443 --token 099kxp.q1hnybe4w8rc8g92 \
            --discovery-token-ca-cert-hash sha256:72dbd0036631cbd3123f8e4159eefaab7a57ab528c274985d6111762b5af38a9

To use kubectl on the master node, run the commands printed at the end of the output above:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config
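A quick sanity check that kubectl can now reach the API server:

kubectl cluster-info
kubectl get nodes   # the master stays NotReady until a pod network plugin is installed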

Install the flannel network plugin

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
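The manifest creates a flannel DaemonSet in kube-system; a simple grep (used here so no assumptions are made about the exact pod labels) shows when its pods reach Running on every node:

kubectl get pods -n kube-system -o wide | grep flannel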

Join the worker nodes to the cluster, using the kubeadm join command from the init output above (run as root on each worker):

        kubeadm join 192.168.124.21:6443 --token 099kxp.q1hnybe4w8rc8g92 \
            --discovery-token-ca-cert-hash sha256:72dbd0036631cbd3123f8e4159eefaab7a57ab528c274985d6111762b5af38a9
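The bootstrap token expires after 24 hours; if a node joins later than that, a fresh join command can be printed on the master:

kubeadm token create --print-join-command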

Check the cluster nodes

[root@master ~]# kubectl get node
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   24h   v1.18.8
node1    Ready    <none>   23h   v1.18.8
node2    Ready    <none>   23h   v1.18.8

Check the cluster pods

[root@master ~]# kubectl get pod -A
NAMESPACE     NAME                             READY   STATUS             RESTARTS   AGE
kube-system   coredns-7ff77c879f-db9k9         1/1     Running            0          25h
kube-system   coredns-7ff77c879f-ghphm         1/1     Running            0          25h
kube-system   etcd-master                      1/1     Running            2          25h
kube-system   kube-apiserver-master            1/1     Running            2          25h
kube-system   kube-controller-manager-master   0/1     CrashLoopBackOff   29        25h
kube-system   kube-flannel-ds-amd64-4rfhl      1/1     Running            3          24h
kube-system   kube-flannel-ds-amd64-mbtws      1/1     Running            0          24h
kube-system   kube-flannel-ds-amd64-p6kxj      1/1     Running            2          24h
kube-system   kube-proxy-rbqkg                 1/1     Running            2          25h
kube-system   kube-proxy-rtxwd                 1/1     Running            0          24h
kube-system   kube-proxy-vznmx                 1/1     Running            0          24h
kube-system   kube-scheduler-master            0/1     CrashLoopBackOff   22        25h
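Note that kube-controller-manager and kube-scheduler show CrashLoopBackOff with high restart counts in this capture, so their logs deserve a look before the cluster is trusted. As a final smoke test (the nginx deployment below is just a throwaway example), a workload can be scheduled and removed:

kubectl -n kube-system logs kube-scheduler-master   # check why the pod keeps restarting
kubectl create deployment nginx --image=nginx       # schedule a test pod
kubectl get pods -o wide                            # should reach Running on a worker node
kubectl delete deployment nginx                     # clean up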

This concludes the deployment.

Reposted from blog.51cto.com/13805636/2530550