Installing Kubernetes with kubeadm on a LAN

Host List:

ip             hostname   role    cores  RAM
192.168.23.100 k8smaster  master  2      2G
192.168.23.101 k8snode01  node    2      2G
192.168.23.102 k8snode02  node    2      2G

 

1. Configure a local yum repository
yum repository package:
Link: https://pan.baidu.com/s/1KAYWlw5Ky2ESUEZVsphQ0Q

To configure the local yum repository, copy yum.repo into the /etc/yum.repos.d/ directory.
[root@k8smaster yum.repos.d]# more yum.repo
[Soft]
name = Base
baseurl = http://192.168.23.100/yum
gpgcheck = 0

[root@k8smaster yum.repos.d]# scp yum.repo 192.168.23.102:/etc/yum.repos.d/
[email protected]'s password: 
yum.repo                                                                                       100%   63     0.1KB/s   00:00    
[root@k8smaster yum.repos.d]# scp yum.repo 192.168.23.101:/etc/yum.repos.d/
[email protected]'s password: 
yum.repo   

2、修改/etc/hosts
[root@k8smaster yum.repos.d]# cat >> /etc/hosts << EOF
> 192.168.23.100 k8smaster
> 192.168.23.101 k8snode01
> 192.168.23.102 k8snode02
> EOF

[root@k8smaster yum.repos.d]#

3、安装依赖
yum install -y conntrack ntpdate ntp ipvsadm ipset iptables curl sysstat libseccomp wget vim net-tools git iproute lrzsz bash-completion tree bridge-utils unzip bind-utils gcc

4、关闭selinux
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

5. Turn off firewalld, switch the firewall to iptables and set empty rules
# stop firewalld and disable it at boot
systemctl stop firewalld && systemctl disable firewalld
# install iptables, start it, enable it at boot, flush the rules, and save the current rules as the default
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
 

6. Disable the swap partition
# turn off swap now and permanently disable it in /etc/fstab

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

7. Tune kernel parameters for k8s
cat > kubernetes.conf << EOF
# make bridged traffic pass through iptables [important]
net.bridge.bridge-nf-call-iptables = 1
# make bridged traffic pass through ip6tables [important]
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
# forbid use of swap space; allow it only when the system is OOM
vm.swappiness = 0
# do not check whether physical memory is sufficient
vm.overcommit_memory = 1
# enable the OOM killer
vm.panic_on_oom = 0
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 1048576
fs.file-max = 52706963
fs.nr_open = 52706963
# disable ipv6 [important]
net.ipv6.conf.all.disable_ipv6 = 1
net.netfilter.nf_conntrack_max = 2310720
EOF

# Will be optimized kernel files are copied to /etc/sysctl.d/ folder, so files optimized boot time can be called
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf 

# Manual refresh, so the optimized file with immediate effect
sysctl -p /etc/sysctl.d/kubernetes.conf

8. Set the system time zone
# set the time zone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# write the current UTC time to the hardware clock
timedatectl set-local-rtc 0
# restart the services that depend on the system time
systemctl restart rsyslog
systemctl restart crond

9. Shut down unnecessary system services
# stop and disable the mail service
systemctl stop postfix && systemctl disable postfix

10. Configure log persistence
Since CentOS 7 boots with systemd, two logging systems run at the same time: rsyslogd (the default) and systemd-journald.
systemd-journald works better, so switch the default to it and keep only one way of saving logs.
1) Create the directory that stores the logs
mkdir /var/log/journal
2) Create the configuration file directory
mkdir /etc/systemd/journald.conf.d
3) Create the configuration file
cat > /etc/systemd/journald.conf.d/99-prophet.conf << EOF
[Journal]
# persist logs to disk
Storage=persistent
# compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# maximum disk space 10G
SystemMaxUse=10G
# maximum size of a single log file 200M
SystemMaxFileSize=200M
# keep logs for two weeks
MaxRetentionSec=2week
# do not forward logs to syslog
ForwardToSyslog=no
EOF

4) Restart systemd-journald to apply the configuration
systemctl restart systemd-journald

11. Raise the open-file limit
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf

12、升级Linux内核为4.44版本
[root@k8smaster yum.repos.d]# yum install kernel-lt.x86_64 -y  (4.4.213-1.el7.elrepo)
[root@k8smaster yum.repos.d]# awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg 
CentOS Linux (4.4.213-1.el7.elrepo.x86_64) 7 (Core)
CentOS Linux, with Linux 3.10.0-123.el7.x86_64
CentOS Linux, with Linux 0-rescue-b7478dd50b1d41a5836a6a670b5cd8c1
[root@k8smaster yum.repos.d]#grub2-set-default 'CentOS Linux (4.4.213-1.el7.elrepo.x86_64) 7 (Core)'
[root@k8snode01 ~]# uname -a
Linux k8snode01 4.4.213-1.el7.elrepo.x86_64 #1 SMP Wed Feb 5 10:44:50 EST 2020 x86_64 x86_64 x86_64 GNU/Linux

12. Preconditions for enabling ipvs mode in kube-proxy
modprobe br_netfilter  # load the netfilter bridge module
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# use lsmod to check that these modules are loaded.

13. Install docker
yum install -y yum-utils device-mapper-persistent-data lvm2  # install dependencies
yum install -y docker-ce  # install docker

Create the /etc/docker directory
[ ! -d /etc/docker ] && mkdir /etc/docker

配置daemon
cat > /etc/docker/daemon.json <<EOF
{
 "exec-opts": ["native.cgroupdriver=systemd"],
 "log-driver": "json-file",
 "log-opts": {
  "max-size": "100m"
 }
}
EOF
 

修改docker.service文件
/usr/lib/systemd/system/docker.service 
ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry 0.0.0.0/0 -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 --containerd=/run/containerd/containerd.sock

# 重启docker服务
systemctl daemon-reload && systemctl restart docker && systemctl enable docker

14. Set up the private image registry and load the images
docker run -d -p 5000:5000 --restart=always --name private-docker-registry --privileged=true -v /data/registry:/var/lib/registry 192.168.23.100:5000/registry:v1

flannel network image package
link: https://pan.baidu.com/s/1-DYxDoU2X85aobaGFclKfA
extraction code: nson

k8s base image package
link: https://pan.baidu.com/s/17uV90VPXqoaezwccpTj2GQ
extraction code: 13t3

导入镜像
[root@k8smaster k8s_image]# more load_image.sh 
#!/bin/bash
# Load every docker image archive found in /home/zhaiky/k8s_image.
# Filenames containing "load" (i.e. this script itself) are skipped,
# matching the original `ls | grep -v load` filter.
cd /home/zhaiky/k8s_image || exit 1
for image_file in *; do
    # skip the loader script itself and anything else named *load*
    case "$image_file" in *load*) continue ;; esac
    docker load -i "$image_file"
done

上传镜像到私有仓库
docker push 192.168.23.100:5000/kube-apiserver:v1.15.1
docker push 192.168.23.100:5000/kube-proxy:v1.15.1
docker push 192.168.23.100:5000/kube-controller-manager:v1.15.1
docker push 192.168.23.100:5000/kube-scheduler:v1.15.1
docker push 192.168.23.100:5000/registry:v1
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-s390x
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-ppc64le
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-arm64
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-arm
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-amd64
docker push 192.168.23.100:5000/coredns:1.3.1
docker push 192.168.23.100:5000/etcd:3.3.10
docker push 192.168.23.100:5000/pause:3.1

15. Install kubeadm, kubelet, kubectl
yum -y install kubeadm-1.15.1 kubelet-1.15.1 kubectl-1.15.1
systemctl enable kubelet && systemctl start kubelet

16. Enable kubectl command auto-completion
# install and configure bash-completion
yum install -y bash-completion
echo 'source /usr/share/bash-completion/bash_completion' >> /etc/profile
source /etc/profile
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc

17. Initialize the master
The configuration package contains kubeadm-config.yaml and kube-flannel.yml
link: https://pan.baidu.com/s/1g0G7Ion0n6lERpluNjh_9A
extraction code: 6pxt

[root@k8smaster ~]# cp /home/zhaiky/kubeadm-config.yaml .
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log

Key log output
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.23.100:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:78c3f1e110ed1f954665ba55a689397c2dc4d35243dc4516dd00b0bac97172f6 

18. Install the flannel network plugin
[root@k8smaster ~]# cp /home/zhaiky/kube-flannel.yml .
[root@k8smaster ~]# kubectl create -f kube-flannel.yml

19、将k8s子节点加入到k8s主节点
kubeadm join 192.168.23.100:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:78c3f1e110ed1f954665ba55a689397c2dc4d35243dc4516dd00b0bac97172f6 
[root@k8smaster zhaiky]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}
[root@k8smaster ~]# kubectl get node
NAME        STATUS     ROLES    AGE     VERSION
k8smaster   Ready      master   4m58s   v1.15.1
k8snode01   NotReady   <none>   21s     v1.15.1
k8snode02   NotReady   <none>   16s     v1.15.1
[root@k8smaster ~]#

20、简单操作
使用k8s运行一个nginx实例
[root@k8smaster ~]# kubectl run nginx --image=192.168.23.100:5000/nginx:v1 --port=80  --replicas=1
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@k8smaster ~]# 
[root@k8smaster ~]# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE        NOMINATED NODE   READINESS GATES
nginx-5bbb49fb76-xzj6x   1/1     Running   0          59s   10.244.1.2   k8snode01   <none>           <none>
[root@k8smaster ~]# 

[root@k8smaster ~]# kubectl get deployment
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   1/1     1            1           2m15s
[root@k8smaster ~]# 
[root@k8smaster ~]# curl "http://10.244.1.2"
<title>Welcome to nginx!</title>
[root@k8smaster ~]# kubectl expose deployment nginx --port=80 --type=LoadBalancer
service/nginx exposed
[root@k8smaster ~]# kubectl get service
NAME         TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1       <none>        443/TCP        14h
nginx        LoadBalancer   10.99.225.215   <pending>     80:32461/TCP   13s
[root@k8smaster ~]# 
[root@k8smaster ~]# curl "http://192.168.23.101:32461"
<title>Welcome to nginx!</title>
[root@k8smaster ~]# curl "http://10.99.225.215"
<title>Welcome to nginx!</title>

Published 60 original articles · won praise 20 · views 4571

Guess you like

Origin blog.csdn.net/zhaikaiyun/article/details/104273974