Installing a single-master Kubernetes cluster with kubeadm


https://blog.csdn.net/networken/article/details/84991940
#On the master node:
hostnamectl set-hostname k8s-master
#On node1:
hostnamectl set-hostname k8s-node1
#On node2:
hostnamectl set-hostname k8s-node2


#!/bin/bash
#Add the cluster hosts to /etc/hosts
cat >> /etc/hosts << EOF
192.168.1.132 k8s-master
192.168.1.129 k8s-node1
192.168.1.130 k8s-node2
192.168.1.132 harbor
EOF
ssh-keygen -t rsa
for n in `seq -w 1 2`;do ssh-copy-id k8s-node$n;done
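#Optional sanity check: the hostnames above should resolve and key-based SSH should work
#without a password prompt (node names assumed from the /etc/hosts entries above).
for n in k8s-node1 k8s-node2; do ssh -o BatchMode=yes $n hostname; done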

#Disable firewalld and SELinux
systemctl stop firewalld && systemctl disable firewalld
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config && setenforce 0
#Disable swap (by default kubelet refuses to start with swap enabled)
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
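#Optional check that swap is really off: the Swap line should be all zeros and fstab
#should no longer contain a swap entry.
free -m | grep -i swap
grep swap /etc/fstab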

##################### master node
#!/bin/bash
#Install chrony:
yum install -y chrony
#Comment out the default NTP servers
sed -i 's/^server/#&/' /etc/chrony.conf
#Point to public upstream NTP servers and allow the other nodes to sync from this host
cat >> /etc/chrony.conf << EOF
server 0.asia.pool.ntp.org iburst
server 1.asia.pool.ntp.org iburst
server 2.asia.pool.ntp.org iburst
server 3.asia.pool.ntp.org iburst
allow all
EOF
#Enable chronyd at boot and restart it:
systemctl enable chronyd && systemctl restart chronyd
#Turn on network time synchronization
timedatectl set-ntp true

##################### node
#Install chrony:
yum install -y chrony
#Comment out the default NTP servers
sed -i 's/^server/#&/' /etc/chrony.conf
#Use the master node (192.168.1.132, as listed in /etc/hosts above) as the upstream NTP server
echo server 192.168.1.132 iburst >> /etc/chrony.conf
#Enable chronyd at boot and restart it:
systemctl enable chronyd && systemctl restart chronyd

#Check time synchronization
timedatectl
#Make sure "NTP synchronized" shows yes: a time server can only serve time to other nodes
#after it has finished synchronizing itself.
chronyc sources -v
#Lists the configured time sources

#Prerequisites for running kube-proxy in IPVS mode
#IPVS is part of the mainline kernel, but the following kernel modules must be loaded
#before kube-proxy can use it. Run this script on every Kubernetes node:
#!/bin/bash
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
#Make the module script executable, run it, and confirm the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
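#Note: on kernels 4.19 and newer, nf_conntrack_ipv4 was merged into nf_conntrack; on such
#kernels load nf_conntrack instead (the stock CentOS 7 3.10 kernel keeps the old name).
modprobe -- nf_conntrack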

yum install ipset ipvsadm -y
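#Not shown in the original steps, but kubeadm's preflight checks also expect bridged traffic
#to pass through iptables; a minimal sketch of the usual sysctl setup on every node:
modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system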

cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

wget http://mirrors.aliyun.com/repo/Centos-7.repo -O /etc/yum.repos.d/CentOS-Base.repo
wget http://mirrors.aliyun.com/repo/epel-7.repo -O /etc/yum.repos.d/epel.repo 
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
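#Optional: confirm the repos are usable and the pinned package versions exist
yum clean all && yum makecache fast
yum list kubelet kubeadm kubectl --showduplicates | grep 1.16.1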

yum -y install kubernetes-cni-0.6.0
#(the kubelet package below may pull in a newer kubernetes-cni as a dependency)
yum install -y kubelet-1.16.1 kubeadm-1.16.1 kubectl-1.16.1 ipvsadm ipset docker-ce-18.06.1.ce

systemctl enable docker && systemctl start docker
systemctl enable kubelet && systemctl start kubelet
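#kubelet will restart in a crash loop until kubeadm init (or join) generates its
#configuration; that is expected at this point and can be confirmed with:
systemctl status kubelet --no-pager -l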

# Flag-based alternative to the config-file init used below:
# kubeadm init \
#     --apiserver-advertise-address=192.168.1.132 \
#     --image-repository registry.aliyuncs.com/google_containers \
#     --kubernetes-version v1.16.1 \
#     --pod-network-cidr=10.244.0.0/16

#Configure Docker: systemd cgroup driver, json-file log driver, 100 MB max log size.
#(JSON does not allow comments, so keep them out of daemon.json itself.)
cat >/etc/docker/daemon.json <<EOF
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m"
    }
}
EOF
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
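#Confirm Docker picked up the systemd cgroup driver (should print "Cgroup Driver: systemd")
docker info 2>/dev/null | grep -i 'cgroup driver'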

#Pull the images from mirrors reachable inside China
docker pull mirrorgooglecontainers/kube-apiserver-amd64:v1.16.1 
docker pull mirrorgooglecontainers/kube-controller-manager-amd64:v1.16.1 
docker pull gcr.azk8s.cn/google_containers/etcd-amd64:3.3.15
docker pull mirrorgooglecontainers/kube-scheduler-amd64:v1.16.1 
docker pull mirrorgooglecontainers/kube-proxy-amd64:v1.16.1
docker pull coredns/coredns:1.6.2 
docker pull mirrorgooglecontainers/pause-amd64:3.1
#Retag the images to the names and versions kubeadm expects (tags must match the pulled v1.16.1)
docker tag mirrorgooglecontainers/kube-controller-manager-amd64:v1.16.1 k8s.gcr.io/kube-controller-manager:v1.16.1
docker tag mirrorgooglecontainers/kube-apiserver-amd64:v1.16.1 k8s.gcr.io/kube-apiserver:v1.16.1
docker tag mirrorgooglecontainers/kube-proxy-amd64:v1.16.1 k8s.gcr.io/kube-proxy:v1.16.1
docker tag mirrorgooglecontainers/kube-scheduler-amd64:v1.16.1 k8s.gcr.io/kube-scheduler:v1.16.1
docker tag coredns/coredns:1.6.2 k8s.gcr.io/coredns:1.6.2
docker tag gcr.azk8s.cn/google_containers/etcd-amd64:3.3.15 k8s.gcr.io/etcd:3.3.15-0
docker tag mirrorgooglecontainers/pause-amd64:3.1 k8s.gcr.io/pause:3.1

docker images | grep k8s
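#The worker nodes need at least the kube-proxy and pause images before joining. A minimal
#sketch for copying them from the master over SSH (node names assumed from /etc/hosts above):
for node in k8s-node1 k8s-node2; do
  for img in k8s.gcr.io/kube-proxy:v1.16.1 k8s.gcr.io/pause:3.1; do
    docker save $img | ssh $node docker load
  done
done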

mkdir ~/install-k8s/
mkdir ~/install-k8s/core
mkdir ~/install-k8s/plugin
cd ~/install-k8s/core
kubeadm config print init-defaults > kubeadm-config.yaml
#Inspect the fields that need editing; in the v1.16 init-defaults output these line numbers
#roughly correspond to the advertiseAddress, imageRepository, kubernetesVersion and networking settings
cat -n kubeadm-config.yaml | sed -n '12p;34p;37p;40,45p'
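#A sketch of the edits those lines usually need, using the values from this guide
#(adjust advertiseAddress and the pod subnet to your own environment before running):
sed -i 's/advertiseAddress: .*/advertiseAddress: 192.168.1.132/' kubeadm-config.yaml
sed -i 's/kubernetesVersion: .*/kubernetesVersion: v1.16.1/' kubeadm-config.yaml
sed -i 's|serviceSubnet: 10.96.0.0/12|serviceSubnet: 10.96.0.0/12\n  podSubnet: 10.244.0.0/16|' kubeadm-config.yaml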
#Append the kube-proxy IPVS configuration to the end of the config file
cat >> kubeadm-config.yaml << EOF
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
EOF

#集群初始化
kubeadm init --config=kubeadm-config.yaml | tee kubeadm-init.log

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
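#Quick check that kubectl can reach the new control plane
kubectl get cs
kubectl get pods -n kube-system -o wide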

#Before joining, load the required images onto each node, then run the join command printed
#in kubeadm-init.log on that node (the token and hash below are examples):
kubeadm join 192.168.1.132:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:022226e355198e32ecc4e735ae39f4b10a8eba8ad782aa66bac448d881684d94
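#If the token from kubeadm-init.log has expired (24h by default), print a fresh join
#command on the master:
kubeadm token create --print-join-command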
#To remove a node: delete it on the master, then reset kubeadm on the node itself
kubectl delete node k8s-node2
kubeadm reset

#Install the flannel network plugin
cd ~/install-k8s/plugin
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f kube-flannel.yml
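#Nodes stay NotReady until the CNI plugin is running; watch the flannel pods come up
#(the app=flannel label is assumed from the upstream kube-flannel.yml manifest):
kubectl -n kube-system get pods -l app=flannel -o wide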

kubectl get nodes -o wide
kubectl get pods --all-namespaces -o wide

#Troubleshooting: inspect the Docker service logs
journalctl -u docker.service
#Inspect a problematic pod (the coredns pod shown here as an example)
kubectl describe pod coredns-5644d7b6d9-5rfc8 --namespace=kube-system
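#Pod logs are often more useful than describe when a pod keeps restarting
kubectl logs coredns-5644d7b6d9-5rfc8 -n kube-system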


Reposted from www.cnblogs.com/orange-lsc/p/11712486.html