Kubernetes Installation Guide

Host information and node roles:

master01    192.168.3.120    master + etcd (2c4g)
master02    192.168.3.121    master + etcd (2c4g)
master03    192.168.3.122    master + etcd (2c4g)
node01      192.168.3.231    node

 

Software versions:

RPM_KUBEADM="kubeadm-1.9.1-0.x86_64.rpm"

RPM_KUBECTL="kubectl-1.9.1-0.x86_64.rpm"

RPM_KUBELET="kubelet-1.9.1-0.x86_64.rpm"

RPM_KUBECNI="kubernetes-cni-0.6.0-0.x86_64.rpm"

RPM_SOCAT="socat-1.7.3.2-2.el7.x86_64.rpm"

 

Set the hostnames:

192.168.3.120# hostnamectl set-hostname master01
192.168.3.121# hostnamectl set-hostname master02
192.168.3.122# hostnamectl set-hostname master03
192.168.3.231# hostnamectl set-hostname node01

 

Run on every host:

cat <<EOF > /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.3.120 master01
192.168.3.121 master02
192.168.3.122 master03
192.168.3.231 node01
EOF

 

On master01, set up passwordless SSH and push the key to every node:

ssh-keygen   # press Enter through all prompts
ssh-copy-id master02
ssh-copy-id master03
ssh-copy-id node01
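To verify that passwordless login works before continuing, a minimal check (hostnames as defined in /etc/hosts above):

# Each ssh call should print the remote hostname without prompting for a password.
for h in master02 master03 node01; do
  ssh -o BatchMode=yes "$h" hostname
done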

 

On all four hosts: stop the firewall, disable swap, disable SELinux, set kernel limits, and configure NTP (reboot once after finishing):

systemctl stop firewalld
systemctl disable firewalld
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
setenforce 0
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config

 

systemctl enable ntpdate.service
echo '*/30 * * * * /usr/sbin/ntpdate time7.aliyun.com >/dev/null 2>&1' > /tmp/crontab2.tmp
crontab /tmp/crontab2.tmp
systemctl start ntpdate.service
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
echo "* soft nproc 65536" >> /etc/security/limits.conf
echo "* hard nproc 65536" >> /etc/security/limits.conf
echo "* soft memlock unlimited" >> /etc/security/limits.conf
echo "* hard memlock unlimited" >> /etc/security/limits.conf

Create the etcd certificates (run on master01 only)

Set up the cfssl environment:

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
chmod +x cfssljson_linux-amd64
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
chmod +x cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
export PATH=/usr/local/bin:$PATH

Create the CA configuration files (the IPs configured below are the etcd node IPs):

mkdir /root/ssl
cd /root/ssl
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes-Soulmate": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
  "CN": "kubernetes-Soulmate",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.3.120",
    "192.168.3.121",
    "192.168.3.122"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes-Soulmate etcd-csr.json | cfssljson -bare etcd
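To sanity-check the resulting certificate, cfssl-certinfo (installed above) can print its parsed fields; the SANs should list 127.0.0.1 and the three etcd IPs:

# Inspect the generated etcd certificate.
cfssl-certinfo -cert etcd.pem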

From master01, distribute the etcd certificates to master02 and master03:

mkdir -p /etc/etcd/ssl
cp etcd.pem etcd-key.pem ca.pem /etc/etcd/ssl/
ssh -n master02 "mkdir -p /etc/etcd/ssl && exit"
ssh -n master03 "mkdir -p /etc/etcd/ssl && exit"
scp -r /etc/etcd/ssl/*.pem master02:/etc/etcd/ssl/
scp -r /etc/etcd/ssl/*.pem master03:/etc/etcd/ssl/

Install and configure etcd (all three master nodes)

Install etcd:

yum install etcd -y
mkdir -p /var/lib/etcd

etcd.service for master01; master02 and master03 use the same configuration with --name and the IPs changed to the local node's values:

cat <<'EOF' > /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name master01 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.3.120:2380 \
  --listen-peer-urls https://192.168.3.120:2380 \
  --listen-client-urls https://192.168.3.120:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.3.120:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster master01=https://192.168.3.120:2380,master02=https://192.168.3.121:2380,master03=https://192.168.3.122:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
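Rather than editing the unit by hand on master02 and master03, a helper sketch (run on master01, using the passwordless SSH set up earlier) can derive their units from master01's. The /-urls/ address deliberately skips the --initial-cluster line, which must keep master01's IP:

# Generate and push etcd units for master02 (192.168.3.121) and master03 (192.168.3.122).
for i in 1 2; do
  host="master0$((i+1))"
  ip="192.168.3.12$i"
  sed -e "s/--name master01/--name $host/" \
      -e "/-urls/s/192\.168\.3\.120/$ip/g" \
      /etc/systemd/system/etcd.service > "/tmp/etcd.service.$host"
  scp "/tmp/etcd.service.$host" "$host":/etc/systemd/system/etcd.service
done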

 

Enable and start etcd (the cluster needs at least two etcd members up before startup succeeds; if it fails, check the messages log):

systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd

Run the following check on all three etcd nodes:

etcdctl --endpoints=https://192.168.3.120:2379,https://192.168.3.121:2379,https://192.168.3.122:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem cluster-health
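cluster-health should report all three members as healthy. Membership can also be listed with the same v2 etcdctl flags:

# List the registered cluster members and their peer/client URLs.
etcdctl --endpoints=https://192.168.3.120:2379,https://192.168.3.121:2379,https://192.168.3.122:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem member list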

Install Docker (shell function, run on every node):

docker_install()

{

yum clean all

yum makecache

yum install -y yum-utils

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum-config-manager --enable docker-ce-edge

yum install -y ebtables wget yum-utils device-mapper-persistent-data lvm2

yum install -y docker-ce

echo "Docker installed successfully!"

# Docker storage directory

if [ ! -n "$DOCKER_GRAPH" ]; then

export DOCKER_GRAPH="/mnt/docker"

fi

# Docker registry mirror (accelerator)

if [ ! -n "$DOCKER_MIRRORS" ]; then

export DOCKER_MIRRORS="https://5md0553g.mirror.aliyuncs.com"

fi

if [ ! -d "/etc/docker" ]; then

mkdir -p /etc/docker

fi

# Configure the registry mirror

cat > /etc/docker/daemon.json <<EOF

{

"registry-mirrors": ["${DOCKER_MIRRORS}"],

"graph":"${DOCKER_GRAPH}"

}

EOF

echo "Config docker success!"

systemctl daemon-reload

systemctl enable docker

systemctl start docker

echo "Docker start successfully!"

}
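A minimal usage sketch: the function honors the DOCKER_GRAPH and DOCKER_MIRRORS environment variables and falls back to the defaults shown above (the values here are illustrative):

# Override the storage directory and mirror, then install.
export DOCKER_GRAPH="/data/docker"
export DOCKER_MIRRORS="https://5md0553g.mirror.aliyuncs.com"
docker_install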

Install the Kubernetes RPM packages (shell function, run on every node):

kube_install()

{

cat >> /etc/sysctl.d/k8s.conf <<EOF

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

vm.swappiness=0

EOF

modprobe br_netfilter

# Apply the configuration

sysctl -p /etc/sysctl.d/k8s.conf

echo "Network configuration success!"

# kubelet, kubeadm, kubectl, and kubernetes-cni package versions

 

if [ ! -n "$KUBE_VERSION" ]; then

export KUBE_VERSION="1.9.1"

fi

if [ ! -n "$KUBE_CNI_VERSION" ]; then

export KUBE_CNI_VERSION="0.6.0"

fi

if [ ! -n "$SOCAT_VERSION" ]; then

export SOCAT_VERSION="1.7.3.2"

fi

export OSS_URL="http://centos-k8s.oss-cn-hangzhou.aliyuncs.com/rpm/"${KUBE_VERSION}"/"

export RPM_KUBEADM="kubeadm-"${KUBE_VERSION}"-0.x86_64.rpm"

export RPM_KUBECTL="kubectl-"${KUBE_VERSION}"-0.x86_64.rpm"

export RPM_KUBELET="kubelet-"${KUBE_VERSION}"-0.x86_64.rpm"

export RPM_KUBECNI="kubernetes-cni-"${KUBE_CNI_VERSION}"-0.x86_64.rpm"

export RPM_SOCAT="socat-"${SOCAT_VERSION}"-2.el7.x86_64.rpm"

 

export RPM_KUBEADM_URL=${OSS_URL}${RPM_KUBEADM}

export RPM_KUBECTL_URL=${OSS_URL}${RPM_KUBECTL}

export RPM_KUBELET_URL=${OSS_URL}${RPM_KUBELET}

export RPM_KUBECNI_URL=${OSS_URL}${RPM_KUBECNI}

export RPM_SOCAT_URL=${OSS_URL}${RPM_SOCAT}

 

if [ ! -n "$ETCD_VERSION" ]; then

export ETCD_VERSION="3.1.10"

fi

if [ ! -n "$PAUSE_VERSION" ]; then

export PAUSE_VERSION="3.0"

fi

if [ ! -n "$FLANNEL_VERSION" ]; then

export FLANNEL_VERSION="v0.9.1"

fi

export KUBE_REPO_PREFIX=registry.cn-hangzhou.aliyuncs.com/szss_k8s

 

 

# Download the packages

if [ ! -f $PWD"/"$RPM_KUBEADM ]; then

wget $RPM_KUBEADM_URL

fi

if [ ! -f $PWD"/"$RPM_KUBECTL ]; then

wget $RPM_KUBECTL_URL

fi

if [ ! -f $PWD"/"$RPM_KUBELET ]; then

wget $RPM_KUBELET_URL

fi

if [ ! -f $PWD"/"$RPM_KUBECNI ]; then

wget $RPM_KUBECNI_URL

fi

if [ ! -f $PWD"/"$RPM_SOCAT ]; then

wget $RPM_SOCAT_URL

fi

rpm -ivh $PWD"/"$RPM_KUBECNI $PWD"/"$RPM_SOCAT $PWD"/"$RPM_KUBEADM $PWD"/"$RPM_KUBECTL $PWD"/"$RPM_KUBELET

echo "kubelet kubeadm kubectl kubernetes-cni installed successfully!"

 

sed -i 's/cgroup-driver=systemd/cgroup-driver=cgroupfs/g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

echo "config cgroup-driver=cgroupfs success!"

 

export KUBE_PAUSE_IMAGE=${KUBE_REPO_PREFIX}"/pause-amd64:${PAUSE_VERSION}"

 

cat > /etc/systemd/system/kubelet.service.d/20-pod-infra-image.conf <<EOF

[Service]

Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image=${KUBE_PAUSE_IMAGE}"

EOF

echo "config --pod-infra-container-image=${KUBE_PAUSE_IMAGE} success!"

 

systemctl daemon-reload

systemctl enable kubelet

systemctl start kubelet

echo "Kubelet installed successfully!"

}
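Usage mirrors docker_install: KUBE_VERSION, KUBE_CNI_VERSION, and SOCAT_VERSION may be exported first; otherwise the defaults from the "Software versions" section apply:

# Install with the default versions (kubeadm/kubectl/kubelet 1.9.1, CNI 0.6.0, socat 1.7.3.2).
kube_install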

 

Initialize the cluster

Create the cluster init configuration file on master01, master02, and master03 (the same file on all three):

cat <<EOF > /etc/kubernetes/kubeadm.conf
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
kubernetesVersion: 1.9.1
etcd:
  endpoints:
  - https://192.168.3.120:2379
  - https://192.168.3.121:2379
  - https://192.168.3.122:2379
  caFile: /etc/etcd/ssl/ca.pem
  certFile: /etc/etcd/ssl/etcd.pem
  keyFile: /etc/etcd/ssl/etcd-key.pem
  dataDir: /var/lib/etcd
networking:
  podSubnet: 10.244.0.0/16   # per "kubeadm init --help", the default service subnet is 10.96.0.0/12
api:
  advertiseAddress: "192.168.3.120"
token: "b99a00.a144ef80536d4344"
tokenTTL: "0s"
apiServerCertSANs:
- master01
- master02
- master03
- 192.168.3.120
- 192.168.3.121
- 192.168.3.122
- 192.168.3.231
featureGates:
  CoreDNS: true
imageRepository: "registry.cn-hangzhou.aliyuncs.com/k8sth"
EOF

Then initialize master01:

kubeadm init --config /etc/kubernetes/kubeadm.conf
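Since the file is identical on all three masters, it can be pushed from master01 instead of retyped (a small sketch; assumes /etc/kubernetes already exists on the targets, which the kubelet/kubeadm RPMs create):

# Copy the kubeadm config to the other two masters.
for h in master02 master03; do
  scp /etc/kubernetes/kubeadm.conf "$h":/etc/kubernetes/kubeadm.conf
done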

 

Run the following commands on master01:

 

# Create the $HOME/.kube directory if it does not exist

if [ ! -d "$HOME/.kube" ]; then

mkdir -p $HOME/.kube

fi

 

# Remove $HOME/.kube/config if it already exists

if [ -f "$HOME/.kube/config" ]; then

rm -rf $HOME/.kube/config

fi

 

cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

chown $(id -u):$(id -g) $HOME/.kube/config

echo "Config admin success!"

 

Distribute the kubeadm-generated certificates to master02 and master03:

 

scp -r /etc/kubernetes/pki master02:/etc/kubernetes/
scp -r /etc/kubernetes/pki master03:/etc/kubernetes/

With the certificates in place, master02 and master03 can then run the same kubeadm init --config /etc/kubernetes/kubeadm.conf.

Deploy the flannel network; this only needs to run on master01:

 

 

if [ -f "$HOME/kube-flannel.yml" ]; then

rm -rf $HOME/kube-flannel.yml

fi

wget -P $HOME/ https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

kubectl --namespace kube-system apply -f $HOME/kube-flannel.yml
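Once the manifest is applied, the flannel (and CoreDNS) pods in kube-system should reach Running and the nodes turn Ready:

kubectl get pods --namespace kube-system -o wide
kubectl get nodes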

 

 

 
