一、环境说明
操作系统 | 主机名 | 节点及功能 | IP | 备注 |
OLE7.5 X86_64 | K8S-master | Master/etcd/registry | 192.168.168.2 | etcd/docker/kube-apiserver/kube-controller-manager/kube-scheduler/flannel |
OLE7.5 X86_64 | K8S-node01 | Node01 | 192.168.168.3 | etcd/docker/kube-proxy/kubelet/flannel |
OLE7.5 X86_64 | K8S-node02 | Node02 | 192.168.168.4 | etcd/docker/kube-proxy/kubelet/flannel |
二、3台主机安装前准备
1)更新软件包和内核
yum -y update
2) 关闭防火墙
systemctl stop firewalld.service
systemctl disable firewalld.service
3) 关闭SELinux
vi /etc/selinux/config
改SELINUX=enforcing为SELINUX=disabled（修改后需重启才能生效；如需立即临时生效可执行 setenforce 0）
4)安装常用网络命令
yum -y install net-tools
5) 安装ntpdate
yum -y install ntpdate
三、修改三台主机命名
1) K8S-master
hostnamectl --static set-hostname k8s-master
2) K8S-node01
hostnamectl --static set-hostname k8s-node01
3) K8S-node02
hostnamectl --static set-hostname k8s-node02
1.修改hosts
vi /etc/hosts
cat >> /etc/hosts <<EOF
192.168.168.2 k8s-master
192.168.168.2 etcd
192.168.168.2 registry
192.168.168.3 k8s-node01
192.168.168.4 k8s-node02
EOF
2.下载etcd for OLE7 rpm包
3.安装etcd
rpm -ivh etcd-3.2.18-1.el7.x86_64.rpm
4.k8s-master(192.168.168.2)编辑etcd.conf文件
vi /etc/etcd/etcd.conf
#[Member] #ETCD_CORS="" ETCD_DATA_DIR="/var/lib/etcd/k8s-master.etcd" #ETCD_WAL_DIR="" ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380" ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001" #ETCD_MAX_SNAPSHOTS="5" #ETCD_MAX_WALS="5" ETCD_NAME="k8s-master" #ETCD_SNAPSHOT_COUNT="100000" #ETCD_HEARTBEAT_INTERVAL="100" #ETCD_ELECTION_TIMEOUT="1000" #ETCD_QUOTA_BACKEND_BYTES="0" #ETCD_MAX_REQUEST_BYTES="1572864" #ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" #ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" #ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" # #[Clustering] #ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380" ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.168.2:2380" ETCD_ADVERTISE_CLIENT_URLS="http://192.168.168.2:2379,http://192.168.168.2:4001" #ETCD_DISCOVERY="" #ETCD_DISCOVERY_FALLBACK="proxy" #ETCD_DISCOVERY_PROXY="" #ETCD_DISCOVERY_SRV="" ETCD_INITIAL_CLUSTER="k8s-master=http://192.168.168.2:2380,k8s-node01=http://192.168.168.3:2380,k8s-node02=http://192.168.168.4:2380" #ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" #ETCD_INITIAL_CLUSTER_STATE="new" #ETCD_STRICT_RECONFIG_CHECK="true" #ETCD_ENABLE_V2="true" # #[Proxy] #ETCD_PROXY="off" #ETCD_PROXY_FAILURE_WAIT="5000" #ETCD_PROXY_REFRESH_INTERVAL="30000" #ETCD_PROXY_DIAL_TIMEOUT="1000" #ETCD_PROXY_WRITE_TIMEOUT="5000" #ETCD_PROXY_READ_TIMEOUT="0" # #[Security] #ETCD_CERT_FILE="" #ETCD_KEY_FILE="" #ETCD_CLIENT_CERT_AUTH="false" #ETCD_TRUSTED_CA_FILE="" #ETCD_AUTO_TLS="false" #ETCD_PEER_CERT_FILE="" #ETCD_PEER_KEY_FILE="" #ETCD_PEER_CLIENT_CERT_AUTH="false" #ETCD_PEER_TRUSTED_CA_FILE="" #ETCD_PEER_AUTO_TLS="false" # #[Logging] #ETCD_DEBUG="false" #ETCD_LOG_PACKAGE_LEVELS="" #ETCD_LOG_OUTPUT="default" # #[Unsafe] #ETCD_FORCE_NEW_CLUSTER="false" # #[Version] #ETCD_VERSION="false" #ETCD_AUTO_COMPACTION_RETENTION="0" # #[Profiling] #ETCD_ENABLE_PPROF="false" #ETCD_METRICS="basic" # #[Auth] #ETCD_AUTH_TOKEN="simple"
5.k8s-node01(192.168.168.3)编辑etcd.conf文件
vi /etc/etcd/etcd.conf
#[Member] #ETCD_CORS="" ETCD_DATA_DIR="/var/lib/etcd/k8s-node01.etcd" #ETCD_WAL_DIR="" ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380" ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001" #ETCD_MAX_SNAPSHOTS="5" #ETCD_MAX_WALS="5" ETCD_NAME="k8s-node01" #ETCD_SNAPSHOT_COUNT="100000" #ETCD_HEARTBEAT_INTERVAL="100" #ETCD_ELECTION_TIMEOUT="1000" #ETCD_QUOTA_BACKEND_BYTES="0" #ETCD_MAX_REQUEST_BYTES="1572864" #ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" #ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" #ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" # #[Clustering] #ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380" ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.168.3:2380" ETCD_ADVERTISE_CLIENT_URLS="http://192.168.168.3:2379,http://192.168.168.3:4001" #ETCD_DISCOVERY="" #ETCD_DISCOVERY_FALLBACK="proxy" #ETCD_DISCOVERY_PROXY="" #ETCD_DISCOVERY_SRV="" ETCD_INITIAL_CLUSTER="k8s-master=http://192.168.168.2:2380,k8s-node01=http://192.168.168.3:2380,k8s-node02=http://192.168.168.4:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new" #ETCD_STRICT_RECONFIG_CHECK="true" #ETCD_ENABLE_V2="true" # #[Proxy] #ETCD_PROXY="off" #ETCD_PROXY_FAILURE_WAIT="5000" #ETCD_PROXY_REFRESH_INTERVAL="30000" #ETCD_PROXY_DIAL_TIMEOUT="1000" #ETCD_PROXY_WRITE_TIMEOUT="5000" #ETCD_PROXY_READ_TIMEOUT="0" # #[Security] #ETCD_CERT_FILE="" #ETCD_KEY_FILE="" #ETCD_CLIENT_CERT_AUTH="false" #ETCD_TRUSTED_CA_FILE="" #ETCD_AUTO_TLS="false" #ETCD_PEER_CERT_FILE="" #ETCD_PEER_KEY_FILE="" #ETCD_PEER_CLIENT_CERT_AUTH="false" #ETCD_PEER_TRUSTED_CA_FILE="" #ETCD_PEER_AUTO_TLS="false" # #[Logging] #ETCD_DEBUG="false" #ETCD_LOG_PACKAGE_LEVELS="" #ETCD_LOG_OUTPUT="default" # #[Unsafe] #ETCD_FORCE_NEW_CLUSTER="false" # #[Version] #ETCD_VERSION="false" #ETCD_AUTO_COMPACTION_RETENTION="0" # #[Profiling] #ETCD_ENABLE_PPROF="false" #ETCD_METRICS="basic" # #[Auth] #ETCD_AUTH_TOKEN="simple"
6.k8s-node02编辑etcd.conf文件
vi /etc/etcd/etcd.conf
#[Member] #ETCD_CORS="" ETCD_DATA_DIR="/var/lib/etcd/k8s-node02.etcd" #ETCD_WAL_DIR="" ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380" ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001" #ETCD_MAX_SNAPSHOTS="5" #ETCD_MAX_WALS="5" ETCD_NAME="k8s-node02" #ETCD_SNAPSHOT_COUNT="100000" #ETCD_HEARTBEAT_INTERVAL="100" #ETCD_ELECTION_TIMEOUT="1000" #ETCD_QUOTA_BACKEND_BYTES="0" #ETCD_MAX_REQUEST_BYTES="1572864" #ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" #ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" #ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" # #[Clustering] #ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380" ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.168.4:2380" ETCD_ADVERTISE_CLIENT_URLS="http://192.168.168.4:2379,http://192.168.168.4:4001" #ETCD_DISCOVERY="" #ETCD_DISCOVERY_FALLBACK="proxy" #ETCD_DISCOVERY_PROXY="" #ETCD_DISCOVERY_SRV="" ETCD_INITIAL_CLUSTER="k8s-master=http://192.168.168.2:2380,k8s-node01=http://192.168.168.3:2380,k8s-node02=http://192.168.168.4:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new" #ETCD_STRICT_RECONFIG_CHECK="true" #ETCD_ENABLE_V2="true" # #[Proxy] #ETCD_PROXY="off" #ETCD_PROXY_FAILURE_WAIT="5000" #ETCD_PROXY_REFRESH_INTERVAL="30000" #ETCD_PROXY_DIAL_TIMEOUT="1000" #ETCD_PROXY_WRITE_TIMEOUT="5000" #ETCD_PROXY_READ_TIMEOUT="0" # #[Security] #ETCD_CERT_FILE="" #ETCD_KEY_FILE="" #ETCD_CLIENT_CERT_AUTH="false" #ETCD_TRUSTED_CA_FILE="" #ETCD_AUTO_TLS="false" #ETCD_PEER_CERT_FILE="" #ETCD_PEER_KEY_FILE="" #ETCD_PEER_CLIENT_CERT_AUTH="false" #ETCD_PEER_TRUSTED_CA_FILE="" #ETCD_PEER_AUTO_TLS="false" # #[Logging] #ETCD_DEBUG="false" #ETCD_LOG_PACKAGE_LEVELS="" #ETCD_LOG_OUTPUT="default" # #[Unsafe] #ETCD_FORCE_NEW_CLUSTER="false" # #[Version] #ETCD_VERSION="false" #ETCD_AUTO_COMPACTION_RETENTION="0" # #[Profiling] #ETCD_ENABLE_PPROF="false" #ETCD_METRICS="basic" # #[Auth] #ETCD_AUTH_TOKEN="simple"
7.启动etcd并设置开机自动启动
systemctl enable etcd.service
systemctl start etcd.service
# etcd --version //查看etcd安装版本 etcd Version: 3.2.18 Git SHA: eddf599 Go Version: go1.9.4 Go OS/Arch: linux/amd64
# etcdctl member list //查看集群实例状态 4cafd7b2cc7ff9f0: name=k8s-node02 peerURLs=http://192.168.168.4:2380 clientURLs=http://192.168.168.4:2379,http://192.168.168.4:4001 isLeader=false 8f7cc7a0e8df6229: name=k8s-master peerURLs=http://192.168.168.2:2380 clientURLs=http://192.168.168.2:2379,http://192.168.168.2:4001 isLeader=true a02cf1d23cbc3507: name=k8s-node01 peerURLs=http://192.168.168.3:2380 clientURLs=http://192.168.168.3:2379,http://192.168.168.3:4001 isLeader=false
# etcdctl cluster-health //查看etcd集群状态 member 4cafd7b2cc7ff9f0 is healthy: got healthy result from http://192.168.168.4:2379 member 8f7cc7a0e8df6229 is healthy: got healthy result from http://192.168.168.2:2379 member a02cf1d23cbc3507 is healthy: got healthy result from http://192.168.168.3:2379 cluster is healthy
四、在etcd master节点操作数据，验证集群读写
etcdctl set name testk8s
在node01和node02节点查看
etcdctl get name
如显示testk8s则etcd集群正常
五、3台主机上安装docker-engine
详细步骤见“Oracle Linux7安装Docker”
systemctl enable docker.service
systemctl start docker.service
六、Kubernetes安装配置
软件包列表: kubeadm-1.8.4-2.0.1.el7.x86_64.rpm kubectl-1.8.4-2.0.1.el7.x86_64.rpm kubernetes-1.8.4-2.0.1.el7.x86_64.rpm kubernetes-client-1.8.4-2.0.1.el7.x86_64.rpm kubernetes-cni-0.5.2-2.0.2.el7.x86_64.rpm kubernetes-dns-1.14.5-2.0.1.el7.x86_64.rpm kubernetes-master-1.8.4-2.0.1.el7.x86_64.rpm kubernetes-node-1.8.4-2.0.1.el7.x86_64.rpm
2.安装Kubernetes rpm包
rpm -ivh --nodeps --force kubeadm-1.8.4-2.0.1.el7.x86_64.rpm kubectl-1.8.4-2.0.1.el7.x86_64.rpm kubernetes-1.8.4-2.0.1.el7.x86_64.rpm kubernetes-client-1.8.4-2.0.1.el7.x86_64.rpm kubernetes-cni-0.5.2-2.0.2.el7.x86_64.rpm kubernetes-dns-1.14.5-2.0.1.el7.x86_64.rpm kubernetes-master-1.8.4-2.0.1.el7.x86_64.rpm kubernetes-node-1.8.4-2.0.1.el7.x86_64.rpm
3.master节点配置
vi /etc/kubernetes/apiserver
### # kubernetes system config # # The following values are used to configure the kube-apiserver # # The address on the local server to listen to. #KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0" # The port on the local server to listen on. # KUBE_API_PORT="--port=8080" KUBE_API_PORT="--port=8080" # Port minions listen on # KUBELET_PORT="--kubelet-port=10250" KUBELET_PORT="--kubelet-port=10250" # Comma separated list of nodes in the etcd cluster KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.168.2:2379,http://192.168.168.3:2379,http://192.168.168.4:2379" # Address range to use for services #KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=192.168.200.0/24" //此为docker容器地址段 # default admission control policies #KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota" KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota" # Add your own! KUBE_API_ARGS=""
vi /etc/kubernetes/config
### # kubernetes system config # # The following values are used to configure various aspects of all # kubernetes services, including # # kube-apiserver.service # kube-controller-manager.service # kube-scheduler.service # kubelet.service # kube-proxy.service # logging to stderr means we get it in the systemd journal KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug KUBE_LOG_LEVEL="--v=0" # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow-privileged=false" # How the controller-manager, scheduler, and proxy find the apiserver KUBE_MASTER="--master=http://192.168.168.2:8080"
启动服务
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
4.node01和node02节点配置
vi /etc/kubernetes/config
### # kubernetes system config # # The following values are used to configure various aspects of all # kubernetes services, including # # kube-apiserver.service # kube-controller-manager.service # kube-scheduler.service # kubelet.service # kube-proxy.service # logging to stderr means we get it in the systemd journal KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug KUBE_LOG_LEVEL="--v=0" # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow-privileged=false" # How the controller-manager, scheduler, and proxy find the apiserver KUBE_MASTER="--master=http://192.168.168.2:8080"
vi /etc/kubernetes/kubelet
### # kubernetes kubelet (minion) config # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) KUBELET_ADDRESS="--address=127.0.0.1" # The port for the info server to serve on # KUBELET_PORT="--port=10250" KUBELET_PORT="--port=10250" # You may leave this blank to use the actual hostname KUBELET_HOSTNAME="--hostname-override=192.168.168.3" //此处node02节点为192.168.168.4 # location of the api-server KUBELET_API_SERVER="--api-servers=http://192.168.168.2:8080" # pod infrastructure container KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest" # Add your own! KUBELET_ARGS=""
启动服务
systemctl enable kubelet.service
systemctl enable kube-proxy.service
systemctl start kubelet.service
systemctl start kube-proxy.service