Building a Kubernetes 1.11 cluster on CentOS 7 from binary packages

OS: CentOS Linux release 7.5.1804 (Core)

Role            Hostname        IP address
etcd            etcd            192.168.98.18
k8s-master      k8s-master      192.168.98.18
k8s-node1       k8s-node1       192.168.98.19
k8s-node2       k8s-node2       192.168.98.20


Synchronize the time across all three machines first:


yum install ntpdate -y
systemctl start ntpdate
systemctl enable ntpdate
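ntpdate.service is a oneshot that syncs the clock when it starts; if you prefer a daemon that keeps the clocks disciplined continuously, chronyd (shipped with CentOS 7) is an alternative, sketched here:

yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
chronyc tracking    # confirm the offset is converging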

  

Deploy the following services on the master:

etcd, flanneld, kubernetes-server (kube-apiserver, kube-controller-manager, kube-scheduler)


1. Configure etcd (192.168.98.18)


yum install etcd
vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
# run etcd as an unprivileged user (created below)
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

Create the unprivileged user, then start etcd:

[root@etcd system]# groupadd -g 990 etcd
[root@etcd system]# useradd -s /sbin/nologin -M -c "etcd user" -u 991 etcd -g etcd
[root@etcd system]# systemctl start etcd
[root@etcd system]# systemctl enable etcd
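Before moving on, etcdctl (installed with the etcd package) can confirm the daemon is healthy:

etcdctl cluster-health
etcdctl member list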

2. Deploy flanneld

yum install flanneld -y


Configuration file:

[root@localhost kubernetes]# cat /etc/sysconfig/flanneld 
# Flanneld configuration options  
# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.98.18:2379"
# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""

Add the network configuration to etcd (etcd is already running from the previous step; note the single leading slash, the key must live under the FLANNEL_ETCD_PREFIX configured above):

etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'   # create the key
etcdctl rm /atomic.io/network/config                                 # delete it, if you need to start over

Then start flanneld:

systemctl start flanneld
systemctl enable flanneld
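A quick check that the key landed and that flanneld leased a subnet (the interface is flannel0 with the default udp backend):

etcdctl get /atomic.io/network/config
cat /run/flannel/subnet.env
ip addr show flannel0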



3. Deploy Kubernetes (192.168.98.18)

The master only needs to run these services: apiserver, controller-manager, scheduler.


tar xvf kubernetes-server-linux-amd64.tar.gz   # note: this is the server tarball, deployed on the master
mv kubernetes /usr/local

Add the binaries to PATH by appending to /etc/profile:

## kubernetes
export PATH=$PATH:/usr/local/kubernetes/server/bin/

then reload it:

source /etc/profile
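A quick sanity check that the new PATH entry resolves:

kube-apiserver --version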


Now create the systemd unit files.

Unit file: kube-apiserver.service

[root@localhost ~]# cat /usr/lib/systemd/system/kube-apiserver.service 
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
User=kube
# NOTE: adjust the ExecStart path to your install directory
ExecStart=/usr/local/kubernetes/server/bin/kube-apiserver \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_ETCD_SERVERS \
            $KUBE_API_ADDRESS \
            $KUBE_API_PORT \
            $KUBELET_PORT \
            $KUBE_ALLOW_PRIV \
            $KUBE_SERVICE_ADDRESSES \
            $KUBE_ADMISSION_CONTROL \
            $KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target



Unit file: kube-controller-manager.service


[root@localhost ~]# cat /usr/lib/systemd/system/kube-controller-manager.service 
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
User=kube
# NOTE: adjust the ExecStart path to your install directory
ExecStart=/usr/local/kubernetes/server/bin/kube-controller-manager \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target


Unit file: kube-scheduler.service



[root@localhost ~]# cat /usr/lib/systemd/system/kube-scheduler.service 
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
User=kube
ExecStart=/usr/local/kubernetes/server/bin/kube-scheduler \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target


Create the unprivileged user:

groupadd -g 992 kube
useradd -s /sbin/nologin -M -c "kube user" -u 996 kube -g kube
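The three unit files above pull their flags from EnvironmentFiles under /etc/kubernetes, which this walkthrough never shows for the master. A minimal sketch of what they can contain, with addresses matching this cluster (the service CIDR and admission-control list are assumptions; adjust to your environment):

[root@localhost ~]# cat /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.98.18:8080"

[root@localhost ~]# cat /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--insecure-port=8080"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.98.18:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,ResourceQuota"
KUBE_API_ARGS=""

/etc/kubernetes/controller-manager and /etc/kubernetes/scheduler can start out as one-liners (KUBE_CONTROLLER_MANAGER_ARGS="" and KUBE_SCHEDULER_ARGS="" respectively); the leading "-" in EnvironmentFile=- means a missing file is tolerated rather than fatal.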


Start the services:

for service in kube-apiserver kube-controller-manager kube-scheduler; do
    systemctl restart $service && systemctl enable $service
done

Remember to check that each service came up:
systemctl status    kube-apiserver
systemctl status    kube-controller-manager
systemctl status    kube-scheduler
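kubectl (in the same server/bin directory) can also query the control plane as a whole; all components should report Healthy:

kubectl -s http://192.168.98.18:8080 get componentstatuses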

######################## master deployment complete

Node installation (192.168.98.19/20)

Install the following services on each node:

docker, flanneld, kubernetes-node (kube-proxy, kubelet)

1. Install docker-ce

See the official docs: https://docs.docker.com/install/linux/docker-ce/centos/#install-docker-ce-1

 yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-selinux \
                  docker-engine-selinux \
                  docker-engine
yum install -y yum-utils
yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
yum-config-manager --enable docker-ce-edge
yum list docker-ce --showduplicates | sort -r
yum install docker-ce-<VERSION STRING>
systemctl start docker
systemctl enable docker
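Optionally confirm the daemon is up before continuing:

docker version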

2. Install flanneld

yum install flanneld -y

Configuration file:

[root@localhost ~]# cat /etc/sysconfig/flanneld 

# Flanneld configuration options  
# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.98.18:2379"
# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""

Start the service. Do not install or start etcd here: the nodes talk to the etcd on the master, and the /atomic.io/network/config key was already created there.

systemctl start flanneld
systemctl enable flanneld
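flanneld should be running before docker so that docker allocates container IPs from the flannel-assigned subnet (on CentOS the flannel package installs a systemd drop-in for docker that sources the options flanneld writes under /run/flannel). If docker was already started in step 1, restart it:

cat /run/flannel/subnet.env    # FLANNEL_SUBNET is this node's pod address range
systemctl restart docker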


3. Install kubernetes-node

As on the master, unpack the node tarball first so that the paths in the unit files below resolve (the release ships kubernetes-node-linux-amd64.tar.gz alongside the server tarball):

tar xvf kubernetes-node-linux-amd64.tar.gz
mv kubernetes /usr/local

Add the user and group:

groupadd -g 992 kube
useradd -s /sbin/nologin -M -c "kube user" -u 996 kube -g kube

Add the unit file: kubelet.service

cat /usr/lib/systemd/system/kubelet.service 
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/kubernetes/node/bin/kubelet \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBELET_ADDRESS \
            $KUBELET_PORT \
            $KUBELET_HOSTNAME \
            $KUBE_ALLOW_PRIV \
            $KUBELET_POD_INFRA_CONTAINER \
            $KUBELET_ARGS
Restart=on-failure
[Install]
WantedBy=multi-user.target

Add the unit file: kube-proxy.service


cat /usr/lib/systemd/system/kube-proxy.service 
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/kubernetes/node/bin/kube-proxy \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target


Add the configuration files under /etc/kubernetes:

config

[root@localhost kubernetes]# cat config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.98.18:8080"

 

proxy 

[root@localhost kubernetes]# cat proxy 
KUBE_PROXY_ARGS="--master=http://192.168.98.18:8080  --logtostderr=true --log-dir=/var/log/kubernetes --v=2"

 

kubelet

[root@localhost kubernetes]# cat kubelet 
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=192.168.98.20" 
KUBELET_API_SERVER="--api-servers=http://192.168.98.18:8080"     
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=" --enable-server=true  --logtostderr=true  --log-dir=/var/log/kubernetes --v=2 --fail-swap-on=false --kubeconfig=/var/lib/kubelet/kubeconfig"

Two things to note here: KUBELET_HOSTNAME must carry each node's own IP (192.168.98.19 on k8s-node1, 192.168.98.20 on k8s-node2), and if the machine has swap enabled the kubelet will refuse to start, so swap must be turned off (--fail-swap-on=false above relaxes the check, but disabling swap is still the safe default).

Disable swap: swapoff -a   (and comment out the swap entry in /etc/fstab so it stays off after a reboot)

 

One more configuration file is needed: since 1.9.0 the kubelet no longer uses KUBELET_API_SERVER to talk to the API server; it reads the connection details from a kubeconfig file instead. Create /var/lib/kubelet if it does not exist (mkdir -p /var/lib/kubelet), then add:

 

[root@localhost kubernetes]# cat /var/lib/kubelet/kubeconfig 
apiVersion: v1
kind: Config
users:
- name: kubelet
clusters:
- name: kubernetes
  cluster:
    server: http://192.168.98.18:8080
contexts:
- context:
    cluster: kubernetes
    user: kubelet
  name: service-account-context
current-context: service-account-context

After creating it, fix the ownership, or the kubelet will report permission errors:

chown -R kube.kube /var/lib/kubelet/

Start the services:

for service in docker kube-proxy kubelet; do systemctl start $service && systemctl enable $service; done

Check the status:

systemctl status    kube-proxy
systemctl status    kubelet
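Back on the master, both nodes should show up and report Ready within a few seconds:

kubectl get nodes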

Done. Now test whether the cluster is usable.

1. Create nginx-pod.yaml

[root@localhost Dockerfile]# cat nginx-pod.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
  labels:
    name: nginx-pod
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80

2. Verify

[root@localhost Dockerfile]#  kubectl create -f nginx-pod.yaml 
pod/nginx-pod created


[root@localhost Dockerfile]# kubectl get pods -o wide
NAME        READY     STATUS    RESTARTS   AGE       IP           NODE
nginx-pod   1/1       Running   0          13s       172.17.0.2   192.168.98.20

On node 192.168.98.20, check that an nginx container is actually running.
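For example (assuming docker as the container runtime; the pod IP is the one reported by kubectl above):

docker ps | grep nginx
curl -I http://172.17.0.2    # nginx should answer with HTTP/1.1 200 OK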


If it is, the cluster environment is working as expected.





Reposted from blog.51cto.com/douya/2160375