文章目录
一、Docker Swarm实践
Swarm是原生支持Docker集群管理的工具。
Docker Swarm是一个为IT运维团队提供集群和调度能力的编排工具。
Swarm可以把多个Docker主机组成的系统转换为单一的虚拟Docker主机,使得容器可以组成跨主机的子网网络。
1、创建Swarm集群
初始化集群
[root@server1 ~]# docker swarm init
根据提示在其他docker节点上执行命令
[root@server2 ~]# docker swarm join --token SWMTKN-1-1ym2b7zqmvj1trzvd6rwbx3mdc77wa6fmqrq0gdwtf7hhsyahz-5shc2cr07u7ewi7iph5jwvk4i 192.168.0.1:2377
[root@server3 ~]# docker swarm join --token SWMTKN-1-1ym2b7zqmvj1trzvd6rwbx3mdc77wa6fmqrq0gdwtf7hhsyahz-5shc2cr07u7ewi7iph5jwvk4i 192.168.0.1:2377
查看swarm集群节点
[root@server1 ~]# docker node ls
[root@server2 ~]# scp nginx.tar server3:
[root@server3 ~]# docker load -i nginx.tar
[root@server1 ~]# docker service create --name my_cluster --replicas 2 -p 80:80 nginx
server2、server3 的 80 端口不能被占用(端口映射格式为 宿主机端口:容器端口)
[root@server1 harbor]# netstat -antlp | grep :80
tcp6 0 0 :::80 :::* LISTEN 3321/dockerd
[root@server2 ~]# netstat -antlp | grep :80
tcp6 0 0 :::80 :::* LISTEN 24924/dockerd
[root@server3 ~]# netstat -antlp | grep :80
tcp6 0 0 :::80 :::* LISTEN 14773/dockerd
[root@server2 ~]# echo server2 > index.html
[root@server2 ~]# docker ps
[root@server2 ~]# docker cp index.html 6386513fc87f:/usr/share/nginx/html
[root@server3 ~]# echo server3 > index.html
[root@server3 ~]# docker cp index.html ecb82d7971bc:/usr/share/nginx/html
2、部署swarm监控:
(各节点提前导入dockersamples/visualizer镜像)
[root@server1 ~]# docker service scale my_cluster=4
[root@server1 ~]# docker service ps my_cluster
[root@server1 ~]# docker service rm my_cluster
my_cluster
[root@server1 ~]# docker service ls
server1 2 3
[root@server1 ~]# docker pull ikubernetes/myapp:v1
[root@server1 ~]# docker tag ikubernetes/myapp:v1 myapp:v1
[root@server1 ~]# docker rmi ikubernetes/myapp:v1
[root@server2 ~]# docker pull ikubernetes/myapp:v1
[root@server3 ~]# docker pull ikubernetes/myapp:v1
[root@server1 ~]# docker service create --name my_cluster --replicas 2 -p 80:80 myapp:v1
[root@server1 ~]# docker service scale my_cluster=6
[root@server1 ~]# docker pull dockersamples/visualizer
[root@server1 ~]# docker service create \
--name=viz \
--publish=8080:8080/tcp \
--constraint=node.role==manager \
--mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
dockersamples/visualizer
[root@server3 ~]# systemctl stop docker
[root@server3 ~]# systemctl start docker
[root@server1 ~]# docker service scale my_cluster=10
3、节点升级降级
[root@server1 ~]# cd harbor/
[root@server1 harbor]# docker-compose down
[root@server1 ~]# docker node promote server2
[root@server1 ~]# docker node demote server1
[root@server2 ~]# docker node ls
[root@server1 ~]# cd harbor/
[root@server1 harbor]# docker-compose down
[root@server1 ~]# docker node promote server2
[root@server1 ~]# docker node demote server1
[root@server2 ~]# docker node ls
4、加入本地私有仓库
[root@server2 ~]# docker swarm init
[root@server4 ~]# docker swarm join --token SWMTKN-1-1ym2b7zqmvj1trzvd6rwbx3mdc77wa6fmqrq0gdwtf7hhsyahz-5shc2cr07u7ewi7iph5jwvk4i 192.168.0.2:2377
[root@server2 ~]# docker node ls
[root@server2 ~]# docker ps
[root@server1 ~]# docker swarm leave
Node left the swarm.
[root@server2 ~]# docker node rm server1
[root@server2 ~]# docker node ls
[root@server1 harbor]# ./install.sh --with-chartmuseum
[root@server2 ~]# cd /etc/docker/
[root@server2 docker]# vim daemon.json
{
"registry-mirrors": ["https://reg.westos.org"]
}
[root@server2 docker]# systemctl reload docker
[root@server2 docker]# scp daemon.json server3:/etc/docker/
[root@server2 docker]# scp daemon.json server4:/etc/docker/
[root@server3 docker-ce]# systemctl reload docker
[root@server4 docker-ce]# systemctl reload docker
[root@server2 docker]# scp -r certs.d/ server3:/etc/docker/
[root@server2 docker]# scp -r certs.d/ server4:/etc/docker/
[root@server3 docker-ce]# vim /etc/hosts
[root@server4 docker-ce]# vim /etc/hosts
192.168.0.1 server1 reg.westos.org
[root@server1 ~]# docker tag myapp:v1 reg.westos.org/library/myapp:v1
[root@server1 ~]# docker push reg.westos.org/library/myapp:v1
[root@server4 ~]# docker pull myapp:v1
5、实现业务滚动更新
[root@server2 docker]# docker service create --name my_web --replicas 3 -p 80:80 myapp:v1
[root@server3 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
myapp <none> d4a5e0eaa84f 2 years ago 15.5MB
[root@server4 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
myapp <none> d4a5e0eaa84f 2 years ago 15.5MB
[root@server2 docker]# docker service scale my_web=10
[root@server1 ~]# docker pull ikubernetes/myapp:v2
[root@server1 ~]# docker tag ikubernetes/myapp:v2 reg.westos.org/library/myapp:v2
[root@server1 ~]# docker push reg.westos.org/library/myapp:v2
[root@server2 docker]# docker service update --image myapp:v2 --update-parallelism 2 --update-delay 5s my_web
my_web
6、脚本自动化部署
部署服务web,定义监控
[root@server1 ~]# docker images
[root@server1 ~]# docker tag dockersamples/visualizer:latest reg.westos.org/library/visualizer:latest
[root@server1 ~]# docker push reg.westos.org/library/visualizer:latest
[root@server2 ~]# mkdir compose
[root@server2 ~]# cd compose/
[root@server2 compose]# ls
[root@server2 compose]# vim docker-compose.yml
[root@server2 compose]# docker service ls
[root@server2 compose]# docker service rm my_web
[root@server2 compose]# docker service rm viz
[root@server2 compose]# docker stack deploy -c docker-compose.yml my_cluster
[root@server2 compose]# docker stack ls
[root@server2 compose]# docker stack ps my_cluster
[root@server2 compose]# docker stack services my_cluster
[root@server2 compose]# vim docker-compose.yml
replicas: 6
[root@server2 compose]# docker stack deploy -c docker-compose.yml my_cluster
version: "3.9"
services:
web:
image: myapp:v1
networks:
- mynet
deploy:
replicas: 2
update_config:
parallelism: 2
delay: 10s
restart_policy:
condition: on-failure
visualizer:
image: visualizer:latest
ports:
- "8080:8080"
stop_grace_period: 1m30s
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
deploy:
placement:
constraints:
- "node.role==manager"
networks:
mynet:
7、可视化管理工具portainer
[root@server1 ~]# mkdir portainer
[root@server1 ~]# mv portainer-* portainer
[root@server1 portainer]# ls
portainer-agent-stack.yml portainer-agent.tar portainer.tar
[root@server1 portainer]# vim portainer-agent-stack.yml
[root@server1 portainer]# docker load -i portainer.tar
[root@server1 portainer]# docker load -i portainer-agent.tar
[root@server1 portainer]# docker tag portainer/portainer:latest reg.westos.org/library/portainer:latest
[root@server1 portainer]# docker push reg.westos.org/library/portainer:latest
[root@server1 portainer]# docker tag portainer/agent:latest reg.westos.org/library/agent:latest
[root@server1 portainer]# docker push reg.westos.org/library/agent:latest
[root@server2 ~]# mv portainer-agent-stack.yml compose/
[root@server2 ~]# cd compose/
[root@server2 compose]# vim portainer-agent-stack.yml
[root@server2 compose]# docker stack rm my_cluster
[root@server2 compose]# docker stack deploy -c portainer-agent-stack.yml portainer
[root@server2 compose]# docker stack ps portainer
创建仓库
二、Kubernetes部署
Kubernetes的好处:
隐藏资源管理和错误处理,用户仅需要关注应用的开发。
服务高可用,高可靠。
可将负载运行在由成千上万的机器联合而成的集群中。
1、systemd
关闭节点的selinux和iptables防火墙
所有节点部署docker引擎
server2 3 4相同部署
[root@server2 ~]# vim /etc/docker/daemon.json
[root@server2 ~]# systemctl restart docker    # exec-opts、storage-driver 等选项不支持 reload 热加载,必须 restart 才能生效
{
"registry-mirrors": ["https://reg.westos.org"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
[root@server2 ~]# cd /etc/systemd/system/docker.service.d/
[root@server2 docker.service.d]# ls
10-machine.conf
[root@server2 docker.service.d]# rm -f 10-machine.conf
[root@server2 docker.service.d]# ls
[root@server2 docker.service.d]# cd
[root@server2 ~]# systemctl daemon-reload
[root@server2 ~]# systemctl reload docker
[root@server2 ~]# yum remove docker-ce
[root@server2 ~]# yum install -y docker-ce
[root@server2 ~]# systemctl restart docker
[root@server2 ~]# docker info
2、禁用swap分区
server2 3 4
[root@server2 ~]# swapoff -a
[root@server2 ~]# vim /etc/fstab
#/dev/mapper/rhel-swap swap swap defaults 0 0
[root@server2 ~]# swapon -s
[root@server2 ~]#
注释掉/etc/fstab 文件中的swap定义
3、安装部署软件kubeadm
server2 3 4
[root@server2 ~]# cd /etc/yum.repos.d/
[root@server2 yum.repos.d]# vim k8s.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
[root@server2 yum.repos.d]# yum install -y kubelet kubeadm kubectl
[root@server2 yum.repos.d]# systemctl enable --now kubelet
[root@server2 yum.repos.d]# systemctl enable docker.service
4、拉取镜像
查看默认配置信息
[root@server2 yum.repos.d]# kubeadm config print init-defaults
修改镜像仓库,列出所需镜像
[root@server2 yum.repos.d]# kubeadm config images list --image-repository registry.aliyuncs.com/google_containers
拉取镜像
[root@server2 yum.repos.d]# kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
[root@server2 yum.repos.d]# docker images | grep registry.aliyuncs.com
5、初始化集群
[root@server2 yum.repos.d]# kubeadm init --pod-network-cidr=10.244.0.0/16 --image-repository registry.aliyuncs.com/google_containers
#--pod-network-cidr=10.244.0.0/16使用flannel网络组件时必须添加
[root@server2 ~]# mkdir -p $HOME/.kube
[root@server2 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@server2 ~]# kubectl get ns
6、安装flannel网络组件
配置kubectl命令补齐功能
[root@server2 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@server2 ~]# kubectl get pod --namespace kube-system
安装flannel网络组件
[root@server2 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
or
[root@server2 ~]# kubectl apply -f kube-flannel.yml
[root@server2 ~]# docker images
[root@server2 ~]# kubectl get node
[root@server2 ~]# docker save quay.io/coreos/flannel:v0.12.0-amd64 registry.aliyuncs.com/google_containers/pause:3.2 registry.aliyuncs.com/google_containers/coredns:1.7.0 registry.aliyuncs.com/google_containers/kube-proxy:v1.20.2 > node.tar
[root@server2 ~]# scp node.tar server3:
[root@server2 ~]# scp node.tar server4:
[root@server3 ~]# docker load -i node.tar
[root@server3 ~]# docker images
[root@server4 ~]# docker load -i node.tar
[root@server4 ~]# docker images
7、各节点join
[root@server3 ~]# kubeadm join 192.168.0.2:6443 --token 0i7uif.7lkge845m8tevvru \
--discovery-token-ca-cert-hash sha256:736721d3d4ffd24daf69051c20be321184fcbc7e6536d3d0c54f6417527482b9
[root@server4 ~]# kubeadm join 192.168.0.2:6443 --token 0i7uif.7lkge845m8tevvru \
--discovery-token-ca-cert-hash sha256:736721d3d4ffd24daf69051c20be321184fcbc7e6536d3d0c54f6417527482b9
[root@server2 ~]# kubectl get node
[root@server2 ~]# kubectl get pod -n kube-system