Docker Kubernetes Storage -- Volume Configuration Management -- Persistent Volumes (Static/Dynamic Provisioning, StatefulSet)

1. Introduction

Official documentation

  • A PersistentVolume (PV) is a piece of network storage in the cluster, provisioned by an administrator. Like a node, a PV is a cluster resource. Like a Volume, it is backed by a volume plugin, but its lifecycle is independent of any Pod that uses it. The PV API object captures the implementation details of NFS, iSCSI, or other cloud storage systems.

  • A PersistentVolumeClaim (PVC) is a user's request for storage. It is analogous to a Pod: a Pod consumes node resources, while a PVC consumes PV resources. A Pod can request specific resources (CPU and memory); a PVC can request a specific size and access modes (for example, mounted once read-write or many times read-only).

  • There are two ways PVs are provided: static and dynamic.
    Static: the cluster administrator creates a number of PVs that carry the details of real storage available to cluster users. They exist in the Kubernetes API and are ready to be consumed.
    Dynamic: when none of the administrator's static PVs match a user's PVC, the cluster may try to provision a volume specifically for that PVC. This provisioning is based on StorageClass.

  • The binding between a PVC and a PV is a one-to-one mapping. If no matching PV is found, the PVC remains unbound (Pending) indefinitely; a minimal sketch of the matching rules follows.
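
As a minimal illustration of those matching rules (a sketch only; the class name nfs and the 5Gi request simply mirror the static example in section 2 below):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-claim
spec:
  storageClassName: nfs     ## only PVs with the same storageClassName are candidates
  accessModes:
    - ReadWriteOnce         ## the PV must offer this access mode
  resources:
    requests:
      storage: 5Gi          ## the PV capacity must be at least this size

If no existing PV satisfies all three criteria, the claim stays Pending until a matching PV appears (static provisioning) or is created for it (dynamic provisioning).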

2. NFS PV Example (Static Provisioning)

2.1 Clean up the lab environment

[root@server2 volumes]# kubectl delete -f nfs.yaml 
pod "nfs-pd" deleted
[root@server2 volumes]# kubectl get pod
No resources found in default namespace.
[root@server2 volumes]# kubectl get pv
No resources found
[root@server2 volumes]# kubectl get pvc
No resources found in default namespace.


2.2 Create the required resources

## 1. Install and configure the NFS service (already done earlier)
# yum install -y nfs-utils
# mkdir -m 777 /nfsdata
# vim /etc/exports
#      /nfsdata	*(rw,sync,no_root_squash)
# systemctl enable --now rpcbind
# systemctl enable --now nfs

## 2. Environment on server1 and on every node
[root@server1 nfsdata]# mkdir pv1 pv2 pv3   ## create the corresponding directories
[root@server1 nfsdata]# ls
index.html  pv1  pv2  pv3
[root@server1 pv1]# echo www.westos.org > index.html   ## write a test file in each directory
[root@server1 pv2]# echo www.redhat.org > index.html
[root@server1 pv3]# echo www.baidu.com > index.html

[root@server3 ~]# yum install nfs-utils -y   ## every node needs the NFS client tools installed
[root@server4 ~]# yum install nfs-utils -y 
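
Before creating PVs it is worth confirming that the export is actually visible from the nodes. A quick check (a sketch; 192.168.0.1 is the NFS server address used throughout this article):

## on the NFS server: re-export and list the shares
# exportfs -rv
## on each node: query the server's export list, which should include /nfsdata
# showmount -e 192.168.0.1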


2.3 Create the PVs

[root@server2 volumes]# vim pv1.yaml 
[root@server2 volumes]# cat pv1.yaml    ## PV manifest
apiVersion: v1
kind: PersistentVolume       ## PV object
metadata:
  name: pv1
spec:
  capacity:
    storage: 5Gi           ## capacity; not necessarily fully used
  volumeMode: Filesystem   ## the volume mode is Filesystem
  accessModes:
    - ReadWriteOnce       ## single-node read-write (RWO)
  persistentVolumeReclaimPolicy: Recycle    ## reclaim policy: scrub the volume and make it Available again
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv1
    server: 192.168.0.1

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv2
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv2
    server: 192.168.0.1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv3
spec:
  capacity:
    storage: 20Gi
  volumeMode: Filesystem
  accessModes:
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv3
    server: 192.168.0.1
 
[root@server2 volumes]# kubectl apply -f pv1.yaml  ## apply
persistentvolume/pv1 unchanged
persistentvolume/pv2 created
persistentvolume/pv3 created
[root@server2 volumes]# kubectl get pv    ## list the PVs
NAME   CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv1    5Gi        RWO            Recycle          Available           nfs                     2m53s
pv2    10Gi       RWX            Recycle          Available           nfs                     6s
pv3    20Gi       ROX            Recycle          Available           nfs                     6s


2.4 Create the PVCs and Pods

[root@server2 volumes]# vim pvc.yaml     ## create the PVCs and Pods
apiVersion: v1
kind: PersistentVolumeClaim     ## PVC object
metadata:
  name: pvc1
spec:                     ## the fields below are matching rules; if no PV matches, the PVC stays Pending until a suitable PV appears
  storageClassName: nfs   ## storage class name: nfs
  accessModes:
    - ReadWriteOnce       ## match single-node read-write
  resources:
    requests:
      storage: 5Gi       ## request 5Gi; the matched PV must have at least this capacity
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc2
spec:
  storageClassName: nfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

---
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: myapp:v1
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs-pv
  volumes:
  - name: nfs-pv
    persistentVolumeClaim:
      claimName: pvc1

---
apiVersion: v1
kind: Pod
metadata:
  name: test-pd-2
spec:
  containers:
  - image: myapp:v1
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs-pv-2
  volumes:
  - name: nfs-pv-2
    persistentVolumeClaim:    ## reference the PVC
      claimName: pvc2
      

[root@server2 volumes]# kubectl  apply -f pvc.yaml
persistentvolumeclaim/pvc1 created
persistentvolumeclaim/pvc2 created
pod/test-pd created
pod/test-pd-2 created
[root@server2 volumes]# kubectl get pvc
NAME   STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc1   Bound    pv1      5Gi        RWO            nfs            10s
pvc2   Bound    pv2      10Gi       RWX            nfs            10s
[root@server2 volumes]# kubectl get pv  ## the PVs are matched and bound to the PVCs
NAME   CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM          STORAGECLASS   REASON   AGE
pv1    5Gi        RWO            Recycle          Bound       default/pvc1   nfs                     82s
pv2    10Gi       RWX            Recycle          Bound       default/pvc2   nfs                     82s
pv3    20Gi       ROX            Recycle          Available                  nfs                     82s
[root@server2 volumes]# kubectl get pod
NAME        READY   STATUS    RESTARTS   AGE
test-pd     1/1     Running   0          30s
test-pd-2   1/1     Running   0          29s


2.5 Test

[root@server2 volumes]# kubectl get pod -o wide
NAME        READY   STATUS    RESTARTS   AGE     IP               NODE      NOMINATED NODE   READINESS GATES
test-pd     1/1     Running   0          4m35s   10.244.22.20     server4   <none>           <none>
test-pd-2   1/1     Running   0          4m34s   10.244.141.200   server3   <none>           <none>
[root@server2 volumes]# curl 10.244.22.20  ## curl the Pod IPs and check that each serves the file written for its PV
www.westos.org
[root@server2 volumes]# curl 10.244.141.200
www.redhat.org

[root@server3 ~]# cd /mnt
[root@server3 mnt]# ls
10-flannel.conflist
[root@server4 ~]# cd /mnt
[root@server4 mnt]# ls
10-flannel.conflist


2.6 Delete and reclaim

[root@server2 volumes]# kubectl delete pod test-pd-2 --force
[root@server2 volumes]# kubectl get pod
[root@server2 volumes]# kubectl delete pvc pvc2
persistentvolumeclaim "pvc2" deleted
[root@server2 volumes]# kubectl get pvc
[root@server2 volumes]# kubectl get pv  ## the PV is recycled and becomes Available for reuse

[root@server1 nfsdata]# cd pv2/
[root@server1 pv2]# ls
index.html
[root@server1 pv2]# ll   ### after pvc2 is deleted the content is gone (the Recycle policy scrubs the volume)
total 0


3. Dynamic Provisioning

3.1 Introduction

nfs-client-provisioner source code repository

  • A StorageClass provides a way to describe a "class" of storage. Different classes may map to different quality-of-service levels, backup policies, or other policies.

  • Each StorageClass contains the provisioner, parameters, and reclaimPolicy fields, which are used when the class needs to dynamically provision a PersistentVolume.

  • StorageClass attributes
    Provisioner: determines which volume plugin is used to provision PVs; this field is required. Either an internal or an external provisioner can be specified. External provisioners live in kubernetes-incubator/external-storage and include NFS, Ceph, and others.
    Reclaim Policy: the reclaimPolicy field sets the reclaim policy of the PersistentVolumes the class creates, either Delete or Retain; if unspecified it defaults to Delete. A minimal sketch follows after this list.
    More attributes: https://kubernetes.io/zh/docs/concepts/storage/storage-classes/

  • The NFS Client Provisioner is an automatic provisioner that uses NFS as its backing storage and automatically creates PVs for matching PVCs. It does not provide NFS storage itself; an external NFS service must already exist.
    On the NFS server, each PV is provisioned as a directory named ${namespace}-${pvcName}-${pvName}; when a PV is reclaimed, the directory is renamed to archived-${namespace}-${pvcName}-${pvName}.
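
To make the reclaimPolicy field concrete, here is a minimal StorageClass sketch (the provisioner string matches the NFS provisioner deployed in section 3.4; Retain is chosen here only to demonstrate the field, while the class actually used later keeps the Delete default):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-retain-example       ## illustrative name, not used elsewhere in this article
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
reclaimPolicy: Retain            ## keep the provisioned PV (and its data) after the PVC is deleted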

3.2 Clean up the environment

## 1. Clean up the environment
[root@server2 volumes]# kubectl delete -f pvc.yaml   ## clean up
[root@server2 volumes]# kubectl delete -f pv1.yaml
[root@server1 ~]# cd /nfsdata/     ## remove the data on the NFS server
[root@server1 nfsdata]# ls
pv1  pv2  pv3
[root@server1 nfsdata]# rm -fr *


3.3 Pull the image and push it to the private registry

[root@server1 nfsdata]# docker search k8s-staging-sig-storage
NAME                                      DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
yuufnn/nfs-external-provisioner           gcr.io/k8s-staging-sig-storage/nfs-subdir-ex…   0                    
heegor/nfs-subdir-external-provisioner    Image backup for gcr.io/k8s-staging-sig-stor…   0                    
zelaxyz/nfs-subdir-external-provisioner   #Dockerfile FROM gcr.io/k8s-staging-sig-stor…   0                    
yuufnn/nfs-subdir-external-provisioner    gcr.io/k8s-staging-sig-storage/nfs-subdir-ex…   0                    
[root@server1 nfsdata]# docker pull heegor/nfs-subdir-external-provisioner:v4.0.0
[root@server1 nfsdata]# docker tag heegor/nfs-subdir-external-provisioner:v4.0.0 reg.westos.org/library/nfs-subdir-external-provisioner:v4.0.0
[root@server1 nfsdata]# docker push reg.westos.org/library/nfs-subdir-external-provisioner:v4.0.0


3.4 Configuration

[root@server2 volumes]# mkdir nfs-client
[root@server2 volumes]# cd nfs-client/
[root@server2 nfs-client]# pwd
/root/volumes/nfs-client
[root@server2 nfs-client]# vim nfs-client-provisioner.yaml
[root@server2 nfs-client]# cat nfs-client-provisioner.yaml     ## dynamic provisioner manifest
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner           ## a dedicated namespace is created for it
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-client-provisioner
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-client-provisioner
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: nfs-subdir-external-provisioner:v4.0.0
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.0.1
            - name: NFS_PATH
              value: /nfsdata
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.0.1
            path: /nfsdata
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"     ## archive the data directory on reclaim instead of deleting it


[root@server2 nfs-client]# vim pvc.yaml   ## test manifest: a PVC and a Pod
[root@server2 nfs-client]# cat pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
---
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: myapp:v1
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/usr/share/nginx/html"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim

[root@server2 nfs-client]# kubectl create namespace nfs-client-provisioner  ## create the namespace first, for easier management
[root@server2 nfs-client]# kubectl apply -f nfs-client-provisioner.yaml   ## apply the dynamic provisioner
[root@server2 nfs-client]# kubectl get pod -n nfs-client-provisioner   ## check the provisioner pod
[root@server2 nfs-client]# kubectl get sc
[root@server2 nfs-client]# kubectl get ns
[root@server2 nfs-client]# kubectl get pod -n nfs-client-provisioner

[root@server2 nfs-client]# kubectl apply -f pvc.yaml     ## apply the test manifest
[root@server2 nfs-client]# kubectl get pv      ## check the dynamically provisioned PV
[root@server2 nfs-client]# kubectl get pvc     ## check the PVC binding


3.5 Test

[root@server1 nfsdata]# ls   ## a data directory was created following the naming convention
default-test-claim-pvc-bc952d4e-47a5-4ac4-9d95-5cd2e6132ebf
[root@server1 nfsdata]# cd default-test-claim-pvc-bc952d4e-47a5-4ac4-9d95-5cd2e6132ebf/
[root@server1 default-test-claim-pvc-bc952d4e-47a5-4ac4-9d95-5cd2e6132ebf]# echo www.westos.org > index.html
[root@server1 default-test-claim-pvc-bc952d4e-47a5-4ac4-9d95-5cd2e6132ebf]# 

[root@server2 nfs-client]# kubectl get pod -o wide 
NAME       READY   STATUS    RESTARTS   AGE    IP             NODE      NOMINATED NODE   READINESS GATES
test-pod   1/1     Running   0          5m5s   10.244.22.10   server4   <none>           <none>
[root@server2 nfs-client]# curl 10.244.22.10
www.westos.org


4. The Default StorageClass

A default StorageClass is used to dynamically provision storage for PersistentVolumeClaims that do not request any particular storage class (there can be only one default StorageClass).
If there is no default StorageClass and the PVC does not set storageClassName, the PVC can only bind to PVs whose storageClassName is also "".
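
The same effect as the kubectl patch shown in 4.2 below can be achieved declaratively. This is the StorageClass from section 3.4 with the default-class annotation added (a sketch, not what this article actually applies):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"   ## marks this class as the cluster default
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"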

4.1 Without a default StorageClass

[root@server2 nfs-client]# kubectl get pod
[root@server2 nfs-client]# kubectl get pvc

[root@server2 nfs-client]# vim demo.yaml   ## test a PVC that requests no storage class
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim-2
spec:
  # storageClassName is intentionally omitted to test the default-class behavior
  accessModes:
    - ReadOnlyMany
  resources:
    requests:
      storage: 5Gi

[root@server2 nfs-client]# kubectl apply -f demo.yaml 
[root@server2 nfs-client]# kubectl get pvc    ## with no storage class specified and no default set, the PVC stays Pending


4.2 Set a default StorageClass

[root@server2 nfs-client]# kubectl patch storageclass managed-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'   ## mark the class as default
[root@server2 nfs-client]# kubectl get sc   ## verify it is now marked (default)

## check the effect
[root@server2 nfs-client]# kubectl delete -f demo.yaml 
[root@server2 nfs-client]# kubectl apply -f demo.yaml 
[root@server2 nfs-client]# kubectl get pvc


5. StatefulSet

5.1 Introduction

  • StatefulSet abstracts application state into two cases:
    Topology state: application instances must start in a certain order, and a newly created Pod must have the same network identity as the Pod it replaces.
    Storage state: multiple instances of the application are each bound to their own storage data.

  • A StatefulSet numbers all of its Pods. The naming rule is $(statefulset name)-$(ordinal), starting from 0 (for example web-0, web-1).

  • A StatefulSet also allocates and creates a PVC with the same ordinal for every Pod. Kubernetes can then bind a PV to that PVC through the PersistentVolume mechanism, which guarantees that every Pod owns an independent volume.

  • When a Pod is deleted and recreated, its network identity does not change. The Pod's topology state is pinned down by its "name + ordinal", and every Pod gets a fixed, unique access point: its own DNS record.

5.2 Clean up the environment

[root@server2 nfs-client]# kubectl delete -f demo.yaml   
[root@server2 nfs-client]# kubectl delete -f pvc.yaml 
[root@server2 nfs-client]# kubectl get pvc
No resources found in default namespace.
[root@server2 nfs-client]# kubectl get pv
No resources found
[root@server2 nfs-client]# kubectl get pod
No resources found in default namespace.


5.3 Configuration

[root@server2 volumes]# pwd
/root/volumes
[root@server2 volumes]# mkdir statefulset
[root@server2 volumes]# cd statefulset/
[root@server2 statefulset]# vim service.yaml      ## lab manifest
apiVersion: v1          ## how a StatefulSet maintains Pod topology state through a headless Service
kind: Service
metadata:
 name: nginx-svc
 labels:
  app: nginx
spec:
 ports:
 - port: 80
   name: web
 clusterIP: None     ## headless Service
 selector:
  app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
 name: web
spec:
 serviceName: "nginx-svc"
 replicas: 2               ## replica count; scale to 0 to remove only the Pods, delete the controller to remove everything
 selector:
  matchLabels:
   app: nginx
 template:
  metadata:
   labels:
    app: nginx
  spec:
   containers:
   - name: nginx
     image: myapp:v1           ## myapp is essentially nginx
     ports:
     - containerPort: 80
       name: web
     volumeMounts:          ## the PV/PVC design is what makes storage-state management by a StatefulSet possible
       - name: www
         mountPath: /usr/share/nginx/html
 volumeClaimTemplates:
  - metadata:
     name: www
    spec:
     storageClassName: managed-nfs-storage    ## the StorageClass created earlier
     accessModes:
     - ReadWriteOnce
     resources:
      requests:
       storage: 1Gi
[root@server2 statefulset]# kubectl apply -f service.yaml 
service/nginx-svc created
statefulset.apps/web created

[root@server2 statefulset]# kubectl get pod
NAME    READY   STATUS    RESTARTS   AGE
web-0   1/1     Running   0          9s
web-1   1/1     Running   0          5s

[root@server2 statefulset]# kubectl get pv
[root@server2 statefulset]# kubectl get pvc
[root@server2 statefulset]# kubectl get svc
[root@server2 statefulset]# kubectl describe svc nginx-svc    ## inspect the Service details
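
As hinted by the replicas comment in the manifest, scaling the StatefulSet down and back up is an easy way to see the stable identities in action (a sketch; the PVC names follow the <claim template>-<pod> pattern, e.g. www-web-0):

kubectl scale statefulset web --replicas=0    ## the Pods are removed; the PVCs (www-web-0, www-web-1) and their data remain
kubectl get pvc
kubectl scale statefulset web --replicas=2    ## web-0 and web-1 come back with the same names and re-attach the same PVCs
kubectl get pod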


5.4 Test

[root@server1 nfsdata]# pwd
/nfsdata
[root@server1 nfsdata]# ls
archived-pvc-3056d280-5f67-4ddc-ad8c-35d3c2ec982d  default-www-web-0-pvc-76350051-8ae2-424a-b51f-e2d609717ffe
archived-pvc-7aee6373-17be-4544-bb04-3189458eec55  default-www-web-1-pvc-c3388d08-2630-4419-be05-b4a928945919
[root@server1 nfsdata]# echo web-0 > default-www-web-0-pvc-76350051-8ae2-424a-b51f-e2d609717ffe/index.html
[root@server1 nfsdata]# echo web-1 > default-www-web-1-pvc-c3388d08-2630-4419-be05-b4a928945919/index.html

[root@server2 statefulset]# kubectl get pod -o wide
NAME    READY   STATUS    RESTARTS   AGE     IP               NODE      NOMINATED NODE   READINESS GATES
demo    1/1     Running   0          12m     10.244.141.204   server3   <none>           <none>
web-0   1/1     Running   0          2m44s   10.244.22.24     server4   <none>           <none>
web-1   1/1     Running   0          2m40s   10.244.141.205   server3   <none>           <none>
[root@server2 statefulset]# curl 10.244.22.24
web-0
[root@server2 statefulset]# curl 10.244.141.205
web-1

[root@server2 statefulset]# kubectl run demo --image=busyboxplus -it   ## test from a client Pod
/ # nslookup nginx-svc
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name:      nginx-svc
Address 1: 10.244.22.11 web-0.nginx-svc.default.svc.cluster.local
Address 2: 10.244.141.211 web-1.nginx-svc.default.svc.cluster.local
/ # curl web-0.nginx-svc
web-0
/ # curl web-1.nginx-svc
web-1


6. Deploying a MySQL Master-Slave Cluster with StatefulSet

Official example code

6.1 Clean up the lab environment

## Clean up the lab environment
[root@server2 statefulset]# kubectl delete -f service.yaml 
service "nginx-svc" deleted
statefulset.apps "web" deleted
[root@server2 statefulset]# kubectl delete pvc --all
[root@server2 statefulset]# kubectl delete pod demo --force 
[root@server2 statefulset]# kubectl get pv
No resources found
[root@server2 statefulset]# kubectl get pvc
No resources found in default namespace.
[root@server2 statefulset]# kubectl get pod
No resources found in default namespace.
[root@server2 statefulset]# kubectl get all


6.2 Configuration

[root@server2 ~]# cd volumes/statefulset/
[root@server2 statefulset]# ls
service.yaml
[root@server2 statefulset]# mkdir mysql
[root@server2 statefulset]# cd mysql/
[root@server2 mysql]# vim configmap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin    
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
[root@server2 mysql]# kubectl apply -f configmap.yaml
configmap/mysql created
[root@server2 mysql]# kubectl get cm 
[root@server2 mysql]# kubectl describe cm mysql 
[root@server2 statefulset]# kubectl delete cm cm1-config my-config my-config-2 my-config-3 nginx-config
[root@server2 statefulset]# kubectl get cm 


[root@server2 mysql]# vim services.yaml 
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
[root@server2 mysql]# kubectl apply -f services.yaml 
[root@server2 mysql]# kubectl get pod
No resources found in default namespace.
[root@server2 mysql]# kubectl get svc
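
The first Service is headless (clusterIP: None): it exists only to give every Pod a stable DNS name such as mysql-0.mysql. mysql-read is a normal ClusterIP Service that load-balances read traffic across all MySQL Pods. Once the StatefulSet Pods are running (next step), name resolution can be checked with a throwaway client (a sketch; busyboxplus is the client image already used in section 5.4):

kubectl run -it --rm dns-test --image=busyboxplus --restart=Never -- nslookup mysql-0.mysql
kubectl run -it --rm dns-test --image=busyboxplus --restart=Never -- nslookup mysql-read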


## Pull and push the images
[root@server1 nfsdata]# docker pull mysql:5.7
[root@server1 nfsdata]# docker tag mysql:5.7 reg.westos.org/library/mysql:5.7
[root@server1 nfsdata]# docker push reg.westos.org/library/mysql:5.7 

[root@server1 nfsdata]# docker pull yizhiyong/xtrabackup
[root@server1 nfsdata]# docker tag yizhiyong/xtrabackup:latest reg.westos.org/library/xtrabackup:1.0
[root@server1 nfsdata]# docker push reg.westos.org/library/xtrabackup:1.0

[root@server2 mysql]# \vi statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi          
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 512Mi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1      
      - name: xtrabackup
        image: xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave. (Need to remove the trailing semicolon!)
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi

          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 5Gi

[root@server2 mysql]# kubectl apply -f statefulset.yaml  ## apply the StatefulSet; Pods are created one at a time in ordinal order
[root@server2 mysql]# kubectl get pod  
[root@server2 mysql]# kubectl logs mysql-0
[root@server2 mysql]# kubectl logs mysql-0 -c init-mysql
[root@server2 mysql]# kubectl logs mysql-1 -c init-mysql
[root@server2 mysql]# kubectl describe pod mysql-0 

[root@server2 mysql]# kubectl logs mysql-1 -c clone-mysql
[root@server2 mysql]# kubectl logs mysql-2 -c clone-mysql
[root@server2 mysql]# kubectl describe svc mysql-read


6.3 Test

[root@server2 mysql]# kubectl get pod -o wide
[root@server2 mysql]# yum install mariadb -y   ## a MySQL client is needed (mariadb provides the mysql command)
[root@server2 mysql]# mysql -h 10.244.141.209
[root@server2 mysql]# mysql -h 10.244.22.27
[root@server2 mysql]# mysql -h 10.244.22.28 
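
To confirm that replication really works (the westos database shows up on all three instances, as noted below), data can be written through the master's stable DNS name and read back through the mysql-read Service. A minimal sketch modeled on that idea; the database and table names are illustrative:

## write on the master (ordinal 0); the root password is empty because of MYSQL_ALLOW_EMPTY_PASSWORD
kubectl run mysql-writer --image=mysql:5.7 -i --rm --restart=Never -- \
  mysql -h mysql-0.mysql -e "CREATE DATABASE IF NOT EXISTS westos; CREATE TABLE IF NOT EXISTS westos.demo (id INT); INSERT INTO westos.demo VALUES (1);"
## read through mysql-read, which spreads queries across the master and the slaves
kubectl run mysql-reader --image=mysql:5.7 -i --rm --restart=Never -- \
  mysql -h mysql-read -e "SELECT @@server_id; SELECT * FROM westos.demo;"

The server-id values (100, 101, 102, set by the init container) make it easy to see which replica answered the mysql-read query.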

[root@server2 mysql]# kubectl get pvc
[root@server2 mysql]# kubectl get pv
[root@server1 harbor]# cd /nfsdata/
[root@server1 nfsdata]# ls
archived-pvc-3056d280-5f67-4ddc-ad8c-35d3c2ec982d  default-data-mysql-0-pvc-ef70fbb9-eb81-4d38-8927-e459ffe3e531
archived-pvc-76350051-8ae2-424a-b51f-e2d609717ffe  default-data-mysql-1-pvc-934bc589-36b6-4a2b-aa78-22713ef2513b
archived-pvc-7aee6373-17be-4544-bb04-3189458eec55  default-data-mysql-2-pvc-dd2d3b00-56ab-4abf-a7f0-50cdfaca0887
archived-pvc-c3388d08-2630-4419-be05-b4a928945919

The westos database is synchronized successfully across all three instances.


Reposted from blog.csdn.net/weixin_45777669/article/details/114747587