Using Alibaba Cloud NAS for Kubernetes Dynamic Persistent Storage

I. Introduction to Dynamic Storage Provisioning

The core API object of the Dynamic Provisioning mechanism is the StorageClass.
A StorageClass declares a class of storage and is used to create PVs automatically.
Kubernetes documentation on dynamic storage provisioning: https://kubernetes.io/docs/concepts/storage/storage-classes/
 
Flow diagram: (image not reproduced here)
Principle:
The Volume Controller is the controller dedicated to handling persistent storage. One of its control loops, the PersistentVolumeController, is responsible for binding PVs and PVCs. The PersistentVolumeController watches PVC objects through kube-apiserver. When it finds that a PVC object has been created, it checks all available PVs: if a matching PV exists, it binds the two; if not, it creates a new PV from the StorageClass configuration and the PVC's specification and then binds it.
Features:
Dynamic volume provisioning is a Kubernetes feature that allows storage volumes to be created on demand. Before it existed, a cluster administrator had to create storage volumes manually with a cloud or storage provider outside the cluster, and then create PersistentVolume objects before the storage could be used in Kubernetes. With dynamic provisioning, the administrator no longer needs to pre-create storage volumes; they are created as users request them.
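
With dynamic provisioning, the user-facing workflow is reduced to writing a PVC that names a StorageClass; no PersistentVolume object is written by hand. A minimal sketch (the names here are illustrative; the full working example for this setup follows in the sections below):

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: example-claim               # illustrative name
spec:
  storageClassName: example-class   # illustrative; the provisioner behind this class creates the PV
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi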

II. Deployment Steps

1. Create the NFS provisioner Deployment
# vim nfs-client-provisioner-deploy.yaml
kind: Deployment
apiVersion: apps/v1   # apps/v1 requires an explicit selector
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: "*-*.cn-beijing.nas.aliyuncs.com"
            - name: NFS_PATH
              value: /pods-volumes
      volumes:
        - name: nfs-client-root
          nfs:
            server: "*-*.cn-beijing.nas.aliyuncs.com"
            path: /pods-volumes

# kubectl apply -f  nfs-client-provisioner-deploy.yaml
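
A quick status check on the provisioner (the label selector matches the Deployment template above). Note that the pod can only start once the ServiceAccount from step 2 below exists:

# kubectl get pods -l app=nfs-client-provisioner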

 

2. Create the ServiceAccount and RBAC authorization

# vim nfs-client-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

# kubectl apply -f nfs-client-rbac.yaml
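
To confirm the authorization objects were created, each can be queried by the names used in the manifest:

# kubectl get serviceaccount nfs-client-provisioner
# kubectl get clusterrole nfs-client-provisioner-runner
# kubectl get clusterrolebinding run-nfs-client-provisioner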

 

3. Create the StorageClass

# vim nfs-storage-class.yaml

apiVersion: storage.k8s.io/v1
# allowVolumeExpansion: true   # would enable volume expansion, but the nfs type does not support it
kind: StorageClass
metadata:
  name: yiruike-nfs-storage
mountOptions:
  - vers=4
  - minorversion=0
  - noresvport
provisioner: fuseim.pri/ifs
parameters:
  archiveOnDelete: "false"
# kubectl apply -f nfs-storage-class.yaml

Set yiruike-nfs-storage as the cluster's default StorageClass:

# kubectl patch storageclass yiruike-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

[root@master-92 pv-pvc]# kubectl get sc
NAME                           PROVISIONER      AGE
yiruike-nfs-storage (default)  fuseim.pri/ifs   48s
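
Because yiruike-nfs-storage is now the cluster default, a PVC that names no storage class at all will also be served by it. A minimal sketch (the claim name is illustrative):

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: default-class-claim   # illustrative name; no storage-class annotation needed
spec:
  # no storage class specified: the default class yiruike-nfs-storage is used
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi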

III. Verifying the Deployment

1. Create a test PVC

# vim test-claim.yaml

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "yiruike-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  #persistentVolumeReclaimPolicy: Retain
  resources:
    requests:
      storage: 2Gi

# kubectl apply -f test-claim.yaml

# kubectl get pv,pvc

NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                STORAGECLASS          REASON   AGE
persistentvolume/pvc-*   2Gi        RWX            Delete           Bound    default/test-claim   yiruike-nfs-storage            1s

NAME                               STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
persistentvolumeclaim/test-claim   Bound    pvc-2fc935df-62f2-11ea-9e5a-00163e0a8e3e   2Gi        RWX            yiruike-nfs-storage   5s
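
To see how the claim was satisfied, the events recorded on the PVC can be inspected; the external provisioner normally records a provisioning event there:

# kubectl describe pvc test-claim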

 

2. Create a test pod

Start a pod that touches a SUCCESS file on the volume bound by test-claim:

# vim test-pod.yaml

kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: busybox:1.24
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim

# kubectl apply -f test-pod.yaml
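
Because the pod runs a one-shot command with restartPolicy: "Never", it should reach the Completed status once the file has been touched:

# kubectl get pod test-pod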

# df -Th | grep aliyun

*-*.cn-beijing.nas.aliyuncs.com:/pods-volumes nfs4  10P  0  10P  0%  /data/k8s/k8s/kubelet/pods/77a4ad8b-62e1-11ea-89e3-00163e301bb2/volumes/kubernetes.io~nfs/nfs-client-root

# ls /data/k8s/k8s/kubelet/pods/77a4ad8b-62e1-11ea-89e3-00163e301bb2/volumes/kubernetes.io~nfs/nfs-client-root

default-test-claim-pvc-0b1ce53d-62f4-11ea-9e5a-00163e0a8e3e

# ls /data/k8s/k8s/kubelet/pods/77a4ad8b-62e1-11ea-89e3-00163e301bb2/volumes/kubernetes.io~nfs/nfs-client-root/default-test-claim-pvc-0b1ce53d-62f4-11ea-9e5a-00163e0a8e3e

SUCCESS

So the deployment is working, and NFS shared volumes can be allocated dynamically.

 

3. Verify data persistence

Now delete the test-pod pod and verify that the file inside the data volume does not disappear.

# kubectl delete pod test-pod

Checking after the pod has been deleted shows the data is not lost, which confirms that dynamic data persistence is working.
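
A concrete way to perform that check is to list the backing directory again (the kubelet pod path shown earlier is specific to this environment); the SUCCESS file created by test-pod should still be present:

# ls /data/k8s/k8s/kubelet/pods/77a4ad8b-62e1-11ea-89e3-00163e301bb2/volumes/kubernetes.io~nfs/nfs-client-root/default-test-claim-pvc-0b1ce53d-62f4-11ea-9e5a-00163e0a8e3e
SUCCESS

Note that this only holds for deleting the pod. Deleting the PVC itself triggers the Delete reclaim policy, and with archiveOnDelete: "false" in the StorageClass above the provisioner removes the backing directory rather than archiving it.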

 

 

Source: www.cnblogs.com/wjoyxt/p/12459969.html