[Cloud Native | Kubernetes Series] -- Using RBD and CephFS in a K8s Environment

1. Using Ceph RBD and CephFS in a K8s Environment

1.1 Providing storage volumes and dynamic storage to k8s with RBD

A pod in K8s can use an RBD image from Ceph as a storage device; this requires creating the RBD image in Ceph and letting the k8s nodes authenticate against Ceph.

When k8s uses Ceph for dynamic volume provisioning, the kube-controller-manager component must be able to access Ceph, so the authentication files need to be synchronized to every node, including both master and worker nodes.

1.1.1 Create and initialize the RBD pool

## Create the storage pool k8s-rbd-pool1
# ceph osd pool create k8s-rbd-pool1 32 32
pool 'k8s-rbd-pool1' created
## Enable the rbd application on the pool
# ceph osd pool application enable k8s-rbd-pool1 rbd
enabled application 'rbd' on pool 'k8s-rbd-pool1'
## Initialize the pool for RBD
# rbd pool init -p k8s-rbd-pool1
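
To double-check the pool before handing it over to k8s, the enabled applications on the pool can be queried; a minimal sketch:

## Should report rbd as enabled on k8s-rbd-pool1
# ceph osd pool application get k8s-rbd-pool1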

1.1.2 Create an image

## Create the image
# rbd create k8s-rbd-img1 --size 4G --pool k8s-rbd-pool1 --image-feature layering
## Create a Ceph user
# ceph auth get-or-create client.k8s-rbd mon 'allow r' osd 'allow * pool=k8s-rbd-pool1'
[client.k8s-rbd]
	key = AQBQpCpjmCM5DRAAv1YpUWtGIxTA4rs69RcvSw==
## Verify the user
# ceph auth get client.k8s-rbd
[client.k8s-rbd]
	key = AQBQpCpjmCM5DRAAv1YpUWtGIxTA4rs69RcvSw==
	caps mon = "allow r"
	caps osd = "allow * pool=k8s-rbd-pool1"
exported keyring for client.k8s-rbd
## Save the user's keyring
# ceph auth get client.k8s-rbd -o ceph.client.k8s-rbd.keyring
exported keyring for client.k8s-rbd
## Verify the image
# rbd --pool k8s-rbd-pool1 --image k8s-rbd-img1 info
rbd image 'k8s-rbd-img1':
	size 4 GiB in 1024 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 987c7795d788
	block_name_prefix: rbd_data.987c7795d788
	format: 2
	features: layering
	op_features: 
	flags: 
	create_timestamp: Wed Sep 21 13:03:07 2022
	access_timestamp: Wed Sep 21 13:03:07 2022
	modify_timestamp: Wed Sep 21 13:03:07 2022

1.1.3 Install ceph-common on the clients

Install ceph-common on the k8s master and on every worker node.
The k8s environment runs Ubuntu 18.04.
Configure the Ubuntu and Ceph APT sources, then install the ceph-common package.

cat > /etc/apt/sources.list <<EOF
deb https://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ceph/debian-octopus/ buster main
deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main
EOF
wget -q -O- 'https://mirrors.aliyun.com/ceph/keys/release.asc' | sudo apt-key add -
sudo apt-add-repository 'deb https://mirrors.aliyun.com/ceph/debian-octopus/ buster main'
sudo apt update
apt install -y ceph-common
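
After the install, a quick sanity check that the client tooling is in place; a minimal sketch (the exact version depends on the configured repository):

## Both binaries are shipped by ceph-common
# ceph --version
# rbd --version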

1.1.4 Copy the keyring and ceph.conf to all k8s nodes

## master nodes
# for ip in {101..103};do scp /etc/ceph/ceph.conf /apps/ceph.client.k8s-rbd.keyring 192.168.31.$ip:/etc/ceph/;done
## worker nodes
# for ip in {111..114};do scp /etc/ceph/ceph.conf /apps/ceph.client.k8s-rbd.keyring 192.168.31.$ip:/etc/ceph/;done
ceph.conf                                                                                                      100%  771     1.3MB/s   00:00    
ceph.client.k8s-rbd.keyring                                                                                    100%  128   256.3KB/s   00:00    
ceph.conf                                                                                                      100%  771     1.2MB/s   00:00    
ceph.client.k8s-rbd.keyring                                                                                    100%  128   263.5KB/s   00:00    
ceph.conf                                                                                                      100%  771   983.9KB/s   00:00    
ceph.client.k8s-rbd.keyring                                                                                    100%  128   217.4KB/s   00:00    
ceph.conf                                                                                                      100%  771   869.9KB/s   00:00    
ceph.client.k8s-rbd.keyring                                                                                    100%  128   102.6KB/s   00:00  

Confirm on every node that the user can access Ceph.

## Add the following to /etc/hosts on every k8s node so the mon servers can be resolved
192.168.31.81 ceph-mon01
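## (Assumed entries for the remaining monitors; the hostname-to-IP mapping here is only
##  inferred from the monitor list 192.168.31.81-83:6789 used in the manifests below)
192.168.31.82 ceph-mon02
192.168.31.83 ceph-mon03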
# ceph --user k8s-rbd -s
  cluster:
    id:     86c42734-37fc-4091-b543-be6ff23e5134
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon01,ceph-mon02,ceph-mon03 (age 5h)
    mgr: ceph-mgr01(active, since 2h)
    mds: 2/2 daemons up, 2 standby
    osd: 16 osds: 16 up (since 5h), 16 in (since 5d)
    rgw: 2 daemons active (2 hosts, 1 zones)
 
  data:
    volumes: 1/1 healthy
    pools:   11 pools, 305 pgs
    objects: 1.13k objects, 17 MiB
    usage:   1.1 GiB used, 15 GiB / 16 GiB avail
    pgs:     305 active+clean
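
Beyond the cluster status, it is worth confirming that the restricted user can actually reach the pool it was granted; a minimal sketch:

## Should list the k8s-rbd-img1 image created earlier
# rbd --id k8s-rbd ls k8s-rbd-pool1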

1.2 Mounting RBD with a keyring

There are two ways to mount RBD in a k8s environment:

  1. With a keyring
  2. With a k8s secret

Create a pod based on the busybox image.

# cat case1-busybox-keyring.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox 
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always 
    name: busybox
    #restartPolicy: Always
    volumeMounts:
    - name: rbd-data1
      mountPath: /data
  volumes:
    - name: rbd-data1
      rbd:
        monitors:
        - '192.168.31.81:6789'
        - '192.168.31.82:6789'
        - '192.168.31.83:6789'
        pool: k8s-rbd-pool1
        image: k8s-rbd-img1
        fsType: ext4
        readOnly: false
        user: k8s-rbd
        keyring: /etc/ceph/ceph.client.k8s-rbd.keyring
## Deploy busybox
# kubectl apply -f case1-busybox-keyring.yaml
## busybox is now running
# kubectl get pods -o wide
NAME      READY   STATUS    RESTARTS   AGE   IP                NODE             NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          25s   172.100.183.175   192.168.31.103   <none>           <none>

Once the pod is running on a node, exec into it and confirm the volume mount.

/ # df -h
Filesystem                Size      Used Available Use% Mounted on
overlay                  38.2G     12.9G     23.5G  35% /
tmpfs                    64.0M         0     64.0M   0% /dev
tmpfs                     1.9G         0      1.9G   0% /sys/fs/cgroup
/dev/rbd0                 3.9G     24.0K      3.8G   0% /data
/dev/mapper/ubuntu--vg-ubuntu--lv
                         38.2G     12.9G     23.5G  35% /dev/termination-log
/dev/mapper/ubuntu--vg-ubuntu--lv
                         38.2G     12.9G     23.5G  35% /etc/resolv.conf
/dev/mapper/ubuntu--vg-ubuntu--lv
                         38.2G     12.9G     23.5G  35% /etc/hostname
/dev/mapper/ubuntu--vg-ubuntu--lv
                         38.2G     12.9G     23.5G  35% /etc/hosts
shm                      64.0M         0     64.0M   0% /dev/shm
tmpfs                     3.1G     12.0K      3.1G   0% /var/run/secrets/kubernetes.io/serviceaccount
tmpfs                     1.9G         0      1.9G   0% /proc/acpi
tmpfs                    64.0M         0     64.0M   0% /proc/kcore
tmpfs                    64.0M         0     64.0M   0% /proc/keys
tmpfs                    64.0M         0     64.0M   0% /proc/timer_list
tmpfs                    64.0M         0     64.0M   0% /proc/sched_debug
tmpfs                     1.9G         0      1.9G   0% /proc/scsi
tmpfs                     1.9G         0      1.9G   0% /sys/firmware
/ # dd if=/dev/zero of=/data/file bs=1M count=50
50+0 records in
50+0 records out
52428800 bytes (50.0MB) copied, 0.035521 seconds, 1.4GB/s
/ # ls /data/file  -lh
-rw-r--r--    1 root     root       50.0M Sep 21 06:15 /data/file
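
On the worker node that runs the pod (192.168.31.103 in the listing above), the kernel RBD mapping behind /dev/rbd0 can also be checked; a minimal sketch:

## Run on the node hosting the pod; should list k8s-rbd-pool1/k8s-rbd-img1 mapped to /dev/rbd0
# rbd showmapped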

The data is now visible on the Ceph side as well.

root@ceph-mgr01:/apps# ceph df 
--- RAW STORAGE ---
CLASS    SIZE   AVAIL     USED  RAW USED  %RAW USED
hdd    16 GiB  15 GiB  1.3 GiB   1.3 GiB       8.31
TOTAL  16 GiB  15 GiB  1.3 GiB   1.3 GiB       8.31
 
--- POOLS ---
POOL                       ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
device_health_metrics       1    1    619 B        1  1.8 KiB      0    4.4 GiB
mypool                      2   32   10 MiB       16   31 MiB   0.23    4.4 GiB
.rgw.root                   4   32  1.3 KiB        4   48 KiB      0    4.4 GiB
default.rgw.log             5   32  3.6 KiB      177  408 KiB      0    4.4 GiB
default.rgw.control         6   32      0 B        8      0 B      0    4.4 GiB
default.rgw.meta            7    8  1.8 KiB        9   96 KiB      0    4.4 GiB
cephfs-metadata             8   32   48 KiB       41  290 KiB      0    4.4 GiB
cephfs-data                 9   64    301 B        3   36 KiB      0    4.4 GiB
default.rgw.buckets.index  12    8      0 B       22      0 B      0    4.4 GiB
default.rgw.buckets.data   13   32  2.3 MiB      847   14 MiB   0.11    4.4 GiB
k8s-rbd-pool1              16   32   66 MiB       31  199 MiB   1.45    4.4 GiB
root@ceph-mgr01:/apps# ceph df 
--- RAW STORAGE ---
CLASS    SIZE   AVAIL     USED  RAW USED  %RAW USED
hdd    16 GiB  14 GiB  1.5 GiB   1.5 GiB       9.23
TOTAL  16 GiB  14 GiB  1.5 GiB   1.5 GiB       9.23
 
--- POOLS ---
POOL                       ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
device_health_metrics       1    1    619 B        1  1.8 KiB      0    4.3 GiB
mypool                      2   32   10 MiB       16   31 MiB   0.23    4.3 GiB
.rgw.root                   4   32  1.3 KiB        4   48 KiB      0    4.3 GiB
default.rgw.log             5   32  3.6 KiB      177  408 KiB      0    4.3 GiB
default.rgw.control         6   32      0 B        8      0 B      0    4.3 GiB
default.rgw.meta            7    8  1.8 KiB        9   96 KiB      0    4.3 GiB
cephfs-metadata             8   32   48 KiB       41  290 KiB      0    4.3 GiB
cephfs-data                 9   64    301 B        3   36 KiB      0    4.3 GiB
default.rgw.buckets.index  12    8      0 B       22      0 B      0    4.3 GiB
default.rgw.buckets.data   13   32  2.3 MiB      847   14 MiB   0.11    4.3 GiB
k8s-rbd-pool1              16   32  116 MiB       44  349 MiB   2.56    4.3 GiB
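
For a per-image view instead of pool totals, RBD disk usage can be queried directly; a minimal sketch (without the object-map feature this performs a full scan, so it may print a warning and take a moment):

## Provisioned vs. used space of the image backing the volume
# rbd du k8s-rbd-pool1/k8s-rbd-img1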

1.3 Mounting RBD with a secret

First obtain the base64-encoded auth key.

# ceph auth print-key client.k8s-rbd| base64
QVFCUXBDcGptQ001RFJBQXYxWXBVV3RHSXhUQTRyczY5UmN2U3c9PQ==
## The key can be decoded again with base64
root@ceph-mgr01:/apps# echo QVFCUXBDcGptQ001RFJBQXYxWXBVV3RHSXhUQTRyczY5UmN2U3c9PQ== |base64 -d
AQBQpCpjmCM5DRAAv1YpUWtGIxTA4rs69RcvSw==

Write this key into the Secret manifest.

# cat case3-secret-client-k8s-rbd.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-k8s-rbd
type: "kubernetes.io/rbd"
data:
  key: QVFCUXBDcGptQ001RFJBQXYxWXBVV3RHSXhUQTRyczY5UmN2U3c9PQ==
# kubectl apply -f case3-secret-client-k8s-rbd.yaml
secret/ceph-secret-k8s-rbd created
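
To verify that the secret carries the right key, it can be decoded back; a minimal sketch:

## Should print the original AQBQ... key
# kubectl get secret ceph-secret-k8s-rbd -o jsonpath='{.data.key}' | base64 -d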

Write the Deployment YAML.

# cat case4-nginx-secret.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /usr/share/nginx/html/rbd
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '192.168.31.81:6789'
            - '192.168.31.82:6789'
            - '192.168.31.83:6789'
            pool: k8s-rbd-pool1
            image: k8s-rbd-img1
            fsType: ext4
            readOnly: false
            user: k8s-rbd
            secretRef:
              name: ceph-secret-k8s-rbd

Deploy the manifest.

# kubectl apply -f case4-nginx-secret.yaml 
deployment.apps/nginx-deployment created
# kubectl get pods 
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-75f9fc6d4d-wl75x   1/1     Running   0          63s
## Exec in and test
# kubectl exec -it nginx-deployment-75f9fc6d4d-wl75x bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-75f9fc6d4d-wl75x:/# df -Th
Filesystem                        Type     Size  Used Avail Use% Mounted on
overlay                           overlay   39G   14G   24G  36% /
tmpfs                             tmpfs     64M     0   64M   0% /dev
tmpfs                             tmpfs    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv ext4      39G   14G   24G  36% /etc/hosts
shm                               tmpfs     64M     0   64M   0% /dev/shm
/dev/rbd0                         ext4     3.9G   51M  3.8G   2% /usr/share/nginx/html/rbd
tmpfs                             tmpfs    3.2G   12K  3.2G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                             tmpfs    2.0G     0  2.0G   0% /proc/acpi
tmpfs                             tmpfs    2.0G     0  2.0G   0% /proc/scsi
tmpfs                             tmpfs    2.0G     0  2.0G   0% /sys/firmware
root@nginx-deployment-75f9fc6d4d-wl75x:/# ls /usr/share/nginx/html/rbd -l
total 51216
-rw-r--r-- 1 root root 52428800 Sep 21 06:15 file
drwx------ 2 root root    16384 Sep 21 06:09 lost+found
## The file we wrote with dd earlier is still here

1.4 Provisioning through PV/PVC

1.4.1 Create secrets for the regular user and the admin user

## Get the admin key (base64-encoded)
# ceph auth print-key client.admin| base64
QVFBQXhpRmpOb0s1RlJBQXk4RFVxRnNPb0NkMkgwbTlRMVN1SVE9PQ==
## Write the secret manifest
# vi case5-secret-admin.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFBQXhpRmpOb0s1RlJBQXk4RFVxRnNPb0NkMkgwbTlRMVN1SVE9PQ==
## Create the secret
# kubectl apply -f case5-secret-admin.yaml 
secret/ceph-secret-admin created
## The admin user is used to create images
## The regular user is used to mount images; we reuse the k8s-rbd user and secret created earlier, so no new one is needed
# kubectl get secret
NAME                  TYPE                                  DATA   AGE
ceph-secret-admin     kubernetes.io/rbd                     1      42s
ceph-secret-k8s-rbd   kubernetes.io/rbd                     1      34m
default-token-ftvnx   kubernetes.io/service-account-token   3      147d

1.4.2 Create a StorageClass

# cat case6-ceph-storage-class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class-k8s-rbd
  annotations:
    storageclass.kubernetes.io/is-default-class: "false" # not set as the default storage class
provisioner: kubernetes.io/rbd
parameters:
  monitors: 192.168.31.81:6789,192.168.31.82:6789,192.168.31.83:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default 
  pool: k8s-rbd-pool1
  userId: k8s-rbd
  userSecretName: ceph-secret-k8s-rbd
## Create the StorageClass
# kubectl apply -f case6-ceph-storage-class.yaml
storageclass.storage.k8s.io/ceph-storage-class-k8s-rbd created
# kubectl get storageclasses.storage.k8s.io 
NAME                         PROVISIONER         RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-storage-class-k8s-rbd   kubernetes.io/rbd   Delete          Immediate           false                  25s
# kubectl describe storageclasses.storage.k8s.io ceph-storage-class-k8s-rbd
Name:            ceph-storage-class-k8s-rbd
IsDefaultClass:  No
Annotations:     kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"false"},"name":"ceph-storage-class-k8s-rbd"},"parameters":{"adminId":"admin","adminSecretName":"ceph-secret-admin","adminSecretNamespace":"default","monitors":"192.168.31.81:6789,192.168.31.82:6789,192.168.31.83:6789","pool":"k8s-rbd-pool1","userId":"k8s-rbd","userSecretName":"ceph-secret-k8s-rbd"},"provisioner":"kubernetes.io/rbd"}
                 storageclass.kubernetes.io/is-default-class=false
Provisioner:           kubernetes.io/rbd
Parameters:            adminId=admin,adminSecretName=ceph-secret-admin,adminSecretNamespace=default,monitors=192.168.31.81:6789,192.168.31.82:6789,192.168.31.83:6789,pool=k8s-rbd-pool1,userId=k8s-rbd,userSecretName=ceph-secret-k8s-rbd
AllowVolumeExpansion:  <unset>
MountOptions:          <none>
ReclaimPolicy:         Delete
VolumeBindingMode:     Immediate
Events:                <none>

1.4.3 Create a PVC

When the PVC is created, the StorageClass (ceph-storage-class-k8s-rbd) is resolved first; the provisioner then connects to the monitors and creates the image with the ceph-secret-admin credentials, while the k8s-rbd credentials are used when the volume is mounted.

# cat case7-mysql-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-storage-class-k8s-rbd
  resources:
    requests:
      storage: '5Gi'
# kubectl apply -f case7-mysql-pvc.yaml 
persistentvolumeclaim/mysql-data-pvc created
# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS                 AGE
mysql-data-pvc   Bound    pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09   5Gi        RWO            ceph-storage-class-k8s-rbd   3m40s
## mysql-data-pvc is bound to the PV named pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09
# kubectl get pv|grep pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09
pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09   5Gi        RWO            Delete           Bound       default/mysql-data-pvc           ceph-storage-class-k8s-rbd            13m

On the Ceph side you can now see an image named kubernetes-dynamic-pvc-c97841e9-2979-4713-be89-cdad8937d9c9 under k8s-rbd-pool1; this image backs pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09.

root@ceph-mgr01:/apps# rbd --pool k8s-rbd-pool1 ls
k8s-rbd-img1
kubernetes-dynamic-pvc-c97841e9-2979-4713-be89-cdad8937d9c9
root@ceph-mgr01:/apps# rbd --pool k8s-rbd-pool1 --image kubernetes-dynamic-pvc-c97841e9-2979-4713-be89-cdad8937d9c9 info
rbd image 'kubernetes-dynamic-pvc-c97841e9-2979-4713-be89-cdad8937d9c9':
	size 5 GiB in 1280 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 9ad1988440b1
	block_name_prefix: rbd_data.9ad1988440b1
	format: 2
	features: 
	op_features: 
	flags: 
	create_timestamp: Wed Sep 21 15:45:02 2022
	access_timestamp: Wed Sep 21 15:45:02 2022
	modify_timestamp: Wed Sep 21 15:45:02 2022
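
The same mapping can be confirmed from the Kubernetes side, since the PV created by the kubernetes.io/rbd provisioner records the image name in its spec; a minimal sketch:

## Should print the kubernetes-dynamic-pvc-... image name listed above
# kubectl get pv pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09 -o jsonpath='{.spec.rbd.image}'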

1.4.4 Create a Deployment

# cat case8-mysql-single.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.6.46
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: root123
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-data-pvc 
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label 
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 33306
  selector:
    app: mysql
    
# kubectl apply -f case8-mysql-single.yaml 
deployment.apps/mysql created
service/mysql-service created

Looking at the PVC and PV, the mysql-data-pvc PVC is bound to pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09, and that PV was in turn provisioned by the ceph-storage-class-k8s-rbd StorageClass defined earlier.

root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS                 AGE
mysql-data-pvc   Bound    pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09   5Gi        RWO            ceph-storage-class-k8s-rbd   18h
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl get pv pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS                 REASON   AGE
pvc-9810f08e-c6fd-4b30-8bd5-448590b6fc09   5Gi        RWO            Delete           Bound    default/mysql-data-pvc   ceph-storage-class-k8s-rbd            18h
## Exec into the container to check
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl exec -it mysql-f4c6f6668-dtf6j bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
## rbd0 is mounted at /var/lib/mysql
root@mysql-f4c6f6668-dtf6j:/# df -TH
Filesystem                        Type     Size  Used Avail Use% Mounted on
overlay                           overlay   42G   14G   26G  36% /
tmpfs                             tmpfs     68M     0   68M   0% /dev
tmpfs                             tmpfs    2.1G     0  2.1G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv ext4      42G   14G   26G  36% /etc/hosts
shm                               tmpfs     68M     0   68M   0% /dev/shm
/dev/rbd0                         ext4     5.2G  122M  5.1G   3% /var/lib/mysql
tmpfs                             tmpfs    3.4G   13k  3.4G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                             tmpfs    2.1G     0  2.1G   0% /proc/acpi
tmpfs                             tmpfs    2.1G     0  2.1G   0% /proc/scsi
tmpfs                             tmpfs    2.1G     0  2.1G   0% /sys/firmware
## The data files under /var/lib/mysql have been created
root@mysql-f4c6f6668-dtf6j:/# ls /var/lib/mysql -l
total 110620
-rw-rw---- 1 mysql mysql       56 Sep 21 08:03 auto.cnf
-rw-rw---- 1 mysql mysql 50331648 Sep 22 01:50 ib_logfile0
-rw-rw---- 1 mysql mysql 50331648 Sep 21 08:03 ib_logfile1
-rw-rw---- 1 mysql mysql 12582912 Sep 22 01:50 ibdata1
drwx------ 2 mysql root     16384 Sep 21 08:01 lost+found
drwx------ 2 mysql mysql     4096 Sep 21 08:03 mysql
drwx------ 2 mysql mysql     4096 Sep 21 08:03 performance_schema
## Write a file into the directory
root@mysql-f4c6f6668-dtf6j:/var/lib/mysql# echo 123 > aa.db
root@mysql-f4c6f6668-dtf6j:/var/lib/mysql# cat aa.db
123
## Now simulate deleting and recreating the containers to see whether the data is affected
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl delete -f case8-mysql-single.yaml 
deployment.apps "mysql" deleted
service "mysql-service" deleted
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl apply -f case8-mysql-single.yaml 
deployment.apps/mysql created
service/mysql-service created
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl get pods |grep mysql
mysql-f4c6f6668-dm6tr              1/1     Running   0          17s
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl exec -it mysql-f4c6f6668-dm6tr bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@mysql-f4c6f6668-dm6tr:/# cat /var/lib/mysql/aa.db 
123
## The data in the PVC is unaffected
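
Since the Service exposes MySQL on NodePort 33306, connectivity can also be checked from outside the cluster; a minimal sketch (node IP taken from the earlier pod listing, and a local mysql client is assumed):

## Password comes from MYSQL_ROOT_PASSWORD in the Deployment manifest
# mysql -h 192.168.31.103 -P 33306 -uroot -proot123 -e 'SELECT VERSION();'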

1.5 Provisioning through CephFS

If the MySQL above were run as a primary/replica setup, each container would need its own dedicated storage for persistence, which is a good fit for RBD.
Here we instead want several replicas of one Deployment to share a single volume, which RBD is not well suited for, so we mount the storage via CephFS to share data across the pods.

root@k8s-master-01:/opt/k8s-data/yaml/ceph# cat case9-nginx-cephfs.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nginx-cephfs 
          mountPath: /usr/share/nginx/html/cephfs
      volumes:
        - name: nginx-cephfs
          cephfs:
            monitors:
            - '192.168.31.81:6789'
            - '192.168.31.82:6789'
            - '192.168.31.83:6789'
            path: /
            user: admin
            secretRef:
              name: ceph-secret-admin

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: ng-deploy-80-service-label
  name: ng-deploy-80-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 33380
  selector:
    app: ng-deploy-80

We mount CephFS into the containers at /usr/share/nginx/html/cephfs, authenticating via the secret, and expose the containers through a NodePort Service on port 33380.

# kubectl apply -f case9-nginx-cephfs.yaml 
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl get pods
NAME                               READY   STATUS    RESTARTS   AGE
nginx-deployment-78894754c-j6nm2   1/1     Running   0          91s
nginx-deployment-78894754c-ssvlc   1/1     Running   0          91s
nginx-deployment-78894754c-x7cwp   1/1     Running   0          91s
## Exec into a container to check the CephFS mount
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl exec -it nginx-deployment-78894754c-j6nm2 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
## CephFS is mounted at /usr/share/nginx/html/cephfs
root@nginx-deployment-78894754c-j6nm2:/# df -Th
Filesystem                                                 Type     Size  Used Avail Use% Mounted on
overlay                                                    overlay   39G   17G   21G  45% /
tmpfs                                                      tmpfs     64M     0   64M   0% /dev
tmpfs                                                      tmpfs    3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv                          ext4      39G   17G   21G  45% /etc/hosts
shm                                                        tmpfs     64M     0   64M   0% /dev/shm
192.168.31.81:6789,192.168.31.82:6789,192.168.31.83:6789:/ ceph     4.0G     0  4.0G   0% /usr/share/nginx/html/cephfs
tmpfs                                                      tmpfs    7.1G   12K  7.1G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                                                      tmpfs    3.9G     0  3.9G   0% /proc/acpi
tmpfs                                                      tmpfs    3.9G     0  3.9G   0% /proc/scsi
tmpfs                                                      tmpfs    3.9G     0  3.9G   0% /sys/firmware
root@nginx-deployment-78894754c-j6nm2:/# cd /usr/share/nginx/html/cephfs
root@nginx-deployment-78894754c-j6nm2:/usr/share/nginx/html/cephfs# echo 123 > index.html
root@nginx-deployment-78894754c-j6nm2:~# curl 127.0.0.1/cephfs/index.html
123
## On the node, the CephFS mount is also visible
# df -Th|grep nginx
192.168.31.81:6789,192.168.31.82:6789,192.168.31.83:6789:/ ceph      4.0G     0  4.0G   0% /var/lib/kubelet/pods/69ee718d-579b-4c8e-81be-cfe032c0f115/volumes/kubernetes.io~cephfs/nginx-cephfs
## Checking the file from the other containers shows that both of them read the same content
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl exec -it nginx-deployment-78894754c-ssvlc cat /usr/share/nginx/html/cephfs/index.html
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
123
root@k8s-master-01:/opt/k8s-data/yaml/ceph# kubectl exec -it nginx-deployment-78894754c-x7cwp cat /usr/share/nginx/html/cephfs/index.html
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
123
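
The NodePort Service can be tested from outside the cluster as well; a minimal sketch (any node IP works, here the one from the earlier pod listing):

## Should return the "123" written to index.html above
# curl http://192.168.31.103:33380/cephfs/index.html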

Reposted from blog.csdn.net/qq_29974229/article/details/126987203