Storage Volumes in Kubernetes: Storing Data on the Node and in the Pod (Part 1)

Container storage volumes

A Pod has its own lifecycle.
When the Pod goes away, its data goes away with it.
So the data has to be kept outside the container.

A Docker-style volume only gives limited persistence on Kubernetes, because Kubernetes schedules Pods across nodes: after a Pod dies and is recreated, it does not automatically come back to where its data was written.

Only storage that lives off the node can provide real persistence.

On Kubernetes, when a Pod is deleted, its volume (an emptyDir, for example) is deleted along with it; this is a point that distinguishes it from Docker.

emptyDir: an empty directory created together with the Pod
hostPath: a directory on the host (node)

Distributed storage:
glusterfs, rbd, cephfs, cloud storage (EBS, etc.)

To see which volume types Kubernetes supports:
kubectl explain pods.spec.volumes
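
You can also drill one level deeper to list the fields of a single volume type, for example:
kubectl explain pods.spec.volumes.emptyDir
kubectl explain pods.spec.volumes.hostPath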

[root@master volumes]# cat pod-vol-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    node1/create-by: "cluster admin"  #an annotation used as a note
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports: 
    - name: http
      containerPort: 80
    - name: https
      containerPort: 443
    volumeMounts:
    - name: html
      mountPath: /data/web/html/   #mount point inside the myapp container
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent      #Always / Never / IfNotPresent: always pull, never pull, pull only if the image is not present locally
    volumeMounts:
    - name: html
      mountPath: /data/           #mount point inside the busybox container
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 7200"
  volumes:
  - name: html
    emptyDir: {}

kubectl create -f pod-vol-demo.yaml 
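
If you want to double-check that the annotation and the html volume were actually recorded, kubectl describe will show both (output not reproduced here):
kubectl describe pod pod-demo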

Check whether it was created successfully:
[root@master volumes]# kubectl get pods
NAME       READY     STATUS    RESTARTS   AGE
client     1/1       Running   0          5d
pod-demo   2/2       Running   0          2m

Enter the busybox container:
[root@master volumes]# kubectl exec -it pod-demo -c busybox -- /bin/sh
/ # ls
bin   data  dev   etc   home  proc  root  sys   tmp   usr   var

Check the mounts:

/ # mount
rootfs on / type rootfs (rw)
overlay on / type overlay (rw,relatime,lowerdir=/var/lib/docker/overlay2/l/4VOPP4JKAAV5FCYGWSVSBG55BW:/var/lib/docker/overlay2/l/D64M6ZROC774RMFNS4RKLUNME7,upperdir=/var/lib/docker/overlay2/fe23f482f33db7242b7f6acf54964c273c025db3568020fc12acfa8d60b331bf/diff,workdir=/var/lib/docker/overlay2/fe23f482f33db7242b7f6acf54964c273c025db3568020fc12acfa8d60b331bf/work)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev type tmpfs (rw,nosuid,size=65536k,mode=755)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=666)
sysfs on /sys type sysfs (ro,nosuid,nodev,noexec,relatime)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,relatime,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (ro,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (ro,nosuid,nodev,noexec,relatime,net_prio,net_cls)
cgroup on /sys/fs/cgroup/devices type cgroup (ro,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (ro,nosuid,nodev,noexec,relatime,cpuacct,cpu)
cgroup on /sys/fs/cgroup/pids type cgroup (ro,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/freezer type cgroup (ro,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/perf_event type cgroup (ro,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/cpuset type cgroup (ro,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/memory type cgroup (ro,nosuid,nodev,noexec,relatime,memory)
cgroup on /sys/fs/cgroup/blkio type cgroup (ro,nosuid,nodev,noexec,relatime,blkio)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (ro,nosuid,nodev,noexec,relatime,hugetlb)
mqueue on /dev/mqueue type mqueue (rw,nosuid,nodev,noexec,relatime)
/dev/mapper/centos-root on /data type xfs (rw,relatime,attr2,inode64,noquota)
/dev/mapper/centos-root on /dev/termination-log type xfs (rw,relatime,attr2,inode64,noquota)
/dev/mapper/centos-root on /etc/resolv.conf type xfs (rw,relatime,attr2,inode64,noquota)
/dev/mapper/centos-root on /etc/hostname type xfs (rw,relatime,attr2,inode64,noquota)
/dev/mapper/centos-root on /etc/hosts type xfs (rw,relatime,attr2,inode64,noquota)
shm on /dev/shm type tmpfs (rw,nosuid,nodev,noexec,relatime,size=65536k)
tmpfs on /var/run/secrets/kubernetes.io/serviceaccount type tmpfs (ro,relatime)
proc on /proc/asound type proc (ro,relatime)
proc on /proc/bus type proc (ro,relatime)
proc on /proc/fs type proc (ro,relatime)
proc on /proc/irq type proc (ro,relatime)
proc on /proc/sys type proc (ro,relatime)
proc on /proc/sysrq-trigger type proc (ro,relatime)
tmpfs on /proc/acpi type tmpfs (ro,relatime)
tmpfs on /proc/kcore type tmpfs (rw,nosuid,size=65536k,mode=755)
tmpfs on /proc/keys type tmpfs (rw,nosuid,size=65536k,mode=755)
tmpfs on /proc/timer_list type tmpfs (rw,nosuid,size=65536k,mode=755)
tmpfs on /proc/timer_stats type tmpfs (rw,nosuid,size=65536k,mode=755)
tmpfs on /proc/sched_debug type tmpfs (rw,nosuid,size=65536k,mode=755)
tmpfs on /proc/scsi type tmpfs (ro,relatime)
tmpfs on /sys/firmware type tmpfs (ro,relatime)
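
The emptyDir volume is the mount on /data in that long list; on the node it is typically just a directory under /var/lib/kubelet, which is why the device shown is the node's root filesystem (/dev/mapper/centos-root). To pick it out, you could filter:
/ # mount | grep /data
/dev/mapper/centos-root on /data type xfs (rw,relatime,attr2,inode64,noquota)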

Start writing files into the mounted directory (from busybox); the writes succeed:

/ # date
Mon Sep 10 07:59:18 UTC 2018
/ # echo $(date) >> /data/index.html
/ # echo $(date) >> /data/index.html
/ # cat /data/index.html 
Mon Sep 10 07:59:56 UTC 2018
Mon Sep 10 08:00:02 UTC 2018

Enter the myapp container; the data turns out to be shared:

[root@master volumes]# kubectl exec -it pod-demo -c myapp -- /bin/sh
/ # cat /data/web/html/index.html 
Mon Sep 10 07:59:56 UTC 2018
Mon Sep 10 08:00:02 UTC 2018

################
This shows that busybox's /data and myapp's /data/web/html/ share the same volume.
################
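
The same check also works without opening an interactive shell, for example:
[root@master volumes]# kubectl exec pod-demo -c myapp -- cat /data/web/html/index.html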

Now delete the Pod and try a new layout:
the first container (the main container) serves web content to the outside;
the second container generates the content stored in the shared volume.
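
Since the new manifest reuses the name pod-demo, delete the existing Pod first (either form works):
kubectl delete pod pod-demo
# or: kubectl delete -f pod-vol-demo.yaml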

Re-edit the YAML manifest.
# Note: when a container's command fails, it is not necessarily the command itself that is wrong; the image may simply not support it.
[root@master volumes]# cat pod-vol-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    node1/create-by: "cluster admin"  #an annotation used as a note
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports: 
    - name: http
      containerPort: 80
    - name: https
      containerPort: 443
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent      #Always / Never / IfNotPresent: always pull, never pull, pull only if the image is not present locally
    volumeMounts:
    - name: html
      mountPath: /data/
    command: ["/bin/sh"]
    args: ["-c","while true; do echo $(date) >> /data/index.html; sleep 2; done"]

  volumes:
  - name: html
    emptyDir: {}
Create it:
kubectl apply -f pod-vol-demo.yaml
[root@master volumes]# kubectl get pods
NAME       READY     STATUS    RESTARTS   AGE
client     1/1       Running   0          5d
pod-demo   2/2       Running   0          1m

Check whether it works:
[root@master volumes]# kubectl get pods -o wide
NAME       READY     STATUS    RESTARTS   AGE       IP            NODE      NOMINATED NODE
client     1/1       Running   0          5d        10.244.2.3    node2     <none>
pod-demo   2/2       Running   0          2m        10.244.2.60   node2     <none>
[root@master volumes]# curl 10.244.2.60
Mon Sep 10 08:46:41 UTC 2018
Mon Sep 10 08:46:43 UTC 2018
Mon Sep 10 08:46:45 UTC 2018
Mon Sep 10 08:46:47 UTC 2018
Mon Sep 10 08:46:49 UTC 2018
Mon Sep 10 08:46:51 UTC 2018
Mon Sep 10 08:46:53 UTC 2018
Mon Sep 10 08:46:55 UTC 2018
Mon Sep 10 08:46:57 UTC 2018

A new entry is appended every two seconds.
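
To watch it grow, you could poll the last line in a simple loop, for example:
[root@master volumes]# while true; do curl -s 10.244.2.60 | tail -1; sleep 2; done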

We have verified that the same volume can be shared by different containers within one Pod,
and that its lifecycle ends when the Pod itself goes away.
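
emptyDir also accepts a medium and a sizeLimit field. A minimal sketch of what the volumes section of the manifest above could look like instead (medium: Memory backs the volume with tmpfs, so it is even more ephemeral; sizeLimit support depends on your cluster version and feature gates):
  volumes:
  - name: html
    emptyDir:
      medium: Memory    #back the volume with tmpfs instead of node disk
      sizeLimit: 64Mi   #cap the volume size (assumed to be enabled in your cluster)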

Experiment: verify that the data is stored on the node (hostPath):

[root@master volumes]# cat pod-hostpath-vol.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-hostpath
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    hostPath:
      path: /data/pod/volume1
      type: DirectoryOrCreate  #create the path automatically if it does not exist

kubectl apply -f pod-hostpath-vol.yaml
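
Besides DirectoryOrCreate, the type field also accepts values such as Directory, File, FileOrCreate and Socket; you can list them with:
kubectl explain pods.spec.volumes.hostPath.type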

Run on node1:
[root@node1 ~]# mkdir -p /data/pod/volume1
[root@node1 ~]# vim /data/pod/volume1/index.html
node1
Run on node2:
[root@node2 ~]# mkdir -p /data/pod/volume1
[root@node2 ~]# vim /data/pod/volume1/index.html
node2

[root@master volumes]# kubectl get pods -o wide
NAME               READY     STATUS    RESTARTS   AGE       IP            NODE      NOMINATED NODE
client             1/1       Running   0          5d        10.244.2.3    node2     <none>
pod-vol-hostpath   1/1       Running   0          9s        10.244.2.62   node2     <none>
[root@master volumes]# curl 10.244.2.62
node2
But if the node itself dies, the data will still be lost.
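
Unlike emptyDir, the files stay on the node after the Pod is deleted: if you delete and recreate this Pod and it happens to be scheduled onto the same node again, curl will still return the same content. A quick check (the new Pod IP below is just a placeholder):
[root@master volumes]# kubectl delete -f pod-hostpath-vol.yaml
[root@master volumes]# kubectl apply -f pod-hostpath-vol.yaml
[root@master volumes]# kubectl get pods -o wide     # confirm which node it landed on
[root@master volumes]# curl <new-pod-ip>            # should still return "node2" if it landed on node2 again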
