Prepare a machine to act as the NFS server.
# Install the NFS server packages and start the service.
yum -y install nfs-utils
# NOTE(review): on newer CentOS/RHEL the unit is "nfs-server"; "nfs" is an alias on CentOS 7 — confirm for your distro.
systemctl start nfs
systemctl enable nfs
# Create the directories to export: /data/volumes/v1 .. /data/volumes/v5
# (the translation had corrupted these paths with spaces and wrong case).
mkdir -pv /data/volumes
cd /data/volumes
mkdir v{1,2,3,4,5}
Configure the shared directories in /etc/exports.
# Edit the NFS exports file; each line is: <path> <allowed-network>(options)
# rw: read-write; sync: commit writes to disk before replying;
# no_root_squash: remote root keeps root privileges on the share.
vim /etc/exports
/data/volumes/v1 192.168.1.0/24(rw,sync,no_root_squash)
/data/volumes/v2 192.168.1.0/24(rw,sync,no_root_squash)
/data/volumes/v3 192.168.1.0/24(rw,sync,no_root_squash)
/data/volumes/v4 192.168.1.0/24(rw,sync,no_root_squash)
/data/volumes/v5 192.168.1.0/24(rw,sync,no_root_squash)
# Re-export everything defined in /etc/exports (-a all, -r re-export, -v verbose).
exportfs -arv
# Show this host's exported directories to confirm the shares are live.
showmount -e
Install the NFS client utilities on every Kubernetes worker node.
# Install the NFS client packages on each worker node and start the service
# (the translated command "yum the install nfs -Y-utils" was corrupted).
yum -y install nfs-utils
systemctl start nfs
systemctl enable nfs
Test whether NFS is working correctly.
# Create a local mount point on the worker node.
mkdir /v1
# 192.168.1.190 is the NFS server's IP.
mount -t nfs 192.168.1.190:/data/volumes/v1 /v1
# Write through the mount; the file should appear on the NFS server.
# (The original ran this echo BEFORE mounting, which wrote into the local
# directory and was then hidden by the mountpoint — removed that duplicate.)
echo "test v1" >> /v1/test.txt
# On the NFS server: verify the file written on the worker arrived via the share.
cat /data/volumes/v1/test.txt
The test.txt written on the worker node has been synchronized to the NFS server — NFS works.
# Unmount the test directory /v1 on the worker node.
# (The corrupted "umount / v1" would have tried to unmount the root filesystem.)
umount /v1
Create the PersistentVolume, PersistentVolumeClaim and Pod on the master node.
vim pv.yaml
# Four PersistentVolumes backed by the NFS exports created above.
# Indentation was lost in the original paste (all keys at column 0 — invalid
# YAML); restored here.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
  labels:
    name: pv1
spec:
  nfs:
    # Directory exported by the NFS server.
    path: /data/volumes/v1
    # IP address of the NFS server.
    server: 192.168.1.190
  # ReadWriteOnce: mounted read-write by a single node.
  accessModes: ["ReadWriteOnce"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv2
  labels:
    name: pv2
spec:
  nfs:
    path: /data/volumes/v2
    server: 192.168.1.190
  # ReadOnlyMany: mounted read-only by many nodes.
  accessModes: ["ReadOnlyMany"]
  capacity:
    storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv3
  labels:
    name: pv3
spec:
  nfs:
    path: /data/volumes/v3
    server: 192.168.1.190
  # ReadWriteMany: mounted read-write by many nodes.
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 10Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv4
  labels:
    name: pv4
spec:
  nfs:
    path: /data/volumes/v4
    server: 192.168.1.190
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 10Gi
vim pod-pvc.yaml
# PersistentVolumeClaim: requests 1Gi of ReadWriteMany storage; Kubernetes
# binds it to the best-matching available PV (pv3 or pv4 above).
# Indentation was lost in the original paste — restored here.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-myapp
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-myapp
  namespace: default
spec:
  volumes:
    - name: html
      # Use the pvc-myapp claim as the volume source.
      persistentVolumeClaim:
        claimName: pvc-myapp
  containers:
    - name: myapp
      image: ikubernetes/myapp:v1
      volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
Deploy the manifests.
# Create the PersistentVolumes first, then the PVC and the Pod.
kubectl apply -f pv.yaml
kubectl apply -f pod-pvc.yaml
View the PersistentVolumes.
kubectl get pv
View the PersistentVolumeClaims.
kubectl get pvc
The claim bound to pv3; on the NFS server, create /data/volumes/v3/index.html.
# Create the home page in pv3's exported directory (string quoted so the
# shell cannot word-split or glob it).
echo "This is the home page pv3" >> /data/volumes/v3/index.html
View the Pod and note its IP.
kubectl get pods -o wide
# Request the Pod's IP; nginx should serve the page from the NFS volume.
curl 10.244.2.39
The NFS directory /data/volumes/v3 and the container's /usr/share/nginx/html directory are kept in sync.
Now modify files under /usr/share/nginx/html inside the Pod container and check whether the changes appear in /data/volumes/v3.
# Open a shell inside the Pod container (the translated command had the
# words scrambled: "Exec -it POD-myapp kubectl - / bin / SH").
kubectl exec -it pod-myapp -- /bin/sh
cd /usr/share/nginx/html
# Modify the existing page (the original echo was missing its redirect
# target) and add a new file.
echo "modify index.html" >> index.html
echo "add file" >> new.txt
On the NFS server, check /data/volumes/v3 to verify the changes were synchronized back.
Delete the Pod and the PVC.
# Delete the Pod and the PVC, then inspect the PVs' status.
kubectl delete -f pod-pvc.yaml
kubectl get pv
pv3's status is now Released.
Released means the PVC has been deleted but the cluster has not yet reclaimed the volume.
In this state the PV still holds a reference to the old claim, so it cannot be bound by a new PVC.
To make pv3 reusable, edit its definition and remove that stale claim reference.
kubectl edit pv pv3
In the editor, delete the spec.claimRef section, then save and exit.
kubectl get pv
The PV's status returns to Available.