GlusterFS
GlusterFS is an open-source distributed file system. It scales to several petabytes of storage capacity and thousands of clients, aggregating storage servers over the network into a single parallel network file system. It is scalable, high-performance, and highly available.
Prerequisite: a GlusterFS cluster must already be deployed in the lab environment. This article assumes a storage volume named gv0 has been created on it.
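For reference, a minimal sketch of how such a volume might have been created on the GlusterFS nodes. The brick path /data/brick1/gv0 is an assumption for illustration, and newer Gluster releases will ask for confirmation when creating a replica-2 volume:
# On one GlusterFS node (10.0.0.41), as root:
$ gluster peer probe 10.0.0.42
$ gluster volume create gv0 replica 2 10.0.0.41:/data/brick1/gv0 10.0.0.42:/data/brick1/gv0
$ gluster volume start gv0
$ gluster volume info gv0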
1. Create an Endpoints object
Create a file named glusterfs_ep.yaml:
$ vi glusterfs_ep.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs
  namespace: default
subsets:
# Add the IP address of each GlusterFS cluster node
- addresses:
  - ip: 10.0.0.41
  - ip: 10.0.0.42
  ports:
  # Add the GlusterFS brick port
  - port: 49152
    protocol: TCP
Apply the YAML:
$ kubectl create -f glusterfs_ep.yaml
endpoints/glusterfs created
# View the created Endpoints object
[root@k8s-master01 ~]# kubectl get ep
NAME ENDPOINTS AGE
glusterfs 10.0.0.41:49152,10.0.0.42:49152 15s
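The port used in the Endpoints object (49152) is the brick port of the GlusterFS volume; 49152 is simply the first port in the range Gluster assigns to bricks. It can be double-checked on any GlusterFS node:
$ gluster volume status gv0
# The Brick rows list the TCP port each brick process listens on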
2. Create a Service for the Endpoints
The Endpoints object lists the GlusterFS cluster nodes. For Pods to reach these nodes, a Service with the same name must be created on top of it:
$ vi glusterfs_svc.yaml
apiVersion: v1
kind: Service
metadata:
  # This name must match the name of the Endpoints object
  name: glusterfs
spec:
  ports:
  - port: 49152
    protocol: TCP
    targetPort: 49152
  sessionAffinity: None
  type: ClusterIP
Apply the YAML:
$ kubectl create -f glusterfs_svc.yaml
service/glusterfs created
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
glusterfs ClusterIP 10.1.104.145 <none> 49152/TCP 20s
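Because this Service defines no selector, Kubernetes does not generate endpoints for it; it picks up the manually created Endpoints object with the same name. A quick check (output abbreviated):
$ kubectl describe svc glusterfs | grep Endpoints
Endpoints:         10.0.0.41:49152,10.0.0.42:49152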
3. Create a PV backed by GlusterFS
$ vi glusterfs_pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gluster
  labels:
    type: glusterfs
spec:
  capacity:
    # Specify the capacity of this PV
    storage: 50Gi
  accessModes:
  - ReadWriteMany
  glusterfs:
    # Specify the name of the GlusterFS Endpoints object
    endpoints: "glusterfs"
    # path is the name of the volume created in GlusterFS;
    # log in to the GlusterFS cluster and run "gluster volume list"
    # to see the volumes that have been created
    path: "gv0"
    readOnly: false
Apply the YAML:
$ kubectl create -f glusterfs_pv.yaml
persistentvolume/gluster created
$ kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
gluster 50Gi RWX Retain Available 10s
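The RECLAIM POLICY column shows Retain, the default for manually created PVs: when its claim is deleted, the volume and the data on it are kept rather than removed. The policy can also be set explicitly in the PV spec; a minimal sketch of the relevant field:
spec:
  persistentVolumeReclaimPolicy: Retain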
4. Create a PVC for the GlusterFS PV
$ vi glusterfs_pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Named to match the PV above for clarity (binding itself is
  # matched on capacity and access modes, not on the name)
  name: gluster
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      # Amount of the PV's capacity this PVC requests
      storage: 20Gi
Apply the YAML:
$ kubectl create -f glusterfs_pvc.yaml
persistentvolumeclaim/gluster created
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
gluster Bound gluster 50Gi RWX 83s
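Note the CAPACITY column: it shows the PV's full 50Gi, not the 20Gi requested, because a PVC binds to a whole PV and gets all of its capacity. The PV side now reflects the binding too (output abbreviated):
$ kubectl get pv gluster
NAME      CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM             ...
gluster   50Gi       RWX            Retain           Bound    default/gluster   ...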
5. Create an nginx Pod that mounts the PVC
$ vim nginx-demo.yaml
---
# Pod
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: web
    env: test
spec:
  containers:
  - name: nginx
    image: nginx:1.13
    ports:
    - containerPort: 80
    volumeMounts:
    - name: data-gv0
      mountPath: /usr/share/nginx/html
  volumes:
  - name: data-gv0
    persistentVolumeClaim:
      # Bind to the PVC created above
      claimName: gluster
Apply the YAML:
$ kubectl create -f nginx-demo.yaml
pod/nginx created
[root@k8s-master01 ~]# kubectl get pods -o wide | grep "nginx"
nginx 1/1 Running 0 2m 10.244.1.222 k8s-node01 <none> <none>
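To confirm that GlusterFS is really mounted inside the container, a hedged check (nginx:1.13 is a Debian-based image, so df is available):
$ kubectl exec nginx -- df -h /usr/share/nginx/html
# The Filesystem column should show the Gluster volume, e.g. 10.0.0.41:gv0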
On any client machine, mount the GlusterFS volume to /mnt, then create an index.html file:
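The client needs the GlusterFS FUSE client installed for the mount to work; a minimal sketch, assuming a CentOS/RHEL host (package names vary by distribution):
$ yum install -y glusterfs glusterfs-fuse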
$ mount -t glusterfs k8s-store01:/gv0 /mnt/
$ cd /mnt && echo "this nginx store used glusterfs cluster" >index.html
On the master node, access the Pod with curl:
$ curl 10.244.1.222/index.html
this nginx store used glusterfs cluster
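To clean up afterwards, delete the objects in reverse order using the files created above:
$ kubectl delete -f nginx-demo.yaml
$ kubectl delete -f glusterfs_pvc.yaml
$ kubectl delete -f glusterfs_pv.yaml
$ kubectl delete -f glusterfs_svc.yaml
$ kubectl delete -f glusterfs_ep.yaml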