This article shows how Kubernetes uses emptyDir, hostPath, NFS, PV, and PVC for storage. The walkthrough is concise and hands-on; work through it step by step and you should be able to reproduce everything on your own cluster.
Volume types covered here: emptyDir, gitRepo, hostPath, and nfs.
emptyDir: one pod runs two containers, one serving requests and the other writing the files; when the pod is deleted, the volume is deleted with it.
gitRepo: populates the volume by cloning a Git repository (deprecated; see the sketch after this list).
hostPath: a path on the host; the volume survives pod deletion (the path must be created on every node the pod may run on).
nfs: shared storage (multiple pods should use separate directories within the share).
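The examples below do not demonstrate gitRepo, so here is a minimal sketch of what such a volume looks like. The pod name and repository URL are placeholders, not from the original walkthrough; gitRepo is deprecated, and an initContainer running git clone into an emptyDir is the usual replacement:
apiVersion: v1
kind: Pod
metadata:
  name: pod-gitrepo                                # hypothetical name for this sketch
spec:
  containers:
  - name: web
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: site
      mountPath: /usr/share/nginx/html
  volumes:
  - name: site
    gitRepo:                                       # deprecated volume type
      repository: "https://example.com/site.git"   # placeholder repository URL
      revision: "master"                           # branch, tag, or commit to check out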
Help:
[root@k8s1 ~]# kubectl explain pods.spec.volumes.persistentVolumeClaim -- PVC help
[root@k8s1 ~]# kubectl explain pods.spec.volumes -- volumes help
[root@k8s1 ~]# kubectl explain pv -- PV help
1. Using emptyDir for storage (one pod, two containers: one writes the data, the other serves it)
[root@k8s1 ~]# vim 11.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo                  # define a pod
  namespace: default
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp                   # first container: serves the pages
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html   # mount the html volume at nginx's default web root
  - name: busybox                 # second container: writes the pages
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: html                  # the same html volume, mounted at /data
      mountPath: /data/
    command: ["/bin/sh","-c","while true;do echo $(date) >> /data/index.html;sleep 2;done"]
  volumes:                        # define the html volume
  - name: html
    emptyDir: {}
[root@k8s1 ~]# kubectl apply -f 11.yaml
pod/pod-demo created
[root@k8s1 ~]# kubectl get pods -o wide
NAME       READY   STATUS    RESTARTS   AGE    IP            NODE   NOMINATED NODE   READINESS GATES
pod-demo   2/2     Running   0          103s   10.244.1.13   k8s2   <none>           <none>
[root@k8s1 ~]# kubectl exec -it pod-demo -c busybox -- /bin/sh
/ # cat /data/index.html
Fri Feb 22 09:39:53 UTC 2019
Fri Feb 22 09:39:55 UTC 2019
Fri Feb 22 09:39:57 UTC 2019
Fri Feb 22 09:39:59 UTC 2019
[root@k8s1 ~]# curl http://10.244.1.13
Fri Feb 22 09:39:53 UTC 2019
Fri Feb 22 09:39:55 UTC 2019
Fri Feb 22 09:39:57 UTC 2019
Fri Feb 22 09:39:59 UTC 2019
Fri Feb 22 09:40:01 UTC 2019
Fri Feb 22 09:40:03 UTC 2019
Fri Feb 22 09:40:05 UTC 2019
[root@k8s1 ~]#
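emptyDir is not limited to node-disk backing. As an aside not in the original walkthrough, the volume can be placed on tmpfs with an optional size cap by replacing the volumes block of 11.yaml above; the 64Mi value is an arbitrary example:
  volumes:
  - name: html
    emptyDir:
      medium: Memory    # back the volume with RAM (tmpfs) instead of node disk
      sizeLimit: 64Mi   # kubelet evicts the pod if usage exceeds this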
2. Using hostPath for storage (if a node goes down, data that lived on that node becomes unavailable)
On node1:
[root@k8s2 ~]# mkdir -p /data/pod
[root@k8s2 ~]# echo node1 > /data/pod/index.html -- give each node different content so they can be told apart
[root@k8s2 ~]# cat /data/pod/index.html
node1
[root@k8s2 ~]#
On node2:
[root@k8s3 ~]# mkdir -p /data/pod
[root@k8s3 ~]# echo node2 > /data/pod/index.html
[root@k8s3 ~]# cat /data/pod/index.html
node2
[root@k8s3 ~]#
On the master:
[root@k8s1 ~]# vim 12.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-hostpath
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html                         # mount the html volume
      mountPath: /usr/share/nginx/html   # nginx web root
  volumes:
  - name: html
    hostPath:
      path: /data/pod/                   # host directory backing the volume (create it on every node the pod may land on)
      type: DirectoryOrCreate
[root@k8s1 ~]# kubectl apply -f 12.yaml
pod/pod-vol-hostpath created
[root@k8s1 ~]# kubectl get pods -o wide
NAME               READY   STATUS    RESTARTS   AGE   IP            NODE   NOMINATED NODE   READINESS GATES
pod-demo           2/2     Running   0          64m   10.244.1.13   k8s2   <none>           <none>
pod-vol-hostpath   1/1     Running   0          4s    10.244.2.22   k8s3   <none>           <none>
[root@k8s1 ~]# curl http://10.244.2.22 -- the pod landed on node2, so we see node2's page; had it landed on node1 we would see node1's content
node2
[root@k8s1 ~]#
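Because each node's hostPath holds different content, it can help to pin the pod to a specific node. A minimal sketch, not part of the original article, using spec.nodeName (the pod name is hypothetical; k8s2 is one of the nodes above):
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-hostpath-pinned    # hypothetical name for this sketch
  namespace: default
spec:
  nodeName: k8s2                   # bypass the scheduler and place the pod on k8s2
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html
    hostPath:
      path: /data/pod/
      type: DirectoryOrCreate
With the pod fixed to k8s2, the curl above would consistently return node1's content.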
3. Using NFS shared storage
On the NFS server:
[root@liutie1 ~]# mkdir /data/v6
[root@liutie1 ~]# vim /etc/exports
/data/v6 172.16.8.0/24(rw,no_root_squash)
[root@liutie1 ~]# systemctl restart nfs
[root@liutie1 ~]# exportfs -arv
exporting 172.16.8.0/24:/data/v6
[root@liutie1 ~]# showmount -e
Export list for liutie1:
/data/v6 172.16.8.0/24
[root@liutie1 ~]#
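One hedged addition to the server setup: the transcript restarts nfs but does not enable it at boot, and rpcbind must also be running. On CentOS 7 this would be roughly the following (unit names vary by distribution; on newer releases the unit is nfs-server):
[root@liutie1 ~]# systemctl enable rpcbind nfs -- start both services automatically at boot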
On a k8s node:
[root@k8s1 ~]# mkdir /data/v6 -- create a mount point
[root@k8s1 ~]# mount.nfs 172.16.8.108:/data/v6 /data/v6 -- test mounting the share by hand
[root@k8s1 ~]# umount /data/v6
[root@k8s1 ~]# vim nfs.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-nfs
  namespace: default
spec:
  containers:
  - name: pod-nfs
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html1
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html1
    nfs:
      path: /data/v6
      server: 172.16.8.108
[root@k8s1 ~]# kubectl apply -f nfs.yaml
pod/pod-vol-nfs created
[root@k8s1 ~]# kubectl get pods -o wide
NAME          READY   STATUS    RESTARTS   AGE     IP            NODE   NOMINATED NODE   READINESS GATES
pod-vol-nfs   1/1     Running   0          2m21s   10.244.1.78   k8s2   <none>           <none>
[root@k8s1 ~]#
Create a file on the NFS server:
[root@liutie1 ~]# cd /data/v6/
[root@liutie1 v6]# echo 'nfs store' > index.html
[root@liutie1 v6]# cat index.html
nfs store
[root@liutie1 v6]#
Fetch the page from a k8s node:
[root@k8s1 ~]# curl 10.244.1.78 -- the pod's IP address
nfs store
[root@k8s1 ~]#
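The list at the top notes that multiple pods sharing one NFS export should each use their own directory. A sketch of a second pod doing that; the pod name and subdirectory are hypothetical, and the subdirectory must be created on the server first:
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-nfs2              # hypothetical second pod
  namespace: default
spec:
  containers:
  - name: pod-nfs
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html2
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html2
    nfs:
      path: /data/v6/pod2         # per-pod subdirectory inside the same export
      server: 172.16.8.108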
4. Using PV and PVC backed by NFS (fixed-size volumes)
On the NFS server:
[root@liutie1 ~]# mkdir /data/v{1,2,3,4,5} -- create the backing directories on the storage server
[root@liutie1 ~]# yum install nfs* -y -- install NFS
[root@liutie1 ~]# vim /etc/exports -- export the directories
/data/v1 172.16.8.0/24(rw,no_root_squash)
/data/v2 172.16.8.0/24(rw,no_root_squash)
/data/v3 172.16.8.0/24(rw,no_root_squash)
/data/v4 172.16.8.0/24(rw,no_root_squash)
/data/v5 172.16.8.0/24(rw,no_root_squash)
[root@liutie1 ~]# exportfs -arv
exporting 172.16.8.0/24:/data/v5
exporting 172.16.8.0/24:/data/v4
exporting 172.16.8.0/24:/data/v3
exporting 172.16.8.0/24:/data/v2
exporting 172.16.8.0/24:/data/v1
[root@liutie1 ~]# showmount -e
Export list for liutie1:
/data/v5 172.16.8.0/24
/data/v4 172.16.8.0/24
/data/v3 172.16.8.0/24
/data/v2 172.16.8.0/24
/data/v1 172.16.8.0/24
[root@liutie1 ~]#
On every node:
[root@k8s2 ~]# yum install nfs-utils -y -- every node must have nfs-utils installed or NFS mounts will fail
On the master:
[root@k8s1 ~]# yum install -y nfs-utils
[root@k8s1 ~]# kubectl explain PersistentVolume -- PV help
[root@k8s1 ~]# vim pv.yaml -- expose the remote NFS directories as PVs
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv001
  labels:
    name: pv001
spec:
  nfs:
    path: /data/v1
    server: 172.16.8.108
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv002
  labels:
    name: pv002
spec:
  nfs:
    path: /data/v2
    server: 172.16.8.108
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 15Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv003
  labels:
    name: pv003
spec:
  nfs:
    path: /data/v3
    server: 172.16.8.108
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv004
  labels:
    name: pv004
spec:
  nfs:
    path: /data/v4
    server: 172.16.8.108
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv005
  labels:
    name: pv005
spec:
  nfs:
    path: /data/v5
    server: 172.16.8.108
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 13Gi
[root@k8s1 ~]# kubectl apply -f pv.yaml -- create the PVs
persistentvolume/pv001 created
persistentvolume/pv002 created
persistentvolume/pv003 created
persistentvolume/pv004 created
persistentvolume/pv005 created
[root@k8s1 ~]# kubectl get pv -- list the PVs
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv001   5Gi        RWO,RWX        Retain           Available                                   2m40s
pv002   15Gi       RWO,RWX        Retain           Available                                   2m40s
pv003   1Gi        RWO,RWX        Retain           Available                                   2m40s
pv004   20Gi       RWO,RWX        Retain           Available                                   2m40s
pv005   13Gi       RWO,RWX        Retain           Available                                   2m40s
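RECLAIM POLICY reads Retain, the default for statically created PVs; it can also be set explicitly. A sketch, not part of the original pv.yaml (pv006 and its path are hypothetical; Recycle is deprecated, and Delete generally requires a provisioner that supports it):
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv006                               # hypothetical extra PV for this sketch
spec:
  persistentVolumeReclaimPolicy: Retain     # keep the data after the claim is released (the default)
  nfs:
    path: /data/v6
    server: 172.16.8.108
  accessModes: ["ReadWriteMany"]
  capacity:
    storage: 10Gi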
[root@k8s1 ~]# vim pvc.yaml -- create a PVC requesting 6Gi, plus a pod that uses it
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc                    # define a PVC named mypvc
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 6Gi
---
apiVersion: v1
kind: Pod                        # define a pod that uses the PVC
metadata:
  name: pod-vol-pvc
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html                 # mount the PVC-backed volume
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html
    persistentVolumeClaim:
      claimName: mypvc           # reference the mypvc claim defined above
[root@k8s1 ~]# kubectl apply -f pvc.yaml
persistentvolumeclaim/mypvc created
pod/pod-vol-pvc created
[root@k8s1 ~]# kubectl get pv
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM           STORAGECLASS   REASON   AGE
pv001   5Gi        RWO,RWX        Retain           Available                                           8m31s
pv002   15Gi       RWO,RWX        Retain           Available                                           8m31s
pv003   1Gi        RWO,RWX        Retain           Available                                           8m31s
pv004   20Gi       RWO,RWX        Retain           Available                                           8m31s
pv005   13Gi       RWO,RWX        Retain           Bound       default/mypvc                           8m31s -- Bound means this PV is in use
[root@k8s1 ~]# kubectl get pvc
NAME    STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
mypvc   Bound    pv005    13Gi       RWO,RWX                       2m31s -- mypvc bound to pv005: at 13Gi it is the smallest Available PV that satisfies the 6Gi ReadWriteMany request
[root@k8s1 ~]# kubectl get pods
NAME               READY   STATUS    RESTARTS   AGE
pod-demo           2/2     Running   0          141m
pod-vol-hostpath   1/1     Running   0          77m
pod-vol-pvc        1/1     Running   0          4s
[root@k8s1 ~]# kubectl describe pods pod-vol-pvc -- view details, including the bound claim under Volumes
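Because these PVs use the Retain policy, releasing the claim does not automatically make the volume bindable again. A hedged sketch of the cleanup behavior, not run in the original transcript:
[root@k8s1 ~]# kubectl delete pod pod-vol-pvc
[root@k8s1 ~]# kubectl delete pvc mypvc
[root@k8s1 ~]# kubectl get pv pv005 -- STATUS now shows Released; clear spec.claimRef or recreate the PV to make it Available again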
That covers how Kubernetes uses emptyDir, hostPath, NFS, PV, and PVC for storage. Hopefully working through these examples has added something useful to your toolbox.