Using a storage volume directly in a Pod takes two steps:

  1. Define the volume (its name and type) under the Pod's volumes field.
  2. Mount the volume defined in volumes inside the container via the container's volumeMounts field (a generic sketch follows this list).
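
A minimal, generic sketch of those two steps (the pod name, image, and volume name here are placeholders, not part of the demos below):

apiVersion: v1
kind: Pod
metadata:
  name: volume-skeleton          # placeholder name
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:                # step 2: mount the volume inside the container
    - name: data
      mountPath: /data
  volumes:                       # step 1: define the volume (name + type)
  - name: data
    emptyDir: {}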

The emptyDir type

An emptyDir volume provides no persistent storage; its lifetime is tied to the Pod rather than to any single container: the data survives container restarts, but is deleted permanently when the Pod is removed from its node.

emptyDir example

1. Create the manifest

root@k8s-master01:~/yaml/chapter05# vim volumes-emptydir-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volumes-emptydir-demo
  namespace: default
spec:
  initContainers:
  - name: config-file-downloader
    image: ikubernetes/admin-box
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","wget -O /data/envoy.yaml http://ilinux.io/envoy.yaml"]
    volumeMounts:
    - name: config-file-store
      mountPath: /data
  containers:
  - name: envoy
    image: envoyproxy/envoy-alpine:v1.13.1
    command: ['/bin/sh','-c']
    args: ['envoy -c /etc/envoy/envoy.yaml']
    volumeMounts:               # mount the volume
    - name: config-file-store
      mountPath: /etc/envoy
      readOnly: true
  volumes:                      # define the volume
  - name: config-file-store     # volume name
    emptyDir:                   # volume type
      medium: Memory            # storage medium; the default is the node's disk
      sizeLimit: 16Mi

2. Apply the manifest

root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-emptydir-demo.yaml
pod/volumes-emptydir-demo created
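
Before checking the mount, it is worth waiting until the pod reports Running (the init container has to finish downloading the config file first); a quick check, output omitted:

root@k8s-master01:~/yaml/chapter05# kubectl get pods volumes-emptydir-demo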

3. Check the mount inside the container

# Check whether the config file was successfully mounted into the container
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-emptydir-demo -- cat /etc/envoy/envoy.yaml
Defaulted container "envoy" out of: envoy, config-file-downloader (init)
admin:
  access_log_path: /tmp/admin_access.log
  address:
    socket_address: { address: 0.0.0.0, port_value: 9901 }

static_resources:
  listeners:
  - name: listener_0
    address:
      socket_address: { address: 0.0.0.0, port_value: 80 }
    filter_chains:
    - filters:
      - name: envoy.http_connection_manager
        config:
          stat_prefix: ingress_http
          codec_type: AUTO
          route_config:
            name: local_route
            virtual_hosts:
            - name: local_service
              domains: ["*"]
              routes:
              - match: { prefix: "/" }
                route: { cluster: local_service }
          http_filters:
          - name: envoy.router

  clusters:
  - name: local_service
    connect_timeout: 0.25s
    type: STATIC
    lb_policy: ROUND_ROBIN
    load_assignment:
      cluster_name: local_service
      endpoints:
      - lb_endpoints:
        - endpoint:
            address:
              socket_address:
                address: 127.0.0.1
                port_value: 8080

# Check the listening ports inside the container
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-emptydir-demo -- netstat -tnl
Defaulted container "envoy" out of: envoy, config-file-downloader (init)
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:9901 0.0.0.0:* LISTEN

# Check the volume mount
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-emptydir-demo -- mount | grep envoy
Defaulted container "envoy" out of: envoy, config-file-downloader (init)
tmpfs on /etc/envoy type tmpfs (ro,relatime)

# The volume was mounted successfully.
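
On the node side, kubelet backs this volume with a tmpfs (because medium is Memory) mounted under the pod's directory. A rough way to see it, assuming you log in to the node that runs the pod (the node name and pod UID below are illustrative):

# on the node running the pod
root@k8s-nodeXX:~# mount | grep empty-dir
# the data lives under /var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~empty-dir/config-file-store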

The hostPath type

A hostPath volume mounts an existing path on the node's local filesystem into the specified mount point inside the container.

A hostPath volume can also declare a type that controls how the path is validated or created (a short sketch showing where the type field goes follows this list):

  • File: a file that must already exist at the path;
  • Directory: a directory that must already exist at the path;
  • DirectoryOrCreate: if the path does not exist, an empty directory is created with 0755 permissions, owned by kubelet;
  • FileOrCreate: if the path does not exist, an empty file is created with 0644 permissions, owned by kubelet;
  • Socket: a UNIX socket file that must already exist at the path;
  • CharDevice: a character device file that must already exist at the path;
  • BlockDevice: a block device file that must already exist at the path;
  • "": the empty string (the default) performs no checks before mounting the hostPath volume.
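
A minimal, hypothetical volumes entry showing the type field (the volume name and path here are illustrative and not used by the demo that follows):

  volumes:
  - name: applog
    hostPath:
      path: /data/applog          # path on the node
      type: DirectoryOrCreate     # create it with mode 0755 if it does not exist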

hostPath example

1. Write the manifest

root@k8s-master01:~/yaml/chapter05# vim volumes-hostpath-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volumes-hostpath-demo
spec:
  containers:
  - name: filebeat
    image: ikubernetes/filebeat:5.6.7-alpine
    env:
    - name: REDIS_HOST
      value: redis.ilinux.io:6379
    - name: LOG_LEVEL
      value: info
    volumeMounts:
    - name: varlog
      mountPath: /var/log
    - name: socket
      mountPath: /var/run/docker.sock
    - name: varlibdockercontainers
      mountPath: /var/lib/docker/containers
      readOnly: true
  volumes:
  - name: varlog
    hostPath:
      path: /var/log
  - name: varlibdockercontainers
    hostPath:
      path: /var/lib/docker/containers
  - name: socket
    hostPath:
      path: /var/run/docker.sock

2. Apply the manifest

root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-hostpath-demo.yaml 
pod/volumes-hostpath-demo created

# Check the pod's details
root@k8s-master01:~/yaml/chapter05# kubectl describe pods volumes-hostpath-demo
Name:         volumes-hostpath-demo
Namespace:    default
Priority:     0
Node:         k8s-node03/172.16.11.83
Start Time:   Tue, 13 Jul 2021 06:28:13 +0000
Labels:       <none>
Annotations:  <none>
Status:       Running
IP:           10.244.3.79
IPs:
  IP:  10.244.3.79
Containers:
  filebeat:
    Container ID:   docker://46a9666f684bb0ac85c71d36c276546ab48b8195efb34ac061bdb26f09468a1a
    Image:          ikubernetes/filebeat:5.6.7-alpine
    Image ID:       docker-pullable://ikubernetes/filebeat@sha256:3957f67b612aa8628f643f8ede02b71bfbabf34892ef136f1e5ee18bbc0775aa
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Tue, 13 Jul 2021 06:28:24 +0000
    Ready:          True
    Restart Count:  0
    Environment:
      REDIS_HOST:  redis.ilinux.io:6379
      LOG_LEVEL:   info
    Mounts:        # the container's volume mounts
      /var/lib/docker/containers from varlibdockercontainers (ro)
      /var/log from varlog (rw)
      /var/run/docker.sock from socket (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wqhfx (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  varlog:
    Type:          HostPath (bare host directory volume)
    Path:          /var/log
    HostPathType:
  varlibdockercontainers:
    Type:          HostPath (bare host directory volume)
    Path:          /var/lib/docker/containers
    HostPathType:
  socket:
    Type:          HostPath (bare host directory volume)
    Path:          /var/run/docker.sock
    HostPathType:
  kube-api-access-wqhfx:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  55s   default-scheduler  Successfully assigned default/volumes-hostpath-demo to k8s-node03
  Normal  Pulling    53s   kubelet            Pulling image "ikubernetes/filebeat:5.6.7-alpine"
  Normal  Pulled     45s   kubelet            Successfully pulled image "ikubernetes/filebeat:5.6.7-alpine" in 8.108131139s
  Normal  Created    44s   kubelet            Created container filebeat
  Normal  Started    44s   kubelet            Started container filebeat   # the container has started
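
A quick way to confirm the hostPath mounts actually work is to list the node's log directory from inside the container; the file names should match what is on k8s-node03 (output omitted here):

# the container sees the node's /var/log contents
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-hostpath-demo -- ls /var/log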

The NFS volume type

NFS is a network-backed volume type; every Kubernetes node that may run the Pod must be able to mount NFS file systems, which means the NFS client utilities have to be installed on each node.

NFS example

1. Set up an NFS server

# Check the NFS server's address
[root@nfs ~]# ip a show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 52:54:00:f5:86:46 brd ff:ff:ff:ff:ff:ff
inet 172.16.11.79/24 brd 172.16.11.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::128c:e1f8:720f:875f/64 scope link noprefixroute
valid_lft forever preferred_lft forever

# Create the directory to be shared
[root@nfs ~]# mkdir -pv /data/redis
mkdir: created directory '/data'
mkdir: created directory '/data/redis'

# Give UID 999 read/write access to /data/redis (redis in the pod will run as this UID)
[root@nfs ~]# chown 999 /data/redis
[root@nfs ~]# ls -ld /data/redis
drwxr-xr-x 2 systemd-coredump root 4096 Jul 13 14:47 /data/redis
[root@nfs ~]# id systemd-coredump
uid=999(systemd-coredump) gid=997(systemd-coredump) groups=997(systemd-coredump)

# Configure the NFS export
[root@nfs ~]# vim /etc/exports
/data/redis 172.16.11.0/24(rw)

# Install nfs-utils
[root@nfs ~]# dnf install nfs-utils -y

# Start the NFS server
[root@nfs ~]# systemctl start nfs-server.service

# Verify that port 2049 is being listened on
[root@nfs ~]# ss -tnl | grep 2049
LISTEN 0 64 0.0.0.0:2049 0.0.0.0:*
LISTEN 0 64 [::]:2049 [::]:*
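
It is also worth confirming that the export itself is published before moving on; a quick check on the NFS server (output omitted):

# list the active exports
[root@nfs ~]# exportfs -v
[root@nfs ~]# showmount -e localhost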

2. Install the NFS client on every Kubernetes node

root@k8s-node01:~# apt install nfs-common

# Test a manual mount
root@k8s-node02:~# mount -t nfs 172.16.11.79:/data/redis /mnt

# Verify that it is mounted
root@k8s-node02:~# mount | grep mnt
172.16.11.79:/data/redis on /mnt type nfs4 (rw,relatime,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=172.16.11.82,local_lock=none,addr=172.16.11.79)
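
Once the test succeeds, unmount it again; the pod's NFS volume will be mounted by kubelet, not by this manual mount:

# clean up the manual test mount
root@k8s-node02:~# umount /mnt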

3. Write the manifest

root@k8s-master01:~/yaml/chapter05# vim volumes-nfs-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: volumes-nfs-demo
  labels:
    app: redis
spec:
  containers:
  - name: redis
    image: redis:alpine
    ports:
    - containerPort: 6379
      name: redisport
    securityContext:
      runAsUser: 999    # the container runs as UID 999; make sure UID 999 has write access to the exported directory on the NFS server
    volumeMounts:
    - mountPath: /data
      name: redisdata
  volumes:
  - name: redisdata
    nfs:
      server: 172.16.11.79
      path: /data/redis
      readOnly: false

4. Apply the manifest

root@k8s-master01:~/yaml/chapter05# kubectl apply -f volumes-nfs-demo.yaml 
pod/volumes-nfs-demo created

5. Check the pod's details

root@k8s-master01:~/yaml/chapter05# kubectl describe pod volumes-nfs-demo
Name:         volumes-nfs-demo
Namespace:    default
Priority:     0
Node:         k8s-node01/172.16.11.81
Start Time:   Tue, 13 Jul 2021 07:22:24 +0000
Labels:       app=redis
Annotations:  <none>
Status:       Running
IP:           10.244.1.50
IPs:
  IP:  10.244.1.50
Containers:
  redis:
    Container ID:   docker://d06c8e430cb3b38c9e266ee50b1caeaf834f4b51b6270540735eff780bc2f968
    Image:          redis:alpine
    Image ID:       docker-pullable://redis@sha256:442fbfdeccf203c277827cfd8e7e727ce411611e1a6caeda9cca8115ed17b9cc
    Port:           6379/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Tue, 13 Jul 2021 07:22:40 +0000
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /data from redisdata (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wppkn (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  redisdata:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    172.16.11.79
    Path:      /data/redis
    ReadOnly:  false
  kube-api-access-wppkn:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  87s   default-scheduler  Successfully assigned default/volumes-nfs-demo to k8s-node01
  Normal  Pulling    86s   kubelet            Pulling image "redis:alpine"
  Normal  Pulled     74s   kubelet            Successfully pulled image "redis:alpine" in 11.045849701s
  Normal  Created    73s   kubelet            Created container redis
  Normal  Started    72s   kubelet            Started container redis
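
To verify that the data written by Redis actually lands on the NFS export, a rough check (the key name is arbitrary; the RDB dump file is typically named dump.rdb):

# write a key and force an RDB save inside the pod
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-nfs-demo -- redis-cli set testkey hello
root@k8s-master01:~/yaml/chapter05# kubectl exec volumes-nfs-demo -- redis-cli bgsave

# the dump file should now show up in the exported directory on the NFS server
[root@nfs ~]# ls -l /data/redis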