01 Configure Permissions
Look up the aws-auth ConfigMap to find the IAM Role attached to the worker nodes.
Terminal
kubectl -n kube-system describe configmap aws-auth
Output
Name: aws-auth
Namespace: kube-system
Labels: <none>
Annotations: <none>
Data
====
mapRoles:
----
- groups:
  - system:bootstrappers
  - system:nodes
  rolearn: arn:aws:iam::xxxxxxx:role/eksctl-eksdemo1-nodegroup-eksdemo-NodeInstanceRole-12PYV622VGSUQ
  username: system:node:{{EC2PrivateDNSName}}
BinaryData
====
Events: <none>
In the IAM console, locate this Role and attach the AmazonEBSCSIDriverPolicy policy to it.
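Alternatively, the policy can be attached from the command line. A minimal sketch, assuming the AWS CLI is configured and using the node role name from the ConfigMap output above (replace it with your own role name):
Terminal
# Attach the AWS-managed EBS CSI driver policy to the worker node role
aws iam attach-role-policy \
  --role-name eksctl-eksdemo1-nodegroup-eksdemo-NodeInstanceRole-12PYV622VGSUQ \
  --policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy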
02 Deploy the Amazon EBS CSI Driver
Install the CSI Driver
Terminal
kubectl apply -k "github.com/kubernetes-sigs/aws-ebs-csi-driver/deploy/kubernetes/overlays/stable/?ref=master"
Output
serviceaccount/ebs-csi-controller-sa created
serviceaccount/ebs-csi-node-sa created
role.rbac.authorization.k8s.io/ebs-csi-leases-role created
clusterrole.rbac.authorization.k8s.io/ebs-csi-node-role created
clusterrole.rbac.authorization.k8s.io/ebs-external-attacher-role created
clusterrole.rbac.authorization.k8s.io/ebs-external-provisioner-role created
clusterrole.rbac.authorization.k8s.io/ebs-external-resizer-role created
clusterrole.rbac.authorization.k8s.io/ebs-external-snapshotter-role created
rolebinding.rbac.authorization.k8s.io/ebs-csi-leases-rolebinding created
clusterrolebinding.rbac.authorization.k8s.io/ebs-csi-attacher-binding created
clusterrolebinding.rbac.authorization.k8s.io/ebs-csi-node-getter-binding created
clusterrolebinding.rbac.authorization.k8s.io/ebs-csi-provisioner-binding created
clusterrolebinding.rbac.authorization.k8s.io/ebs-csi-resizer-binding created
clusterrolebinding.rbac.authorization.k8s.io/ebs-csi-snapshotter-binding created
deployment.apps/ebs-csi-controller created
poddisruptionbudget.policy/ebs-csi-controller created
daemonset.apps/ebs-csi-node created
csidriver.storage.k8s.io/ebs.csi.aws.com created
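Note that the command above tracks the master branch. For reproducible installs it is usually safer to pin the overlay to a release branch; a sketch, assuming a branch such as release-1.25 exists (check the repository for the branches/tags available to you):
Terminal
# Pin the kustomize overlay to a specific release branch instead of master
kubectl apply -k "github.com/kubernetes-sigs/aws-ebs-csi-driver/deploy/kubernetes/overlays/stable/?ref=release-1.25"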
Verify the installation succeeded
Terminal
kubectl get pods -n kube-system
Output
NAME READY STATUS RESTARTS AGE
aws-node-dkxwz 1/1 Running 0 25h
aws-node-qqdnm 1/1 Running 0 25h
coredns-7975d6fb9b-4pxrl 1/1 Running 0 26h
coredns-7975d6fb9b-4w2qf 1/1 Running 0 26h
ebs-csi-controller-6cb877dd5b-9qr57 6/6 Running 0 78s
ebs-csi-controller-6cb877dd5b-n4jf8 6/6 Running 0 78s
ebs-csi-node-952qw 3/3 Running 0 76s
ebs-csi-node-kzwxc 3/3 Running 0 76s
kube-proxy-7wpsl 1/1 Running 0 25h
kube-proxy-m58q2 1/1 Running 0 25h
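As an extra check, the installation also registers a CSIDriver object; a quick optional verification:
Terminal
# The driver registers itself under the name ebs.csi.aws.com
kubectl get csidriver ebs.csi.aws.com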
03 Create an EBS StorageClass Object
Create the resource
01-storageClass.yml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ebs-sc
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
Terminal
kubectl apply -f 01-storageClass.yml
Verify the resource was created
Terminal
kubectl describe sc/ebs-sc
Output
Name: ebs-sc
IsDefaultClass: No
Annotations: kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{},"name":"ebs-sc"},"provisioner":"ebs.csi.aws.com","volumeBindingMode":"WaitForFirstConsumer"}
Provisioner: ebs.csi.aws.com
Parameters: <none>
AllowVolumeExpansion: <unset>
MountOptions: <none>
ReclaimPolicy: Delete
VolumeBindingMode: WaitForFirstConsumer
Events: <none>
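The StorageClass above relies on the driver defaults for volume type and encryption. Parameters such as type and encrypted can be set explicitly, and allowVolumeExpansion enables resizing PVCs later; a hypothetical variant for illustration only (the file name ebs-sc-gp3.yml and the values are examples, not used elsewhere in this walkthrough):
ebs-sc-gp3.yml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ebs-sc-gp3
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true   # allow PVCs using this class to be resized
parameters:
  type: gp3                  # EBS volume type
  encrypted: "true"          # encrypt volumes with the account's default KMS key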
04 Create a PVC Object Using the EBS StorageClass
Create the PVC
02-pvc.yml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ebs-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ebs-sc
  resources:
    requests:
      storage: 1Gi
Terminal
kubectl apply -f 02-pvc.yml
Verify the PVC stays in Pending status, waiting for a consumer
Terminal
kubectl get pvc/ebs-pvc -owide
Output
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
ebs-pvc Pending ebs-sc 8m57s Filesystem
03-storage # kubectl describe pvc/ebs-pvc
Name: ebs-pvc
Namespace: default
StorageClass: ebs-sc
Status: Pending
Volume:
Labels: <none>
Annotations: <none>
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Filesystem
Used By: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal WaitForFirstConsumer 3m17s (x26 over 9m20s) persistentvolume-controller waiting for first consumer to be created before binding
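Because of WaitForFirstConsumer, no PersistentVolume (and no EBS volume) is provisioned until a pod that uses the claim is scheduled. A quick way to confirm nothing has been provisioned yet (assuming no other PVs exist in the cluster):
Terminal
# No PV should exist for this claim until a consuming pod is scheduled
kubectl get pv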
05 Create a Pod That Mounts the PVC
Mount the PVC
03-simple-pvc.yml
apiVersion: v1
kind: Pod
metadata:
  name: simple-pvc
spec:
  containers:
    - name: demo
      image: nginx
      volumeMounts:
        - name: pvc-demo
          mountPath: /data
  volumes:
    - name: pvc-demo
      persistentVolumeClaim:
        claimName: ebs-pvc
Terminal
kubectl apply -f 03-simple-pvc.yml
Verify the Volume is mounted
Terminal
kubectl describe pod/simple-pvc
Output
Name: simple-pvc
Namespace: default
Priority: 0
Service Account: default
Node: ip-192-168-33-3.ec2.internal/192.168.33.3
Start Time: Tue, 08 Aug 2023 23:16:17 +0800
Labels: <none>
Annotations: <none>
Status: Running
IP: 192.168.50.211
IPs:
IP: 192.168.50.211
Containers:
demo:
Container ID: containerd://2fba97133bf92f80a87795dd73a54ecaec5375a97acc2cc3f6f60897206b920b
Image: nginx
Image ID: docker.io/library/nginx@sha256:67f9a4f10d147a6e04629340e6493c9703300ca23a2f7f3aa56fe615d75d31ca
Port: <none>
Host Port: <none>
State: Running
Started: Tue, 08 Aug 2023 23:16:25 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/data from pvc-demo (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-r7lz6 (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
pvc-demo:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: ebs-pvc
ReadOnly: false
kube-api-access-r7lz6:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 13s default-scheduler Successfully assigned default/simple-pvc to ip-192-168-33-3.ec2.internal
Normal SuccessfulAttachVolume 11s attachdetach-controller AttachVolume.Attach succeeded for volume "pvc-2e0565ec-c262-4892-8332-3470de8cc954"
Normal Pulling 5s kubelet Pulling image "nginx"
Normal Pulled 5s kubelet Successfully pulled image "nginx" in 163.279957ms (163.298555ms including waiting)
Normal Created 5s kubelet Created container demo
Normal Started 5s kubelet Started container demo
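Once the pod is scheduled, the claim should leave Pending and bind to a dynamically provisioned volume. A quick check, assuming the PVC name used above:
Terminal
# The PVC should now report STATUS Bound with a 1Gi volume
kubectl get pvc ebs-pvc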
Verify the EBS Volume
Verify that a 1Gi EBS volume has been created and is currently attached.
Terminal
aws ec2 describe-volumes --filters Name=size,Values=1 --query "Volumes[*].[VolumeId,State,Size]" --output text
Output
vol-0c053a4248a03a380 in-use 1
06 Verify EBS Volume Persistence
Create a file
Exec into the pod and create a test.txt file under the Volume's mount directory.
Terminal
kubectl exec -it pod/simple-pvc -- /bin/bash
Output
root@simple-pvc:/# ls
bin boot data dev docker-entrypoint.d docker-entrypoint.sh etc home lib lib32 lib64 libx32 media mnt opt proc root run sbin srv sys tmp usr var
root@simple-pvc:/# cd data/
root@simple-pvc:/data# ls
lost+found
root@simple-pvc:/data# touch test.txt
root@simple-pvc:/data#
Delete the Pod
Terminal
kubectl delete -f 03-simple-pvc.yml
Output
pod "simple-pvc" deleted确认 Volume 状态
Verify that the EBS volume is now in the available state, i.e. it is no longer attached.
Terminal
aws ec2 describe-volumes --filters Name=size,Values=1 --query "Volumes[*].[VolumeId,State,Size]" --output text
Output
vol-0c053a4248a03a380 available 1
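The volume is detached but not deleted: the PVC still exists, so its bound PV (and the EBS volume behind it) is kept, along with the data written earlier. A quick check, assuming the names used above:
Terminal
# The PV and PVC should still be Bound even though no pod is using them
kubectl get pv,pvc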
Create a new pod
04-other-pod-pvc.yml
apiVersion: v1
kind: Pod
metadata:
  name: other-pod-pvc
spec:
  containers:
    - name: demo
      image: nginx
      volumeMounts:
        - name: pvc-demo
          mountPath: /data2
  volumes:
    - name: pvc-demo
      persistentVolumeClaim:
        claimName: ebs-pvc
Terminal
kubectl apply -f 04-other-pod-pvc.yml
Verify that the data written earlier still exists in the new pod
Terminal
kubectl apply -f 04-other-pod-pvc.yml
Output
pod/other-pod-pvc created
03-storage # aws ec2 describe-volumes --filters Name=size,Values=1 --query "Volumes[*].[VolumeId,State,Size]" --output text
vol-0c053a4248a03a380 in-use 1
03-storage # kubectl exec -it pod/other-pod-pvc -- /bin/bash
root@other-pod-pvc:/# cd /data2/
root@other-pod-pvc:/data2# ls
lost+found test.txt
root@other-pod-pvc:/data2#
07 Clean Up Resources
Delete the related resources and verify.
Terminal
kubectl delete -f 04-other-pod-pvc.yml
Output
pod "other-pod-pvc" deletedTerminal
kubectl delete -f 02-pvc.yml
Output
persistentvolumeclaim "ebs-pvc" deleted
Terminal
aws ec2 describe-volumes --filters Name=size,Values=1 --query "Volumes[*].[VolumeId,State,Size]" --output text
Output
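Because the StorageClass uses the default reclaim policy Delete (as shown in the describe output in step 03), deleting the PVC should also delete the dynamically provisioned EBS volume, so this command is expected to return no 1 GiB volumes.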