1.部署nfs服务

NFS 客户端主机安装
yum -y install nfs-utils
NFS 服务端主机安装(服务端同样需要 nfs-utils,另加 rpcbind)
yum -y install nfs-utils rpcbind

安装nfs客户端:
sudo apt-get install nfs-common
安装服务器端:
sudo apt-get install nfs-kernel-server

新建共享目录(/etc/exports 每行必须以导出路径开头,后跟客户端及选项)
mkdir -p /mnt/nfs/shared_dir
echo "/mnt/nfs/shared_dir *(rw,sync,no_root_squash)" >> /etc/exports

chmod -R 777 /mnt/nfs/shared_dir

"" 可以用 IP地址代替,比如192.168.1.

启动NFS服务
systemctl start rpcbind nfs-server && systemctl enable rpcbind nfs-server
sudo /etc/init.d/nfs-kernel-server restart

配置hosts表解析,全部主机都需要配置
echo "nfs主机ip nfs.kubernetes.local" >> /etc/hosts

查询共享
showmount -e nfs.kubernetes.local | grep share

2.创建授权账户信息

vim rbac.yaml

----------
# ServiceAccount the nfs-client-provisioner pod runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
# Cluster-wide permissions: the provisioner creates/deletes PVs,
# watches PVCs and StorageClasses, and records events.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
# Grant the ClusterRole above to the ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced Role for leader election: the provisioner takes an
# endpoints-based lock so only one replica manages volumes at a time.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Grant the leader-election Role to the ServiceAccount.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

leader-locking-nfs-client-provisioner是对nfs-client-provisioner的一个选举,防止多实例同时管理卷情况的出现
kubectl apply -f rbac.yaml

3.部署nfs-client-provisioner的deployment

vim nfs_client_provisioner.yaml

# Deployment of the NFS dynamic-provisioning controller.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate  # never run two instances side-by-side during a rollout
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      # ServiceAccount created in rbac.yaml
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: 172.20.58.152/kubernetes/nfs-client-provisioner:latest # replace with your local registry image
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs  # must match the StorageClass "provisioner" field
            - name: NFS_SERVER
              value: 172.20.58.83   # NFS server address
            - name: NFS_PATH
              # NOTE: must be identical to the "nfs-client-root" volume path below;
              # the original had "/data/" here, which did not match "/data/nfs_data".
              value: /data/nfs_data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.20.58.83   # NFS server address (keep in sync with NFS_SERVER)
            path: /data/nfs_data   # NFS exported directory (keep in sync with NFS_PATH)

kubectl apply -f nfs_client_provisioner.yaml
kubectl get pod -A | grep nfs

4.创建storageclass

vim nfs-storage-class.yaml

# StorageClass that routes PVC requests to the nfs-client-provisioner.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false" # When set to "false" your PVs will not be archived
                           # by the provisioner upon deletion of the PVC.

kubectl apply -f nfs-storage-class.yaml
kubectl get sc

5.测试

创建pvc

# Test PVC: a matching PV should be provisioned automatically.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  # NOTE: the original carried an "annotations:" block with the bare key
  # "volume.kubernetes.io/storage-provisioner" (no value) — invalid YAML
  # that breaks `kubectl apply`. The annotation is set by the control
  # plane itself; storageClassName below is all that is needed.
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
  storageClassName: managed-nfs-storage

使用

# Test pod: mounts the PVC and drops a marker file on the NFS share.
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: 172.20.58.152/middleware/busybox:1.36
    command:
      - "/bin/sh"
    args:
      - "-c"
      # exits 0 if the file is created on the share, 1 otherwise
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
最后修改:2025 年 06 月 27 日
如果觉得我的文章对你有用,请随意赞赏