Deploying Ceph CSI v3.11 Against a Ceph Octopus Cluster on Kubernetes v1.27.1
Environment
Kubernetes version: v1.27.1 (6-node cluster)
Ceph version: Octopus 15.2.17
Ceph CSI version: v3.11.0
1. Ceph Cluster Preparation
1.1 Create the RBD pool and a dedicated K8s user
# Create the storage pool
ceph osd pool create kubernetes 128 128
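Per the upstream Ceph documentation, a freshly created pool should also be initialized for RBD before any images are written to it:
# Initialize the new pool for RBD use
rbd pool init kubernetes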
# Create a dedicated K8s user and grant it RBD permissions
ceph auth get-or-create client.kubernetes \
    mon 'profile rbd' \
    osd 'profile rbd pool=kubernetes' \
    mgr 'profile rbd pool=kubernetes'
# Retrieve the user's key (important!)
ceph auth get-key client.kubernetes
# Get the cluster ID (fsid) and the monitor addresses
[root@ceph01 /root]# ceph mon dump
dumped monmap epoch 3
epoch 3
fsid 232143b06-3ae7-11f1-3a86-bc99c824231a
min_mon_release 15 (octopus)
0: [v2:192.168.66.103:3300/0,v1:192.168.66.103:6789/0] mon.ceph01
1: [v2:192.168.66.102:3300/0,v1:192.168.66.102:6789/0] mon.ceph02
2: [v2:192.168.66.104:3300/0,v1:192.168.66.104:6789/0] mon.ceph03
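The cluster ID by itself can also be read directly, which is handy when filling in the manifests later:
# Print only the fsid
ceph fsid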
1.2 Verify the pool and user
ceph osd lspools | grep kubernetes
ceph auth list | grep client.kubernetes
2. Fetch the Ceph CSI Deployment Files
# Download the deployment templates for the pinned release
wget https://github.com/ceph/ceph-csi/archive/refs/tags/v3.11.0.tar.gz
tar -zxvf v3.11.0.tar.gz
cd ceph-csi-3.11.0/deploy/rbd/kubernetes
tree ./
.
├── csi-config-map.yaml
├── csidriver.yaml
├── csi-nodeplugin-rbac.yaml
├── csi-provisioner-rbac.yaml
├── csi-rbdplugin-provisioner.yaml
└── csi-rbdplugin.yaml
# GitHub source: https://github.com/ceph/ceph-csi/tree/v3.11.0
3. Create the Ceph CSI Namespace
# 01-csi-ns.yaml
cat <<EOF > 01-csi-ns.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: ceph-csi
EOF
kubectl apply -f 01-csi-ns.yaml
4. Deploy the RBAC Configuration
# Point the RBAC manifests at the ceph-csi namespace
sed -i 's/namespace: default/namespace: ceph-csi/g' *.yaml
# Deploy the provisioner RBAC
mv csi-provisioner-rbac.yaml 02-csi-provisioner-rbac.yaml
kubectl apply -f 02-csi-provisioner-rbac.yaml
# Deploy the node-plugin RBAC
mv csi-nodeplugin-rbac.yaml 03-csi-nodeplugin-rbac.yaml
kubectl apply -f 03-csi-nodeplugin-rbac.yaml
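As a quick sanity check, the service accounts defined by the two RBAC manifests should now exist in the ceph-csi namespace (the exact object names come from the upstream templates):
kubectl -n ceph-csi get serviceaccounts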
5. Create the Ceph Configuration ConfigMaps
mv csi-config-map.yaml 04-csi-config-map.yaml
cat <<EOF > 04-csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: "ceph-csi-config"
  namespace: ceph-csi
data:
  config.json: |-
    [
      {
        "clusterID": "232143b06-3ae7-11f1-3a86-bc99c824231a",
        "monitors": [
          "192.168.66.103:6789",
          "192.168.66.102:6789",
          "192.168.66.104:6789"
        ]
      }
    ]
EOF
cat <<EOF > 05-ceph-conf.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-config
  namespace: ceph-csi
data:
  ceph.conf: |
    [global]
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
  # this key must be kept even when empty
  keyring: |
EOF
kubectl apply -f 04-csi-config-map.yaml -f 05-ceph-conf.yaml
Key point:
The keyring: | field must be present even though it is empty; Ceph CSI requires it. The actual credentials are passed in via a Secret.
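A quick check that both ConfigMaps landed in the namespace before moving on:
kubectl -n ceph-csi get configmap ceph-csi-config ceph-config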
6. Create the Ceph Authentication Secret
cat <<EOF > 06-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: ceph-csi
data:
  # echo -n "kubernetes" | base64
  userID: a3ViZXJuZXRlcw==   # kubernetes
  # echo -n \$(ceph auth get-key client.kubernetes) | base64
  userKey: QVFCb1dtTzJacjZRR2hBQUFYa3YrVUY4Nmg4d0xJZ2c0RlE9PQ==
EOF
kubectl apply -f 06-secret.yaml
Security note: generate and store the key through a secure workflow instead of hard-coding it in YAML.
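One way to follow that advice is to skip the hand-encoded YAML entirely and let kubectl build the Secret from the Ceph CLI output (a sketch; it assumes kubectl and ceph are both usable from the same shell):
kubectl -n ceph-csi create secret generic csi-rbd-secret \
    --from-literal=userID=kubernetes \
    --from-literal=userKey="$(ceph auth get-key client.kubernetes)"
kubectl handles the base64 encoding itself, so the raw key never needs to be pasted into a file.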
7. Deploy the CSIDriver Object
mv csidriver.yaml 07-csidriver.yaml
cat << EOF > 07-csidriver.yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: "rbd.csi.ceph.com"
spec:
  attachRequired: true
  podInfoOnMount: false
  seLinuxMount: true
  fsGroupPolicy: File
EOF
kubectl apply -f 07-csidriver.yaml
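Confirm the driver object registered:
kubectl get csidriver rbd.csi.ceph.com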
8. Deploy the Provisioner and NodePlugin
mv csi-rbdplugin-provisioner.yaml 08-csi-rbdplugin-provisioner.yaml
mv csi-rbdplugin.yaml 09-csi-rbdplugin.yaml
Key modification: in both 08-csi-rbdplugin-provisioner.yaml and 09-csi-rbdplugin.yaml, comment out the KMS-related volume mount and volume, since encryption is not used in this setup:
#- name: ceph-csi-encryption-kms-config
#  mountPath: /etc/ceph-csi-encryption-kms-config/
#- name: ceph-csi-encryption-kms-config
#  configMap:
#    name: ceph-csi-encryption-kms-config
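After editing, a quick grep confirms that every remaining KMS reference is a commented-out line:
grep -n "kms-config" 08-csi-rbdplugin-provisioner.yaml 09-csi-rbdplugin.yaml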
Deploy the manifests:
kubectl apply -f 08-csi-rbdplugin-provisioner.yaml
kubectl apply -f 09-csi-rbdplugin.yaml
Verify the pod status:
kubectl -n ceph-csi get pods -l app=csi-rbdplugin-provisioner
kubectl -n ceph-csi get pods -l app=csi-rbdplugin
$ kubectl -n ceph-csi get pods -l app=csi-rbdplugin-provisioner
NAME                                         READY   STATUS    RESTARTS   AGE
csi-rbdplugin-provisioner-676cc49476-lk6lt   7/7     Running   0          106m
csi-rbdplugin-provisioner-676cc49476-rkpf7   7/7     Running   0          106m
csi-rbdplugin-provisioner-676cc49476-tldj9   7/7     Running   0          106m
$ kubectl -n ceph-csi get pods -l app=csi-rbdplugin
NAME                  READY   STATUS    RESTARTS   AGE
csi-rbdplugin-jktcj   3/3     Running   0          106m
csi-rbdplugin-knwrk   3/3     Running   0          106m
csi-rbdplugin-mvcj9   3/3     Running   0          106m
csi-rbdplugin-wlwj8   3/3     Running   0          106m
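If any pod is stuck short of Running, the plugin container logs are the first place to look (a sketch; the container name here follows the upstream manifests):
kubectl -n ceph-csi logs ds/csi-rbdplugin -c csi-rbdplugin --tail=20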
9. Create the StorageClass
# 10-ceph-csi.yaml
cat << EOF > 10-ceph-csi.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-csi
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 232143b06-3ae7-11f1-3a86-bc99c824231a   # the Ceph fsid
  pool: kubernetes
  imageFeatures: layering,exclusive-lock
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: ceph-csi
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: ceph-csi
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: Immediate
mountOptions:
  - discard
EOF
kubectl apply -f 10-ceph-csi.yaml
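Check the StorageClass before testing it:
kubectl get sc ceph-csi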
10. Verify the Deployment
Create a test PVC:
cat <<EOF > 11-test.pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-rbd-test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-csi   # must match the StorageClass created above
  resources:
    requests:
      storage: 100Mi
EOF
kubectl apply -f 11-test.pvc.yaml
Check the resource status:
$ kubectl get pvc ceph-rbd-test-pvc
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
ceph-rbd-test-pvc   Bound    pvc-e2260750-e191-4639-a687-218fc1602492   100Mi      RWO            ceph-csi       31s
$ kubectl get pv pvc-e2260750-e191-4639-a687-218fc1602492
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS   REASON   AGE
pvc-e2260750-e191-4639-a687-218fc1602492   100Mi      RWO            Delete           Bound    default/ceph-rbd-test-pvc   ceph-csi                38s
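To prove the volume is mountable end to end, a throwaway pod can bind the PVC and write to it (a minimal sketch; the pod name and busybox image are arbitrary choices, not part of the original walkthrough):
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: ceph-rbd-test-pod
spec:
  containers:
    - name: app
      image: busybox:1.36
      command: ["sh", "-c", "echo ok > /data/probe && sleep 3600"]
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: ceph-rbd-test-pvc
EOF
kubectl delete pod ceph-rbd-test-pod   # clean up afterwards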
# On a Ceph node, confirm the backing RBD image was created
$ rbd -p kubernetes ls
Version compatibility and deployment notes:
Strictly match the Ceph CSI release to the Ceph versions it supports.
Use a dedicated Ceph user and storage pool in production.
Check the official compatibility matrix regularly.