In this walkthrough I use ceph-csi to wire CephFS into Kubernetes as a StorageClass for the cluster. While researching I found that the ceph-csi docs and the various blog posts on the topic are rather scattered, so I am writing up my own experiment here for reference.
Ceph-side preparation
Create the CephFS filesystem
ceph osd pool create ceph_k8s
ceph osd pool create metadata_ceph_k8s
ceph fs new cephfs_k8s metadata_ceph_k8s ceph_k8s #assemble the pools into a CephFS filesystem
ceph fs subvolume create cephfs_k8s csi #create a subvolume
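Before moving on, it is worth confirming where the subvolume actually lives, since the auth caps below reference a path under /volumes. A quick check (the example output is an assumption; subvolumes created without a subvolume group typically land under /volumes/_nogroup):
ceph fs subvolume getpath cephfs_k8s csi
#e.g. /volumes/_nogroup/csi/<uuid> -- the exact path varies by release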
Create the user credentials
Following the ceph-csi documentation, create an account with exactly the permissions it needs for the connection. The values below match the filesystem created above and the user referenced later on.
USER=k8s #must match the userID used in the Secret later (without the "client." prefix)
FS_NAME=cephfs_k8s
SUB_VOL=csi
ceph auth get-or-create client.$USER \
  mgr "allow rw" \
  osd "allow rw tag cephfs metadata=$FS_NAME, allow rw tag cephfs data=$FS_NAME" \
  mds "allow r fsname=$FS_NAME path=/volumes, allow rws fsname=$FS_NAME path=/volumes/$SUB_VOL" \
  mon "allow r fsname=$FS_NAME"
Gather the required information
# ceph mon dump
epoch 3
fsid ec3f3a04-ea05-11ef-a686-bc2411ba81e9 #FSID --required (this becomes clusterID below)
last_changed 2025-02-13T12:34:06.030850+0000
created 2025-02-13T12:28:28.092545+0000
min_mon_release 16 (pacific)
election_strategy: 1 #mon IPs below --required
0: [v2:10.0.0.13:3300/0,v1:10.0.0.13:6789/0] mon.ceph01
1: [v2:10.0.0.15:3300/0,v1:10.0.0.15:6789/0] mon.ceph03
2: [v2:10.0.0.14:3300/0,v1:10.0.0.14:6789/0] mon.ceph02
dumped monmap epoch 3
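If you would rather extract the v1 monitor addresses than copy them out by hand, a portable sed one-liner works (a sketch; it assumes every mon listens on the default port 6789):
ceph mon dump 2>/dev/null | sed -n 's/.*v1:\([0-9.]*:6789\).*/\1/p'
#10.0.0.13:6789
#10.0.0.15:6789
#10.0.0.14:6789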
# ceph fs ls
#You need the FS name (cephfs_k8s) and the data pool name (ceph_k8s), don't mix them up!
name: cephfs_k8s, metadata pool: metadata_ceph_k8s, data pools: [ceph_k8s ]
# ceph fs subvolume ls cephfs_k8s #confirm the subvolume was created
[
    {
        "name": "csi"
    }
]
# ceph auth get client.k8s #we need the key from this output
[client.k8s]
	key = AQA5m7lntJ60BxAAKbT5dbeltplkiU9xZETyvQ==
	caps mds = "allow r fsname=cephfs_k8s path=/volumes, allow rws fsname=cephfs_k8s path=/volumes/csi"
	caps mgr = "allow rw"
	caps mon = "allow r fsname=cephfs_k8s"
	caps osd = "allow rw tag cephfs metadata=cephfs_k8s, allow rw tag cephfs data=cephfs_k8s"
exported keyring for client.k8s
Kubernetes-side setup
Clone the ceph-csi repository
git clone https://github.com/ceph/ceph-csi.git
cd ceph-csi/deploy/cephfs/kubernetes/
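The default branch tracks development, so you may want to pin a released tag before applying anything (the tag below is only an example; pick whatever git tag lists as current):
git tag --sort=-v:refname | head -n 5 #list the newest releases
git checkout v3.11.0 #example tag, substitute the one you chose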
Create a namespace to hold the ceph-csi components, and point the manifests at it
kubectl create ns csi
sed -i 's/namespace: default/namespace: csi/g' *.yaml
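A quick grep confirms the substitution reached every manifest (nothing should still say namespace: default):
grep -n 'namespace:' *.yaml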
Configure ceph-csi
csi-config-map.yaml:
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: "ceph-csi-config"
data:
  config.json: |-
    [
      {
        "clusterID": "ec3f3a04-ea05-11ef-a686-bc2411ba81e9",
        "monitors": [
          "10.0.0.13:6789",
          "10.0.0.14:6789",
          "10.0.0.15:6789"
        ]
      }
    ]
csidriver.yaml (this one ships with the repo; shown here for reference):
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: "cephfs.csi.ceph.com"
spec:
  attachRequired: false
  podInfoOnMount: false
  fsGroupPolicy: File
  seLinuxMount: true
csi-secret.yaml:
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-secret
  namespace: csi
stringData:
  # Required for statically and dynamically provisioned volumes
  # The userID must not include the "client." prefix!
  userID: k8s
  userKey: AQA5m7lntJ60BxAAKbT5dbeltplkiU9xZETyvQ==
  # Encryption passphrase
  encryptionPassphrase: test_passphrase
Configure the StorageClass
Save this next to the other manifests so the one-shot apply below picks it up, e.g. as csi-storageclass.yaml:
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs-sc
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: ec3f3a04-ea05-11ef-a686-bc2411ba81e9 #the fsid from ceph mon dump
  fsName: cephfs_k8s #the FS name from ceph fs ls
  pool: ceph_k8s #the data pool name
  #secret names for authentication
  csi.storage.k8s.io/provisioner-secret-name: csi-secret
  csi.storage.k8s.io/provisioner-secret-namespace: csi
  csi.storage.k8s.io/controller-expand-secret-name: csi-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: csi
  csi.storage.k8s.io/node-stage-secret-name: csi-secret
  csi.storage.k8s.io/node-stage-secret-namespace: csi
  mounter: kernel
reclaimPolicy: Delete
allowVolumeExpansion: true
Apply everything in one go!
ls *.yaml | xargs -I {} kubectl apply -n csi -f {} --validate=false
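After the apply, the provisioner Deployment and the node-plugin DaemonSet should come up in the csi namespace; wait until the csi-cephfsplugin-* pods are all Running:
kubectl -n csi get pods
As a smoke test, a minimal PVC against the new StorageClass should go Bound within a few seconds (the PVC name here is arbitrary; CephFS supports ReadWriteMany):
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-test-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephfs-sc
EOF
kubectl get pvc cephfs-test-pvc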
