内容简介:本文风哥教程参考Linux官方文档、Red Hat Enterprise Linux官方文档、Ansible Automation Platform官方文档、Docker官方文档、Kubernetes官方文档和Podman官方文档等内容,详细介绍了相关技术的配置和使用方法。from PG视频:www.itpux.com
本文档介绍CephFS文件系统的配置和使用方法。
Part01-CephFS部署
1.1 创建CephFS
[root@ceph-mon1 ~]# ceph osd pool create cephfs_data 128 128
pool 'cephfs_data' created
[root@ceph-mon1 ~]# ceph osd pool create cephfs_metadata 32 32
pool 'cephfs_metadata' created
# 启用CephFS应用
[root@ceph-mon1 ~]# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with name 'cephfs' created
# 查看CephFS状态
[root@ceph-mon1 ~]# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@ceph-mon1 ~]# ceph fs status cephfs
cephfs - 0 clients
======
RANK STATE MDS ACTIVITY DNS INOS DIRS CAPS
0 created – 0 0 0 0
POOL TYPE USED AVAIL
cephfs_data data 0 B 1.2 TiB
cephfs_metadata metadata 0 B 1.2 TiB
# 创建MDS服务
[root@ceph-admin ceph-cluster]# ceph-deploy mds create ceph-mon1 ceph-mon2
[ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph, hosts ceph-mon1 ceph-mon2
[ceph-mon1][DEBUG ] write conf to /etc/ceph/ceph.conf
[ceph-mon1][DEBUG ] create mds path /var/lib/ceph/mds/ceph-ceph-mon1
[ceph-mon1][DEBUG ] write keyring to /var/lib/ceph/mds/ceph-ceph-mon1/keyring
# 查看MDS状态
[root@ceph-mon1 ~]# ceph mds stat
cephfs:1 {0=ceph-mon1=up:active} 1 up:standby
# 查看CephFS状态
[root@ceph-mon1 ~]# ceph fs status cephfs
cephfs - 0 clients
======
RANK STATE MDS ACTIVITY DNS INOS DIRS CAPS
0 active ceph-mon1 Reqs: 0 /s 10 13 2 0
POOL TYPE USED AVAIL
cephfs_data data 0 B 1.2 TiB
cephfs_metadata metadata 2.0 MiB 1.2 TiB
1.2 客户端挂载
[root@ceph-mon1 ~]# ceph fs authorize cephfs client.cephfs / rw
[client.cephfs]
key = ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ab==
# 查看用户密钥
[root@ceph-mon1 ~]# ceph auth get client.cephfs
[client.cephfs]
key = ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ab==
caps mds = "allow rw"
caps mon = "allow r"
caps osd = "allow rw pool=cephfs_data, allow rw pool=cephfs_metadata"
exported keyring for client.cephfs
# 创建挂载点
[root@client ~]# mkdir /mnt/cephfs
# 使用内核客户端挂载
[root@client ~]# mount -t ceph 192.168.1.11:6789:/ /mnt/cephfs -o name=cephfs,secret=ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ab==
# 查看挂载
[root@client ~]# df -h | grep ceph
192.168.1.11:6789:/ 1.2T 0 1.2T 0% /mnt/cephfs
# 测试写入
[root@client ~]# echo "Hello CephFS" > /mnt/cephfs/test.txt
[root@client ~]# cat /mnt/cephfs/test.txt
Hello CephFS
# 创建目录
[root@client ~]# mkdir /mnt/cephfs/data
[root@client ~]# ls -la /mnt/cephfs/
total 1
drwxr-xr-x 1 root root 1 Apr 4 20:15 .
drwxr-xr-x 1 root root 1 Apr 4 20:15 ..
drwxr-xr-x 1 root root 2 Apr 4 20:15 data
-rw-r--r-- 1 root root 13 Apr 4 20:15 test.txt
# 配置自动挂载
[root@client ~]# cat >> /etc/fstab << 'EOF'
192.168.1.11:6789:/ /mnt/cephfs ceph name=cephfs,secret=ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ab==,noatime,_netdev 0 0
EOF
# 使用FUSE客户端挂载
[root@client ~]# dnf install -y ceph-fuse
[root@client ~]# mkdir /mnt/cephfs-fuse
[root@client ~]# ceph-fuse -n client.cephfs -m 192.168.1.11:6789 /mnt/cephfs-fuse
ceph-fuse[12345]: starting ceph client
ceph-fuse[12345]: starting fuse
# 查看挂载
[root@client ~]# df -h | grep ceph
ceph-fuse 1.2T 0 1.2T 0% /mnt/cephfs-fuse
Part02-CephFS管理
2.1 配额管理
[root@client ~]# setfattr -n ceph.quota.max_bytes -v 10737418240 /mnt/cephfs/data
[root@client ~]# getfattr -n ceph.quota.max_bytes /mnt/cephfs/data
getfattr: Removing leading '/' from absolute path names
# file: mnt/cephfs/data
ceph.quota.max_bytes="10737418240"
# 设置文件数配额
[root@client ~]# setfattr -n ceph.quota.max_files -v 1000 /mnt/cephfs/data
[root@client ~]# getfattr -n ceph.quota.max_files /mnt/cephfs/data
getfattr: Removing leading '/' from absolute path names
# file: mnt/cephfs/data
ceph.quota.max_files="1000"
# 查看配额使用情况
[root@client ~]# getfattr -d -m ceph /mnt/cephfs/data
getfattr: Removing leading '/' from absolute path names
# file: mnt/cephfs/data
ceph.dir.layout="stripe_unit=4194304 stripe_count=1 object_size=4194304 pool=cephfs_data"
ceph.dir.layout.pool="cephfs_data"
ceph.dir.layout.pool_namespace=""
ceph.dir.layout.stripe_count="1"
ceph.dir.layout.stripe_unit="4194304"
ceph.quota.max_bytes="10737418240"
ceph.quota.max_files="1000"
# 清除配额
[root@client ~]# setfattr -n ceph.quota.max_bytes -v 0 /mnt/cephfs/data
[root@client ~]# setfattr -n ceph.quota.max_files -v 0 /mnt/cephfs/data
2.2 快照管理
[root@ceph-mon1 ~]# ceph fs set cephfs allow_snapshots true
# 创建快照
[root@client ~]# mkdir /mnt/cephfs/.snap/snap1
# 查看快照
[root@client ~]# ls -la /mnt/cephfs/.snap/
total 1
drwxr-xr-x 1 root root 1 Apr 4 20:20 .
drwxr-xr-x 1 root root 1 Apr 4 20:15 ..
drwxr-xr-x 1 root root 2 Apr 4 20:20 snap1
# 从快照恢复文件
[root@client ~]# rm /mnt/cephfs/test.txt
[root@client ~]# cp /mnt/cephfs/.snap/snap1/test.txt /mnt/cephfs/
[root@client ~]# cat /mnt/cephfs/test.txt
Hello CephFS
# 删除快照
[root@client ~]# rmdir /mnt/cephfs/.snap/snap1
# 查看CephFS信息
[root@ceph-mon1 ~]# ceph fs get cephfs
Filesystem 'cephfs'
Name: cephfs
Metadata pool: cephfs_metadata
Data pools: [cephfs_data ]
Standby count: 1
Max MD rank: 1
MD cluster map:
Rank GID State
0 12345 active
MD cluster map history:
Epoch Modification
1 Mon Apr 4 20:10:00 2026
- 部署多个MDS提高可用性
- 使用内核客户端提高性能
- 配置合理的配额限制
- 定期创建快照备份
- 监控CephFS状态
本文由风哥教程整理发布,仅用于学习测试使用,转载注明出处:http://www.fgedu.net.cn/10327.html
