1. 创建空间池
[root@ceph-adm ~]#ceph osd pool create testspace 256 256
[root@ceph-adm ~]#ceph osd lspools
0 rbd,1 testspace,
2. 创建镜像文件storage_500k 专门用于放500K大小的文件
[root@ceph-adm ~]#rbd create storage_500k --size 150000 -p testspace
[root@ceph-adm ~]#rbd --image storage_500k -p testspace info
rbd image 'storage_500k':
size 146 GB in 37500 objects
order 22 (4096 kB objects)
block_name_prefix: rb.0.10af.238e1f29
format: 1
3. 将镜像映射为本地块设备
[root@ceph-adm ~]#rbd map storage_500k -p testspace
/dev/rbd0
[root@ceph-adm ~]#df -h
文件系统 容量 已用 可用 已用% 挂载点
/dev/mapper/centos-root 18G 2.4G 16G 14% /
devtmpfs 905M 0 905M 0% /dev
tmpfs 914M 0 914M 0% /dev/shm
tmpfs 914M 8.6M 905M 1% /run
tmpfs 914M 0 914M 0% /sys/fs/cgroup
/dev/sda1 497M 162M 336M 33% /boot
[root@ceph-adm ~]# rbd showmapped
id pool image snap device
0 testspace storage_500k - /dev/rbd0
4. 格式化镜像
[root@ceph-adm ~]# mkfs.ext4 -m0 /dev/rbd0
mke2fs 1.42.9 (28-Dec-2013)
Discarding device blocks: 完成
文件系统标签=
OS type: Linux
块大小=4096 (log=2)
分块大小=4096 (log=2)
Stride=1024 blocks, Stripe width=1024 blocks
9601024 inodes, 38400000 blocks
0 blocks (0.00%) reserved for the super user
第一个数据块=0
Maximum filesystem blocks=2187329536
1172 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000, 7962624, 11239424, 20480000, 23887872
Allocating group tables: 完成
正在写入inode表: 完成
Creating journal (32768 blocks): 完成
Writing superblocks and filesystem accounting information:
完成
5. 将创建的虚拟块设备挂载到client端
[root@ceph-adm ~]# mount /dev/rbd0 /mnt
[root@ceph-adm ~]# df -h
文件系统 容量 已用 可用 已用% 挂载点
/dev/mapper/centos-root 18G 2.4G 16G 14% /
devtmpfs 905M 0 905M 0% /dev
tmpfs 914M 0 914M 0% /dev/shm
tmpfs 914M 8.6M 905M 1% /run
tmpfs 914M 0 914M 0% /sys/fs/cgroup
/dev/sda1 497M 162M 336M 33% /boot
/dev/rbd0 145G 61M 144G 1% /mnt
6. 测试速度，创建一个100G的块文件
[root@ceph-adm mnt]# dd if=/dev/zero of=/mnt/100G bs=1000 count=100000000