ceph_fs_rbd

Ceph Mimic (13.2.5)

cephfs

[root@ceph1 ceph]# ceph osd pool create cfs_data 10
pool 'cfs_data' created
[root@ceph1 ceph]# ceph osd pool create cfs_meta 10
pool 'cfs_meta' created

[root@ceph1 ceph]# ceph fs new cefs cfs_meta cfs_data
new fs with metadata pool 7 and data pool 6

[root@ceph1 ceph]# ceph fs ls
name: cefs, metadata pool: cfs_meta, data pools: [cfs_data ]


[root@ceph1 ceph]# ceph fs status
cefs - 0 clients
====
+------+--------+-------+---------------+-------+-------+
| Rank | State  |  MDS  |    Activity   |  dns  |  inos |
+------+--------+-------+---------------+-------+-------+
|  0   | active | ceph1 | Reqs:    0 /s |   10  |   13  |
+------+--------+-------+---------------+-------+-------+
+----------+----------+-------+-------+
|   Pool   |   type   |  used | avail |
+----------+----------+-------+-------+
| cfs_meta | metadata | 2286  | 55.0G |
| cfs_data |   data   |    0  | 55.0G |
+----------+----------+-------+-------+
+-------------+
| Standby MDS |
+-------------+
|    ceph2    |
+-------------+
MDS version: ceph version 13.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) mimic (stable)
[root@ceph1 ceph]#
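The mount command below authenticates as client.admin. A minimal sketch, in case a dedicated CephFS user is preferred (the name client.fsuser is illustrative, not part of the original setup):

# Create a client restricted to the cefs filesystem and read back its key
ceph fs authorize cefs client.fsuser / rw
ceph auth get client.fsuser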

sudo mount -t ceph 192.168.7.151:6789:/ /mnt -o name=admin,secret=AQBaPZNcCalvLRAAt4iyva3DHfb8NbOX4MxBAw==
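The secret passed on the command line above is the client.admin key. A sketch of keeping it out of the shell history by using a secret file instead (the file path is illustrative):

# Store the admin key in a root-only file and reference it at mount time
ceph auth get-key client.admin > /etc/ceph/admin.secret
chmod 600 /etc/ceph/admin.secret
mount -t ceph 192.168.7.151:6789:/ /mnt -o name=admin,secretfile=/etc/ceph/admin.secret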


rbd

ceph osd pool create rbdpool 64
rbd pool init rbdpool
rbd create --size 1024 rbdpool/rbdimage
rbd ls rbdpool
rbd info rbdpool/rbdimage
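Typical follow-up operations on the new image, sketched here with an illustrative snapshot name (snap1 is not from the original walkthrough):

rbd resize --size 2048 rbdpool/rbdimage      # grow the image from 1 GiB to 2 GiB
rbd snap create rbdpool/rbdimage@snap1       # take a point-in-time snapshot
rbd snap ls rbdpool/rbdimage                 # list snapshots of the image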

Unless another ID is specified, the rbd command accesses the Ceph cluster as client.admin. A dedicated client with restricted RBD capability profiles can be created instead:
ceph auth get-or-create client.qemu mon 'profile rbd' osd 'profile rbd pool=vms, profile rbd-read-only pool=images'
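A sketch of how a client would then use this restricted ID instead of admin (it assumes the vms and images pools named in the capability string exist; the keyring path is the conventional one):

# Write the generated key to a keyring the client can read
ceph auth get client.qemu -o /etc/ceph/ceph.client.qemu.keyring

# Run rbd as client.qemu rather than the default client.admin
rbd --id qemu ls vms
rbd --id qemu ls images        # read-only, per the rbd-read-only profile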

Ceph Block Device images are thin provisioned: no space is allocated until data is actually written to the image.
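A quick way to see the effect of thin provisioning on the image created above is rbd du, which reports provisioned versus actually used space:

rbd du rbdpool/rbdimage        # USED stays near zero until data is written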

[root@ceph1 ceph]# rbd info rbdpool/rbdimage
rbd image 'rbdimage':
        size 1 GiB in 256 objects
        order 22 (4 MiB objects)
        id: 12926b8b4567
        block_name_prefix: rbd_data.12926b8b4567
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Wed Mar 27 18:06:28 2019


The kernel RBD client often does not support the object-map, fast-diff, and deep-flatten features; disabling them before mapping avoids an "image uses unsupported features" error:
[root@ceph1 ceph]# rbd feature disable rbdpool/rbdimage object-map fast-diff deep-flatten
[root@ceph1 ceph]# rbd device map rbdpool/rbdimage  --id admin
/dev/rbd0
[root@ceph1 ~]# lsblk
NAME                                                                                                  MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0                                                                                                    11:0    1  918M  0 rom
vda                                                                                                   253:0    0   50G  0 disk
├─vda1                                                                                                253:1    0  512M  0 part /boot
├─vda2                                                                                                253:2    0    8G  0 part [SWAP]
└─vda3                                                                                                253:3    0 41.5G  0 part /
vdb                                                                                                   253:16   0   60G  0 disk
└─ceph--c087d78f--9bb1--49a5--97ad--437995ee0ae7-osd--block--da3283a7--adfe--43ad--8ebc--0853ee8900bb 252:0    0   60G  0 lvm
rbd0                                                                                                  251:0    0    1G  0 disk


[root@ceph1 ~]# rbd device list
id pool    image    snap device
0  rbdpool rbdimage -    /dev/rbd0
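Once mapped, /dev/rbd0 behaves like any other block device. A sketch of formatting, mounting, and later unmapping it (the filesystem type and mount point are illustrative):

mkfs.xfs /dev/rbd0                    # create a filesystem on the mapped device
mkdir -p /mnt/rbd
mount /dev/rbd0 /mnt/rbd

# Clean up when finished
umount /mnt/rbd
rbd device unmap /dev/rbd0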

[root@ceph1 ~]# ceph report
report 2274692862
{
"cluster_fingerprint": "a7164692-5fd9-46dc-be11-b65b5a9a9103",
"version": "13.2.5",
"commit": "cbff874f9007f1869bfd3821b7e33b2a6ffd4988",

rados

rados -h
radosgw-admin -h

[root@ceph1 ~]# rados lspools
mypool
[root@ceph1 ~]# rados df
POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS  RD WR_OPS  WR
mypool     0 B       0      0      0                  0       0        0      0 0 B      0 0 B

total_objects    0
total_used       3.0 GiB
total_avail      177 GiB
total_space      180 GiB
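mypool is empty above. A minimal sketch of object-level I/O with rados (the object name hello-object and the local file paths are illustrative):

echo "hello ceph" > /tmp/hello.txt
rados -p mypool put hello-object /tmp/hello.txt    # upload a local file as an object
rados -p mypool ls                                 # the object now shows up in the pool
rados -p mypool stat hello-object                  # size and mtime of the object
rados -p mypool get hello-object /tmp/hello.out    # read it back
rados -p mypool rm hello-object                    # remove it again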

[root@ali-3 ~]# rados df
POOL_NAME                              USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED  RD_OPS     RD WR_OPS    WR
pool-d83c6154956b44aea7639c7bd4c45c65 2001M   17460      0  52380                  0       0     1023 5775755 11819M 318825 6488M

total_objects    17460
total_used       44969M
total_avail      53608G
total_space      53652G


[root@ali-3 ~]# rados -p pool-d83c6154956b44aea7639c7bd4c45c65  ls
xbd_data.618177.0000000000000014
...



[root@ceph1 ~]# ceph osd lspools
1 mypool
2 .rgw.root
3 default.rgw.control
4 default.rgw.meta
5 default.rgw.log
6 cfs_data
7 cfs_meta
8 rbdpool

List objects in a pool:
[root@ceph1 ~]# rados -p cfs_data ls
10000000005.00000000
10000000006.00000000
[root@ceph1 ~]# rados -p cfs_meta ls
601.00000000
602.00000000
600.00000000
603.00000000
1.00000000.inode
200.00000000
200.00000001
606.00000000
607.00000000
mds0_openfiles.0
608.00000000
500.00000001
604.00000000
500.00000000
mds_snaptable
605.00000000
mds0_inotable
100.00000000
mds0_sessionmap
609.00000000
400.00000000
100.00000000.inode
1.00000000
[root@ceph1 ~]# rados -p rbdpool ls
rbd_directory
rbd_id.rbdimage
rbd_info
rbd_header.12926b8b4567
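The rbd_id and rbd_header objects above hold the image metadata; the suffix of rbd_header matches the image id reported by rbd info. A sketch of inspecting them directly with rados (the output file path is illustrative; rbd_id stores the internal id with a short binary length prefix):

rados -p rbdpool stat rbd_header.12926b8b4567      # metadata object for the image
rados -p rbdpool get rbd_id.rbdimage /tmp/rbd_id   # maps the image name to its id
cat /tmp/rbd_id                                    # contains 12926b8b4567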




Reposted from www.cnblogs.com/createyuan/p/10815413.html