Ceph: adding monitors

1. Modify the configuration

[root@node2 ~]# vim /etc/ceph/ceph-cluster/ceph.conf 
[global]
fsid = 4c137c64-9e09-410e-aee4-c04b0f46294e
mon_initial_members = node2,node1,master1
mon_host = 172.16.18.22,172.16.18.24,172.16.18.16
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

osd pool default size = 3
public network = 172.16.18.22/24

[mon]
mon allow pool delete = true
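
The fields that matter for this change are mon_initial_members and mon_host, which now list all three nodes, and public network, which must cover the new monitors' addresses. As a quick sanity check you can read a value back with ceph-conf (a minimal sketch; the key name matches the config above):

[root@node2 ~]# ceph-conf -c /etc/ceph/ceph-cluster/ceph.conf --lookup mon_host

This should echo the same 172.16.18.22,172.16.18.24,172.16.18.16 list shown in the file.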

2. Sync the configuration file

[root@node2 ceph]# cp /etc/ceph/ceph-cluster/ceph.conf /etc/ceph/
[root@node2 ceph]# ceph-deploy --overwrite-conf config push node1 master1
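
To confirm the push actually landed, you can grep each node's copy for the new monitor list (a plain ssh loop; passwordless ssh from the admin node is assumed, as ceph-deploy already requires it):

[root@node2 ceph]# for h in node1 master1; do ssh $h grep mon_host /etc/ceph/ceph.conf; done

Both lines should show all three addresses.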

3. Add the monitors

ceph-deploy --overwrite-conf mon create node1 master1
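
Once ceph-deploy returns, the new daemons should join the existing monitor quorum on their own. Two standard CLI checks, quicker than the full ceph -s shown in the next step:

[root@node2 ceph-cluster]# ceph mon stat
[root@node2 ceph-cluster]# ceph quorum_status --format json-pretty

ceph mon stat prints the monmap epoch and the current quorum members; quorum_status adds the election details.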

4. Verify

[root@node2 ceph-cluster]# ceph -s
    cluster 6fd6ad33-621d-48c6-882d-0f364555a16b
     health HEALTH_ERR
            64 pgs are stuck inactive for more than 300 seconds
            64 pgs stuck inactive
            64 pgs stuck unclean
            no osds
     monmap e3: 3 mons at {master1=172.16.18.16:6789/0,node1=172.16.18.24:6789/0,node2=172.16.18.22:6789/0}
            election epoch 8, quorum 0,1,2 master1,node2,node1
     osdmap e1: 0 osds: 0 up, 0 in
            flags sortbitwise,require_jewel_osds
      pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
[root@node2 ceph-cluster]#
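
Note that HEALTH_ERR here is expected and has nothing to do with the monitor change: osdmap e1 reports 0 osds, so the 64 PGs of the default pool have nowhere to be created yet. The line that confirms the monitors were added successfully is monmap e3, which lists all three mons with quorum 0,1,2.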

Reposted from www.cnblogs.com/yajun2019/p/11637054.html