Configure Ceph: three-node setup

Host name   IP            Disk                                      Role
ceph01      10.10.20.55   /dev/sdb (journal), /dev/sdc, /dev/sdd    mon, mgr, osd
ceph02      10.10.20.66   /dev/sdb (journal), /dev/sdc, /dev/sdd    mon, osd
ceph03      10.10.20.77   /dev/sdb (journal), /dev/sdc, /dev/sdd    mon, osd

 

Stop the ceph-mon service on each node (run each command on the corresponding node):

systemctl stop ceph-mon@ceph01
systemctl stop ceph-mon@ceph02
systemctl stop ceph-mon@ceph03
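
To confirm the monitors are actually stopped before touching the disks, a quick check might look like the sketch below (it assumes the passwordless SSH between nodes that ceph-deploy already requires; "inactive" means the daemon is down):

for host in ceph01 ceph02 ceph03; do
    # "inactive" means the ceph-mon daemon on that node is stopped
    ssh "$host" "systemctl is-active ceph-mon@$host" || true
done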

 

Partition the journal disk /dev/sdb on each node (shown here on ceph02) and hand the partitions over to the ceph user:

[root@ceph02 ~]# parted /dev/sdb mklabel gpt
Information: You may need to update /etc/fstab.

[root@ceph02 ~]# parted /dev/sdb mkpart primary 1M 50%
Information: You may need to update /etc/fstab.

[root@ceph02 ~]# parted /dev/sdb mkpart primary 50% 100%
Information: You may need to update /etc/fstab.

[root@ceph02 ~]# chown ceph.ceph /dev/sdb1
[root@ceph02 ~]# chown ceph.ceph /dev/sdb2
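
The same layout is needed on the other two nodes. A minimal sketch of repeating it from ceph01 (assuming /dev/sdb is the journal disk on every node and the passwordless SSH used by ceph-deploy is in place):

for host in ceph01 ceph03; do
    # same partition layout and ownership as on ceph02
    ssh "$host" "parted -s /dev/sdb mklabel gpt &&
        parted -s /dev/sdb mkpart primary 1M 50% &&
        parted -s /dev/sdb mkpart primary 50% 100% &&
        chown ceph.ceph /dev/sdb1 /dev/sdb2"
done

Note that the ownership of the /dev/sdb1 and /dev/sdb2 device nodes does not survive a reboot; a udev rule is the usual way to make it persistent.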

 

Zap (wipe) the data disks (run only on ceph01):

[root@ceph01 ceph-cluster]# ceph-deploy disk zap ceph01 /dev/sd{c,d}

[root@ceph01 ceph-cluster]# ceph-deploy disk zap ceph02 /dev/sd{c,d}

[root@ceph01 ceph-cluster]# ceph-deploy disk zap ceph03 /dev/sd{c,d}
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy disk zap ceph03 /dev/sdc /dev/sdd
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : zap
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f1635a2fbd8>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  host                          : ceph03
[ceph_deploy.cli][INFO  ]  func                          : <function disk at 0x7f1635a7a578>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : ['/dev/sdc', '/dev/sdd']
[ceph_deploy.osd][DEBUG ] zapping /dev/sdc on ceph03
[ceph03][DEBUG ] connected to host: ceph03 
[ceph03][DEBUG ] detect platform information from remote host
[ceph03][DEBUG ] detect machine type
[ceph03][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.7.1908 Core
[ceph03][DEBUG ] zeroing last few blocks of device
[ceph03][DEBUG ] find the location of an executable
[ceph03][INFO  ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdc
[ceph03][WARNIN] --> Zapping: /dev/sdc
[ceph03][WARNIN] --> --destroy was not specified, but zapping a whole device will remove the partition table
[ceph03][WARNIN] Running command: /bin/dd if=/dev/zero of=/dev/sdc bs=1M count=10 conv=fsync
[ceph03][WARNIN]  stderr: 10+0 records in
[ceph03][WARNIN] 10+0 records out
[ceph03][WARNIN]  stderr: 10485760 bytes (10 MB) copied, 0.0125001 s, 839 MB/s
[ceph03][WARNIN] --> Zapping successful for: <Raw Device: /dev/sdc>
[ceph_deploy.osd][DEBUG ] zapping /dev/sdd on ceph03
[ceph03][DEBUG ] connected to host: ceph03 
[ceph03][DEBUG ] detect platform information from remote host
[ceph03][DEBUG ] detect machine type
[ceph03][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.7.1908 Core
[ceph03][DEBUG ] zeroing last few blocks of device
[ceph03][DEBUG ] find the location of an executable
[ceph03][INFO  ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdd
[ceph03][WARNIN] --> Zapping: /dev/sdd
[ceph03][WARNIN] --> --destroy was not specified, but zapping a whole device will remove the partition table
[ceph03][WARNIN] Running command: /bin/dd if=/dev/zero of=/dev/sdd bs=1M count=10 conv=fsync
[ceph03][WARNIN]  stderr: 10+0 records in
[ceph03][WARNIN] 10+0 records out
[ceph03][WARNIN] 10485760 bytes (10 MB) copied
[ceph03][WARNIN]  stderr: , 0.00957528 s, 1.1 GB/s
[ceph03][WARNIN] --> Zapping successful for: <Raw Device: /dev/sdd>
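A quick way to confirm the data disks are clean on all three nodes (just a sanity check, not part of the original walkthrough) is to look at them with lsblk:

for host in ceph01 ceph02 ceph03; do
    echo "== $host =="
    # after zapping, /dev/sdc and /dev/sdd should show no partitions or LVM volumes
    ssh "$host" "lsblk /dev/sdc /dev/sdd"
done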

 

Create the OSDs (run only on ceph01)
# Create the OSD storage devices: /dev/sdc and /dev/sdd provide storage space for the cluster, /dev/sdb1 and /dev/sdb2 provide the journal cache
# Each storage device is paired with one journal device; the journal should live on an SSD and does not need to be large

[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph01 --data /dev/sdc --journal /dev/sdb1
[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph01 --data /dev/sdd --journal /dev/sdb2
[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph02 --data /dev/sdc --journal /dev/sdb1
[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph02 --data /dev/sdd --journal /dev/sdb2
[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph03 --data /dev/sdc --journal /dev/sdb1
[root@ceph01 ceph-cluster]# ceph-deploy osd create ceph03 --data /dev/sdd --journal /dev/sdb2

Verify: all six OSDs (osd.0 through osd.5) should now be visible in the cluster status

[root@ceph01 ceph-cluster]# ceph -s
  cluster:
    id:     fbc66f50-ced8-4ad1-93f7-2453cdbf59ba
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum ceph01,ceph02,ceph03 (age 10m)
    mgr: no daemons active
    osd: 6 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs: 
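
The OSD-to-host mapping can also be checked directly; the two commands below are standard ceph CLI calls, added here only as a convenience:

# lists every OSD with its host and up/in state; expect osd.0 through osd.5
[root@ceph01 ceph-cluster]# ceph osd tree
# compact summary of the OSD map (total / up / in counts)
[root@ceph01 ceph-cluster]# ceph osd stat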

The HEALTH_WARN "no active mgr" means no manager daemon has been deployed yet.
Configure a mgr: create a manager named mgr1 on ceph01; it will then be visible from all three nodes.

[root@ceph01 ceph-cluster]# ceph-deploy mgr create ceph01:mgr1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy mgr create ceph01:mgr1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  mgr                           : [('ceph01', 'mgr1')]
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f2b64aedd40>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function mgr at 0x7f2b65357cf8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.mgr][DEBUG ] Deploying mgr, cluster ceph hosts ceph01:mgr1
[ceph01][DEBUG ] connected to host: ceph01 
[ceph01][DEBUG ] detect platform information from remote host
[ceph01][DEBUG ] detect machine type
[ceph_deploy.mgr][INFO  ] Distro info: CentOS Linux 7.7.1908 Core
[ceph_deploy.mgr][DEBUG ] remote host will use systemd
[ceph_deploy.mgr][DEBUG ] deploying mgr bootstrap to ceph01
[ceph01][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph01][WARNIN] mgr keyring does not exist yet, creating one
[ceph01][DEBUG ] create a keyring file
[ceph01][DEBUG ] create path recursively if it doesn't exist
[ceph01][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.mgr1 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-mgr1/keyring
[ceph01][INFO  ] Running command: systemctl enable ceph-mgr@mgr1
[ceph01][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/[email protected] to /usr/lib/systemd/system/ceph-mgr@.service.
[ceph01][INFO  ] Running command: systemctl start ceph-mgr@mgr1
[ceph01][INFO  ] Running command: systemctl enable ceph.target
You have new mail in /var/spool/mail/root
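Once the mgr is up, the warning should clear; re-checking the status (plus ceph mgr stat, added here as an extra check) should show mgr1 as the active manager:

# health should no longer complain about a missing mgr, and "mgr: mgr1(active)" should appear
[root@ceph01 ceph-cluster]# ceph -s
# prints the mgr map epoch and the name of the active manager
[root@ceph01 ceph-cluster]# ceph mgr stat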

 

4.1 Create an image (on ceph01)
View the existing storage pools:
[root@ceph01 ceph-cluster]# ceph osd lspools
Create a pool named pool-zk with 100 placement groups:
[root@ceph01 ceph-cluster]# ceph osd pool create pool-zk 100
pool 'pool-zk' created
Enable the pool for the block-device (RBD) application:
[root@ceph01 ceph-cluster]# ceph osd pool application enable pool-zk rbd
enabled application 'rbd' on pool 'pool-zk'
Rename the pool to rbd:
[root@ceph01 ceph-cluster]# ceph osd pool rename pool-zk rbd
pool 'pool-zk' renamed to 'rbd'
[root@ceph01 ceph-cluster]# ceph osd lspools
1 rbd
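
With the rbd pool in place, a block image can be created in it. The following is only a sketch; the image name demo-image and the 10 GiB size are arbitrary examples, not taken from the original walkthrough:

# create a 10 GiB image in the default "rbd" pool, then list and inspect it
[root@ceph01 ceph-cluster]# rbd create demo-image --size 10240
[root@ceph01 ceph-cluster]# rbd ls
[root@ceph01 ceph-cluster]# rbd info demo-image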

 


Origin www.cnblogs.com/shuihuaboke/p/12582960.html