Installing Ceph v12.2.13 Luminous on Ubuntu 18.04.4

# ceph --version
ceph version 12.2.13 (584a20eb0237c657dc0567da126be145106aa47e) luminous (stable)

1. Install ceph-deploy

# wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
OK
# echo deb https://download.ceph.com/debian-luminous/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
deb https://download.ceph.com/debian-luminous/ bionic main
# sudo apt update
# sudo apt install ceph-deploy
# sudo apt install ntpsec
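  • Quick sanity check (not part of the original steps): confirm ceph-deploy is installed and that the clock is synchronized, since Ceph monitors are sensitive to clock skew.
# ceph-deploy --version
# timedatectl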

2. Set up passwordless SSH from the ceph-deploy node to the other nodes

# vim /etc/hosts

192.168.1.100   node1
192.168.1.101   node2
192.168.1.102   node3
# ssh-keygen
# ssh-copy-id root@node2
# ssh-copy-id root@node3
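  • Verify that passwordless login works before running ceph-deploy (a simple check, not in the original steps):
# ssh root@node2 hostname
# ssh root@node3 hostname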

3. Set up the Ceph cluster

# mkdir my-cluster
# cd my-cluster/

3.1 Purge any existing Ceph configuration

# ceph-deploy purge node1 node2 node3
# ceph-deploy purgedata node1 node2 node3
# ceph-deploy forgetkeys
# rm ceph.*

3.2 Create the cluster

  • Every node needs python-minimal installed:
# sudo apt install python-minimal -y
# ceph-deploy new node1
  • The following files are generated in the current directory:
ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring
  • Install Ceph packages on every node (run from the deploy node):
# ceph-deploy install node1 node2 node3
  • The download failed:
Get:1 https://download.ceph.com/debian-mimic bionic/main amd64 ceph-osd amd64 13.2.10-1bionic [13.2 MB]
[node1][WARNIN] No data was received after 300 seconds, disconnecting...
  • Switch to the NetEase (163) mirror:
# export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/debian-luminous/
# export CEPH_DEPLOY_GPG_URL=http://mirrors.163.com/ceph/keys/release.asc
# ceph-deploy install node1 node2 node3
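  • Alternatively, the release can be pinned explicitly so ceph-deploy does not fall back to the mimic repo seen in the failed run above (an alternative sketch, not used in this walkthrough):
# ceph-deploy install --release luminous node1 node2 node3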
  • Add the public network to ceph.conf (it goes in the [global] section):
# vim ceph.conf
public network = 192.168.1.0/24
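  • After the edit, the ceph.conf generated by ceph-deploy new should look roughly like this (a sketch; the fsid shown is a placeholder for whatever was generated):
[global]
fsid = <generated fsid>
mon_initial_members = node1
mon_host = 192.168.1.100
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 192.168.1.0/24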

3.3 Create mons

# ceph-deploy mon create-initial
  • Push the config file and admin keyring to the other nodes:
# ceph-deploy admin node1 node2 node3

3.3.1 mon create

# ceph-deploy mon create node1 node2

3.3.2 mon add

  • Add a mon:
# ceph-deploy mon add node3
# ceph quorum_status --format json-pretty
  • Create a mgr:
# ceph-deploy mgr create node1 
  • Add standby mgrs:
# ceph-deploy mgr create node2 node3
# ceph -s

3.4 Create OSDs

  • The first attempt errors out:
# ceph-deploy osd create --data /dev/sda node1

[node1][WARNIN] --> Absolute path not found for executable: lvs
[node1][WARNIN] --> Ensure $PATH environment variable contains common executable locations
[node1][WARNIN] -->  OSError: [Errno 2] No such file or directory
[node1][ERROR ] RuntimeError: command returned non-zero exit status: 1
[ceph_deploy.osd][ERROR ] Failed to execute command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sda
[ceph_deploy][ERROR ] GenericError: Failed to create 1 OSDs
# ceph-deploy disk list node1
  • Install lvm2 (ceph-volume needs the missing lvs binary) and format the disk:
# mkfs.xfs -f /dev/sda
# apt install -y lvm2
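  • If a disk still carries old partitions or LVM metadata, it can also be wiped with ceph-deploy first (optional, and it destroys all data on the device):
# ceph-deploy disk zap node1 /dev/sda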
  • Create the OSDs:
# ceph-deploy osd create node1 --data /dev/sda
# ceph-deploy osd create node1 --data /dev/sdb
# ceph-deploy osd create node1 --data /dev/sdc

# ceph-deploy osd create node2 --data /dev/sda
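  • The same pattern applies to the remaining disks on node2 and node3; afterwards, confirm all OSDs are up and in:
# ceph osd tree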

4. Create a storage pool

  • Creating a pool with pg_num 4096 fails with the error below (a possible workaround is sketched after this step); falling back to 512 PGs works:
# ceph osd pool create lotuspool 4096
Error ERANGE: For better initial performance on pools expected to store a large number of objects, consider supplying the expected_num_objects parameter when creating the pool.
# ceph osd pool create mypool 512
pool 'mypool' created
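  • Per the hint in the error message, a 4096-PG pool can likely still be created by supplying expected_num_objects (a sketch only; the trailing 1000000 is an arbitrary example value):
# ceph osd pool create lotuspool 4096 4096 replicated replicated_rule 1000000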
  • List the pools:
# ceph osd lspools
1 mypool,
# ceph osd pool ls detail
pool 1 'mypool' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 512 pgp_num 512 last_change 100 flags hashpspool stripe_width 0
# rados df
POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS RD WR_OPS WR
mypool      0B       0      0      0                  0       0        0      0 0B      0 0B
  • Set the pool replica count to 2:
# ceph osd pool get mypool size
size: 3
# ceph osd pool set mypool size 2
set pool 1 size to 2
# ceph osd pool application enable mypool cephfs
enabled application 'cephfs' on pool 'mypool'

Restart all Ceph services

# systemctl stop  ceph.target
# systemctl start  ceph.target
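  • After the restart, verify the cluster is healthy again:
# ceph -s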

5. Create a CephFS file system

  • Create the MDS daemons:
# ceph-deploy mds create node1 node2 node3
  • Create the data pool and the metadata pool:
# ceph osd pool create cephfs_data 512
pool 'cephfs_data' created
# ceph osd pool create cephfs_metadata 128
pool 'cephfs_metadata' created
  • Create the file system from the two pools:
# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 3 and data pool 2
# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
  • Check that the MDS is up and running:
# ceph -s
mds: cephfs-1/1/1 up
# ceph mds stat
  • Set the replica count:
# ceph osd pool get cephfs_data size
size: 3
# ceph osd pool set cephfs_data size 2
set pool 2 size to 2
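  • The metadata pool can be adjusted the same way if 2 replicas are wanted there too (optional, not part of the original steps):
# ceph osd pool set cephfs_metadata size 2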

5.1 Mount CephFS with the kernel client

  • Create the mount point:
# mkdir /cephfs
  • Mount it; the admin key can be obtained from:
# ceph auth export client.admin
  • Mount using the monitor addresses (port 6789):
# mount -t ceph 192.168.1.100:6789,192.168.1.101:6789,192.168.1.102:6789:/ /cephfs/ -o name=admin,secret=<key value>
  • Alternatively, save the key value to a new file, admin.secret:
# cd /etc/ceph/
# vim admin.secret
# mount -t ceph 192.168.1.100:6789,192.168.1.101:6789,192.168.1.102:6789:/ /cephfs/ -o name=admin,secretfile=/etc/ceph/admin.secret
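  • Instead of copying the key by hand from the auth export output, ceph auth get-key prints only the key and can be written straight into the secret file (assuming the default client.admin user):
# ceph auth get-key client.admin > /etc/ceph/admin.secret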
  • Unmount:
# umount /cephfs

6. Mount automatically at boot

  • vim /etc/fstab (note the fstab format: the filesystem type field is ceph, and there is no -o flag):
192.168.1.100:6789,192.168.1.101:6789,192.168.1.102:6789:/ /cephfs ceph name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev  0  0
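  • The entry can be tested without rebooting (assuming /cephfs is not currently mounted):
# mount -a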

Reposted from blog.csdn.net/u010953692/article/details/107367352