MongoDB分片集群部署

简介

采用三台服务器搭建分片集群:共三个分片,每个分片由一主、一备、一仲裁组成,且各分片的主备节点分散部署在不同的服务器上。

一、安装

1、解压

tar zxf mongodb-linux-x86_64-rhel70-3.6.4.tgz
cp -r mongodb-linux-x86_64-rhel70-3.6.4 /data/mongodb

# Cluster keyfile for internal authentication -- generate it ONCE and share
# the same file across all nodes. Every mongod/mongos config below references
# /data/mongodb/conf/keyfile, so place it there (not in the current directory).
mkdir -p /data/mongodb/conf
openssl rand -base64 745 > /data/mongodb/conf/keyfile
chmod 600 /data/mongodb/conf/keyfile
# Copy this same keyfile to /data/mongodb/conf/ on every other node (mode 600);
# mongod refuses to start if the keyfile is group- or world-accessible.

2、配置config server 副本集

#node1
mkdir -p /data/mongodb/conf
mkdir -p /data/mongodb/mongoc/log/
mkdir -p /data/mongodb/mongoc/data
chmod 600 keyfile
vim mongoc.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/mongoc/log/mongoc.log

storage:
  dbPath: /data/mongodb/mongoc/data
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/mongoc/log/mongoc.pid

net:
  port: 20001
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: cfgReplSet

sharding:
  clusterRole: configsvr

security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile


#node2
mkdir -p /data/mongodb/conf
mkdir -p /data/mongodb/mongoc/log/
mkdir -p /data/mongodb/mongoc/data
chmod 600 keyfile
vim mongoc.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/mongoc/log/mongoc.log

storage:
  dbPath: /data/mongodb/mongoc/data
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/mongoc/log/mongoc.pid

net:
  port: 20001
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: cfgReplSet

sharding:
  clusterRole: configsvr

security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile

  
#node3
mkdir -p /data/mongodb/conf
mkdir -p /data/mongodb/mongoc/log/
mkdir -p /data/mongodb/mongoc/data
vim mongoc.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/mongoc/log/mongoc.log

storage:
  dbPath: /data/mongodb/mongoc/data
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/mongoc/log/mongoc.pid

net:
  port: 20001
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: cfgReplSet

sharding:
  clusterRole: configsvr

security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile
  
#启动config server
#node1
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/mongoc.conf

#node2
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/mongoc.conf

#node3
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/mongoc.conf

#配置config副本集
./mongo --port 20001
> use admin
> config = {
     _id:"cfgReplSet",
     members:[
         { _id:0, host:"192.168.168.235:20001" },
         { _id:1, host:"192.168.168.236:20001" },
         { _id:2, host:"192.168.168.237:20001" }
     ] }       #定义副本集
{
    
    
	"_id" : "cfgReplSet",
	"members" : [
		{
    
    
			"_id" : 0,
			"host" : "192.168.168.235:20001"
		},
		{
    
    
			"_id" : 1,
			"host" : "192.168.168.236:20001"
		},
		{
    
    
			"_id" : 2,
			"host" : "192.168.168.237:20001"
		}
	]
}


> rs.initiate(config)     #初始化副本集

3、配置mongos

#添加配置mongos配置文件
#node1
mkdir -p /data/mongodb/mongos/log
cd /data/mongodb/conf
vim mongos.conf
systemLog:
  destination: file
  path: /data/mongodb/mongos/log/mongos.log
  logAppend: true

net:
  port: 20000
  bindIp: 0.0.0.0
sharding:
  configDB: cfgReplSet/192.168.168.235:20001,192.168.168.236:20001,192.168.168.237:20001

processManagement:
  fork: true
  pidFilePath: /data/mongodb/mongos/log/mongos.pid
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile

  
#node2
mkdir -p /data/mongodb/mongos/log
cd /data/mongodb/conf
vim mongos.conf
systemLog:
  destination: file
  path: /data/mongodb/mongos/log/mongos.log
  logAppend: true

net:
  port: 20000
  bindIp: 0.0.0.0
sharding:
  #autoSplit: true
  configDB: cfgReplSet/192.168.168.235:20001,192.168.168.236:20001,192.168.168.237:20001
  #chunkSize: 64

processManagement:
  fork: true
  pidFilePath: /data/mongodb/mongos/log/mongos.pid
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile
  
#node3
mkdir -p  /data/mongodb/mongos/log
cd /data/mongodb/conf
vim mongos.conf
systemLog:
  destination: file
  path: /data/mongodb/mongos/log/mongos.log
  logAppend: true

net:
  port: 20000
  bindIp: 0.0.0.0
sharding:
  #autoSplit: true
  configDB: cfgReplSet/192.168.168.235:20001,192.168.168.236:20001,192.168.168.237:20001
  #chunkSize: 64

processManagement:
  fork: true
  pidFilePath: /data/mongodb/mongos/log/mongos.pid
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile


#启动mongos
#node1
cd /data/mongodb/bin
./mongos -f /data/mongodb/conf/mongos.conf 

#node2
cd /data/mongodb/bin
./mongos -f /data/mongodb/conf/mongos.conf 

#node3
cd /data/mongodb/bin
./mongos -f /data/mongodb/conf/mongos.conf 

4、shard1副本集集群部署

#node1
mkdir -p /data/mongodb/shard1/log
mkdir -p /data/mongodb/shard1/data
vim shard1.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard1/log/mongod.log

storage:
  dbPath: /data/mongodb/shard1/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard1/log/mongod.pid

net:
  port: 20002
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard1

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile

#node2
mkdir -p /data/mongodb/shard1/log
mkdir -p /data/mongodb/shard1/data
vim shard1.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard1/log/mongod.log

storage:
  dbPath: /data/mongodb/shard1/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard1/log/mongod.pid

net:
  port: 20002
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard1

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile
  

#node3
mkdir -p /data/mongodb/shard1/log
mkdir -p /data/mongodb/shard1/data
vim shard1.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard1/log/mongod.log

storage:
  dbPath: /data/mongodb/shard1/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard1/log/mongod.pid

net:
  port: 20002
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard1

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile


#启动shard1
#node1
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard1.conf

#node2
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard1.conf

#node3
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard1.conf

#配置shard1副本集集群
./mongo --port 20002
use admin
config = {
        _id : "shard1",
        members : [
            { _id : 0, host : "192.168.168.235:20002" },
            { _id : 1, host : "192.168.168.236:20002" },
            { _id : 2, host : "192.168.168.237:20002", arbiterOnly: true }
       ]
}
输出:
{
    
    
	"_id" : "shard1",
	"members" : [
		{
    
    
			"_id" : 0,
			"host" : "192.168.168.235:20002"
		},
		{
    
    
			"_id" : 1,
			"host" : "192.168.168.236:20002"
		},
		{
    
    
			"_id" : 2,
			"host" : "192.168.168.237:20002",
			"arbiterOnly" : true
		}
	]
}


#初始化副本集
rs.initiate(config)

5、shard2副本集集群部署

#node1
mkdir -p /data/mongodb/shard2/log
mkdir -p /data/mongodb/shard2/data
vim shard2.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard2/log/mongod.log

storage:
  dbPath: /data/mongodb/shard2/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard2/log/mongod.pid

net:
  port: 20003
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard2

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile

#node2
mkdir -p /data/mongodb/shard2/log
mkdir -p /data/mongodb/shard2/data
vim shard2.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard2/log/mongod.log

storage:
  dbPath: /data/mongodb/shard2/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard2/log/mongod.pid

net:
  port: 20003
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard2

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile
  

#node3
mkdir -p /data/mongodb/shard2/log
mkdir -p /data/mongodb/shard2/data
vim shard2.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard2/log/mongod.log

storage:
  dbPath: /data/mongodb/shard2/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard2/log/mongod.pid

net:
  port: 20003
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard2

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile


#启动shard2
#node1
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard2.conf

#node2
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard2.conf

#node3
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard2.conf

#配置shard2副本集集群
./mongo --port 20003
use admin
config = {
        _id : "shard2",
        members : [
            { _id : 0, host : "192.168.168.235:20003" },
            { _id : 1, host : "192.168.168.236:20003", arbiterOnly: true },
            { _id : 2, host : "192.168.168.237:20003" }
       ]
}
输出:
{
    
    
	"_id" : "shard2",
	"members" : [
		{
    
    
			"_id" : 0,
			"host" : "192.168.168.235:20003"
		},
		{
			"_id" : 1,
			"host" : "192.168.168.236:20003",
			"arbiterOnly" : true
		},
		{
			"_id" : 2,
			"host" : "192.168.168.237:20003"
		}
	]
}

#初始化副本集
rs.initiate(config)

6、shard3副本集集群部署

#node1
mkdir -p /data/mongodb/shard3/log
mkdir -p /data/mongodb/shard3/data
vim shard3.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard3/log/mongod.log

storage:
  dbPath: /data/mongodb/shard3/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard3/log/mongod.pid

net:
  port: 20004
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard3

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile

#node2
mkdir -p /data/mongodb/shard3/log
mkdir -p /data/mongodb/shard3/data
vim shard3.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard3/log/mongod.log

storage:
  dbPath: /data/mongodb/shard3/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard3/log/mongod.pid

net:
  port: 20004
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard3

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile
  

#node3
mkdir -p /data/mongodb/shard3/log
mkdir -p /data/mongodb/shard3/data
vim shard3.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/shard3/log/mongod.log

storage:
  dbPath: /data/mongodb/shard3/data
  journal:
    enabled: true
  wiredTiger:
    engineConfig:
       cacheSizeGB: 30
  directoryPerDB: true

processManagement:
  fork: true
  pidFilePath: /data/mongodb/shard3/log/mongod.pid

net:
  port: 20004
  bindIp: 0.0.0.0

replication:
  oplogSizeMB: 500
  replSetName: shard3

sharding:
  clusterRole: shardsvr
security:
  clusterAuthMode: keyFile
  keyFile: /data/mongodb/conf/keyfile


#启动shard3
#node1
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard3.conf

#node2
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard3.conf

#node3
cd /data/mongodb/bin
./mongod -f /data/mongodb/conf/shard3.conf

#配置shard3副本集集群
./mongo --port 20004
use admin
config = {
        _id : "shard3",
        members : [
            { _id : 0, host : "192.168.168.235:20004", arbiterOnly: true },
            { _id : 1, host : "192.168.168.236:20004" },
            { _id : 2, host : "192.168.168.237:20004" }
       ]
}
输出:
{
    
    
	"_id" : "shard3",
	"members" : [
		{
    
    
			"_id" : 0,
			"host" : "192.168.168.235:20004",
			"arbiterOnly" : true
		},
		{
    
    
			"_id" : 1,
			"host" : "192.168.168.236:20004"
		},
		{
    
    
			"_id" : 2,
			"host" : "192.168.168.237:20004"
		}
	]
}

#初始化副本集
rs.initiate(config)

二、分片配置

默认情况下,第一个添加的 shard 就是数据库的主 shard(primary shard),所有未被分片的集合都存放在主 shard 上。对集合进行分片时,分片键上必须存在索引:如果集合中已经有数据,需要先手动在分片键上创建索引,再执行分片;如果集合是空的,分片时会自动创建该索引,无需手动创建。

#登陆mongos配置分片,向分区集群中添加shard服务器和副本集
./mongo --port 20000

mongos> use admin
mongos> sh.status()  #查看分片状态

#添加shard副本集
sh.addShard("shard1/192.168.168.235:20002,192.168.168.236:20002,192.168.168.237:20002")
sh.addShard("shard2/192.168.168.235:20003,192.168.168.236:20003,192.168.168.237:20003")
sh.addShard("shard3/192.168.168.235:20004,192.168.168.236:20004,192.168.168.237:20004")

#创建数据库
use ceshi
#插入数据
for(var i=1;i<=10000;i++){
    db.data.insert({ x:i, name:"MACLEAN", name1:"MACLEAN", name2:"MACLEAN", name3:"MACLEAN" })
}

#指定哪个数据库启用分片(下面两条命令作用相同,执行其中一条即可)
#注意:上面的示例数据写入的是 ceshi 库,此处以 fatboycollect 库为例,请按实际库名替换
db.runCommand( { enablesharding :"fatboycollect"});
sh.enableSharding("fatboycollect")

#创建集合,这样创建才会生成分片
db.runCommand({ shardcollection:"fatboycollect.other-appsflyer", key:{ _id:"hashed" } })

结语

之前公司使用MongoDB来存储风控的相关数据,上面是MongoDB分片集群部署,希望对大家有所帮助。由于时间跨度比较长,导致安装文档不是很详细,大家有什么问题可以私信我。

Guess you like

Origin blog.csdn.net/qq_37837432/article/details/121579745