redis5 超详细的集群扩容攻略

 

搭建将要添加到集群的节点 (多实例部署在一台上面)


[root@localhost ~]# cd /usr/local/
[root@localhost local]# mkdir -p redis-cluster
[root@localhost ~]# cd /usr/local/redis-cluster/

#待添加到集群的节点,文件结构
[root@localhost redis-cluster]# mkdir -p 6379 6380 6381 6382 6383 6384 6385 6386

[root@localhost redis-cluster]#  /usr/src/redis-stable/utils/install_server.sh 
Welcome to the redis service installer
This script will help you easily set up a running redis server

Please select the redis port for this instance: [6379] 6380
Please select the redis config file name [/etc/redis/6380.conf] /usr/local/redis-cluster/6380/6380.conf
Please select the redis log file name [/var/log/redis_6380.log] /usr/local/redis-cluster/6380/6380.log
Please select the data directory for this instance [/var/lib/redis/6380] /usr/local/redis-cluster/6380
Please select the redis executable path [] /usr/local/redis/bin/redis-server
Selected config:
Port           : 6380
Config file    : /usr/local/redis-cluster/6380/6380.conf
Log file       : /usr/local/redis-cluster/6380/6380.log
Data dir       : /usr/local/redis-cluster/6380
Executable     : /usr/local/redis/bin/redis-server
Cli Executable : /usr/local/redis/bin/redis-cli
Is this ok? Then press ENTER to go on or Ctrl-C to abort.
Copied /tmp/6380.conf => /etc/init.d/redis_6380
Installing service...
Successfully added to chkconfig!
Successfully added to runlevels 345!
Starting Redis server...
Installation successful!



[root@localhost redis-cluster]# ps -ef | grep redis | grep -v grep
root       7429      1  0 00:34 ?        00:00:01 /usr/local/redis/bin/redis-server 0.0.0.0:6379 [cluster]
root       7514      1  0 00:41 ?        00:00:00 /usr/local/redis/bin/redis-server 127.0.0.1:6380



#修改集群节点的配置文件 6380.conf
[root@localhost ~]# vim /usr/local/redis-cluster/6380/6380.conf 
    (1)daemonize no ——> daemonize yes 

    (2)port 6379 修改为对应节点目录的端口号(本次修改为6380 6381 6382 6383 6384 6385 6386 )

    (3)bind 127.0.0.1 ——> bind 当前机器的IP地址(本次为192.168.168.104或者0.0.0.0)

    (4) dir 这一行 指定数据文件的存放路径。设置为各个节点下的目录。目录结构类似为"/usr/local/redis-cluster/节点目录/" (本次为 dir /usr/local/redis-cluster/6380/)

    (5)启动集群模式。cluster-enabled ——> cluster-enabled yes

    (6)设置集群的配置文件路径 。cluster-config-file nodes***.conf。***代表每个节点的目录对应的位置。本次修改为 nodes-6380.conf

     (7)修改appendonly 为yes 。然后按照6380节点配置格式分别修改6379 6381 6382 6383 6384 6385 6386
这几个节点的配置文件

[root@localhost ~]# /etc/init.d/redis_6380 restart
Stopping ...
Redis stopped
Starting Redis server...

[root@localhost ~]# cat /usr/local/redis-cluster/6380/nodes-6380.conf 
b5e14c3af20322eaa7823685bccf61c6b089f279 :0@0 myself,master - 0 0 0 connected
vars currentEpoch 0 lastVoteEpoch 0

[root@localhost ~]# ps -ef | grep redis | grep -v grep
root       7429      1  0 00:34 ?        00:00:02 /usr/local/redis/bin/redis-server 0.0.0.0:6379 [cluster]
root       7531      1  0 00:47 ?        00:00:00 /usr/local/redis/bin/redis-server 0.0.0.0:6380 [cluster]

 

创建集群(6379 6380 6381 6382 6383 6384)


[root@localhost ~]# ps -ef | grep redis
root       5808      1  0 20:46 ?        00:00:04 /usr/local/redis/bin/redis-server 0.0.0.0:6379 [cluster]
root      21258      1  0 21:25 ?        00:00:01 /usr/local/redis/bin/redis-server 0.0.0.0:6380 [cluster]
root      21283      1  0 21:29 ?        00:00:01 /usr/local/redis/bin/redis-server 0.0.0.0:6381 [cluster]
root      21296      1  0 21:31 ?        00:00:01 /usr/local/redis/bin/redis-server 0.0.0.0:6382 [cluster]
root      21307      1  0 21:32 ?        00:00:01 /usr/local/redis/bin/redis-server 0.0.0.0:6383 [cluster]
root      21321      1  0 21:34 ?        00:00:01 /usr/local/redis/bin/redis-server 0.0.0.0:6384 [cluster]

root      21494      1  0 21:48 ?        00:00:00 /usr/local/redis/bin/redis-server 0.0.0.0:6385 [cluster]
root      21504      1  0 21:49 ?        00:00:00 /usr/local/redis/bin/redis-server 0.0.0.0:6386 [cluster]


#将6379 6380 6381 6382 6383 6384组成集群
[root@localhost ~]# /usr/local/redis/bin/redis-cli --cluster create 127.0.0.1:6379 127.0.0.1:6380 127.0.0.1:6381 127.0.0.1:6382 127.0.0.1:6383 127.0.0.1:6384 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 127.0.0.1:6383 to 127.0.0.1:6379
Adding replica 127.0.0.1:6384 to 127.0.0.1:6380
Adding replica 127.0.0.1:6382 to 127.0.0.1:6381
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: ea7e259588114e3ca0838d6d1a6cf31d6ec32a5e 127.0.0.1:6379
   slots:[0-5460] (5461 slots) master
M: b5e14c3af20322eaa7823685bccf61c6b089f279 127.0.0.1:6380
   slots:[5461-10922] (5462 slots) master
M: 2871951af37cc65c26b7a867743bcf33be76537f 127.0.0.1:6381
   slots:[10923-16383] (5461 slots) master
S: 727baeec15790e5d0f97b9abbfa5e484fc6c6477 127.0.0.1:6382
   replicates b5e14c3af20322eaa7823685bccf61c6b089f279
S: d5259850145e0049a043022ed0d0d7709d0d971e 127.0.0.1:6383
   replicates 2871951af37cc65c26b7a867743bcf33be76537f
S: b77f57c0e1a7635fc16854ec7660831ba22085b9 127.0.0.1:6384
   replicates ea7e259588114e3ca0838d6d1a6cf31d6ec32a5e
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
...
>>> Performing Cluster Check (using node 127.0.0.1:6379)
M: ea7e259588114e3ca0838d6d1a6cf31d6ec32a5e 127.0.0.1:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: d5259850145e0049a043022ed0d0d7709d0d971e 127.0.0.1:6383
   slots: (0 slots) slave
   replicates 2871951af37cc65c26b7a867743bcf33be76537f
M: b5e14c3af20322eaa7823685bccf61c6b089f279 127.0.0.1:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: 727baeec15790e5d0f97b9abbfa5e484fc6c6477 127.0.0.1:6382
   slots: (0 slots) slave
   replicates b5e14c3af20322eaa7823685bccf61c6b089f279
M: 2871951af37cc65c26b7a867743bcf33be76537f 127.0.0.1:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: b77f57c0e1a7635fc16854ec7660831ba22085b9 127.0.0.1:6384
   slots: (0 slots) slave
   replicates ea7e259588114e3ca0838d6d1a6cf31d6ec32a5e
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.




Adding replica 127.0.0.1:6383 to 127.0.0.1:6379
Adding replica 127.0.0.1:6384 to 127.0.0.1:6380
Adding replica 127.0.0.1:6382 to 127.0.0.1:6381

 

集群创建完毕之后,添加两个服务节点6385,6386到集群(扩容)


[root@localhost 6379]# /usr/local/redis/bin/redis-cli --cluster add-node 127.0.0.1:6379 127.0.0.1:6385 
>>> Adding node 127.0.0.1:6379 to cluster 127.0.0.1:6385
>>> Performing Cluster Check (using node 127.0.0.1:6385)
M: a5865199238b35ae5c4e83b63e72e5ead59aa9d8 127.0.0.1:6385
   slots: (0 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[ERR] Not all 16384 slots are covered by nodes.



###注意命令中两个节点的先后顺序：新节点在前，集群中已有节点在后。顺序写反会导致集群扩容添加新节点失败！！！
错误顺序--cluster add-node 127.0.0.1:6379 127.0.0.1:6385 
正确顺序--cluster add-node 127.0.0.1:6385 127.0.0.1:6379 


#将监听在6385端口的redis实例加进集群,后面的节点随便指定一个即可,加入进去之后为master节点
[root@localhost 6379]# /usr/local/redis/bin/redis-cli --cluster add-node 127.0.0.1:6385 127.0.0.1:6379 
>>> Adding node 127.0.0.1:6385 to cluster 127.0.0.1:6379
>>> Send CLUSTER MEET to node 127.0.0.1:6385 to make it join the cluster.
[OK] New node added correctly.


[root@localhost 6379]# /usr/local/redis/bin/redis-cli --cluster add-node 127.0.0.1:6386 127.0.0.1:6379 
>>> Adding node 127.0.0.1:6386 to cluster 127.0.0.1:6379
>>> Send CLUSTER MEET to node 127.0.0.1:6386 to make it join the cluster.
[OK] New node added correctly.

 

重新分片


虽然6385节点添加成功,但是还没有分配散列槽,需要重新分片——也就是将其他节点上的部分散列槽移动到该节点上。这里将127.0.0.1:6385作为master节点,对其进行分片。

#此时查看集群中的节点发现,6385,6386默认均为master
#可以看到新加进去的节点并没有分配哈希槽(0 slots),而且没有从节点(0 slaves)
[root@localhost 6379]# /usr/local/redis/bin/redis-cli --cluster check 127.0.0.1:6379
127.0.0.1:6379 (ea7e2595...) -> 0 keys | 5461 slots | 1 slaves.
127.0.0.1:6386 (6c728b08...) -> 0 keys | 0 slots | 0 slaves.
127.0.0.1:6385 (a5865199...) -> 0 keys | 0 slots | 0 slaves.
127.0.0.1:6380 (b5e14c3a...) -> 0 keys | 5462 slots | 1 slaves.
127.0.0.1:6381 (2871951a...) -> 0 keys | 5461 slots | 1 slaves.

为master节点6385开始分片

#这个节点可以随便指定,重新分配,只需指定一个节点端口就行,redis-cli将自动找到其他节点
[root@localhost 6379]# /usr/local/redis/bin/redis-cli --cluster reshard 127.0.0.1:6385

#这里填写分配多少个槽给6385(指定分配多少散列槽)
How many slots do you want to move (from 1 to 16384)? 4096

#这里添加接收节点的ID,我们填写6385服务节点的ID(指定接收哈希槽的节点ID)
What is the receiving node ID? a5865199238b35ae5c4e83b63e72e5ead59aa9d8

#这里填写槽的来源,all表示是所有服务节点(指定从哪些节点获取散列槽)
Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: all

查看分片结果 

[root@localhost 6379]# /usr/local/redis/bin/redis-cli --cluster check 127.0.0.1:6385
127.0.0.1:6385 (a5865199...) -> 0 keys | 4096 slots | 0 slaves.
127.0.0.1:6381 (2871951a...) -> 0 keys | 4096 slots | 1 slaves.
127.0.0.1:6380 (b5e14c3a...) -> 0 keys | 4096 slots | 1 slaves.
127.0.0.1:6386 (6c728b08...) -> 0 keys | 0 slots | 0 slaves.
127.0.0.1:6379 (ea7e2595...) -> 0 keys | 4096 slots | 1 slaves.

#到这里,分片工作完成
127.0.0.1:6385 (a5865199...) -> 0 keys | 4096 slots | 0 slaves.

 

让6386节点变为6385的从节点


这里我们将6386服务节点作为6385的从节点,先登录6386服务节点

[root@localhost 6379]# /usr/local/redis/bin/redis-cli -p 6386
127.0.0.1:6386>

指定6386从节点的主节点ID,这里我们填写6385服务节点ID

127.0.0.1:6386> cluster replicate a5865199238b35ae5c4e83b63e72e5ead59aa9d8
OK

查看当前集群节点 

127.0.0.1:6386> cluster nodes
a5865199238b35ae5c4e83b63e72e5ead59aa9d8 127.0.0.1:6385@16385 master - 0 1592449703099 8 connected 0-1364 5461-6826 10923-12287
b5e14c3af20322eaa7823685bccf61c6b089f279 127.0.0.1:6380@16380 master - 0 1592449704560 2 connected 6827-10922
ea7e259588114e3ca0838d6d1a6cf31d6ec32a5e 127.0.0.1:6379@16379 master - 0 1592449703522 1 connected 1365-5460
6c728b0812447bb083b3f4a5afc166ec395c0dce 127.0.0.1:6386@16386 myself,slave a5865199238b35ae5c4e83b63e72e5ead59aa9d8 0 1592449704000 7 connected
2871951af37cc65c26b7a867743bcf33be76537f 127.0.0.1:6381@16381 master - 0 1592449704000 3 connected 12288-16383
727baeec15790e5d0f97b9abbfa5e484fc6c6477 127.0.0.1:6382@16382 slave b5e14c3af20322eaa7823685bccf61c6b089f279 0 1592449704144 2 connected
b77f57c0e1a7635fc16854ec7660831ba22085b9 127.0.0.1:6384@16384 slave ea7e259588114e3ca0838d6d1a6cf31d6ec32a5e 0 1592449705598 1 connected
d5259850145e0049a043022ed0d0d7709d0d971e 127.0.0.1:6383@16383 slave 2871951af37cc65c26b7a867743bcf33be76537f 0 1592449705186 3 connected


#可以看到集群扩容成功,6385为主节点,6386为从节点,至此集群扩容结束
a5865199238b35ae5c4e83b63e72e5ead59aa9d8 127.0.0.1:6385@16385 master - 0 1592449703099 8 connected 0-1364 5461-6826 10923-12287
6c728b0812447bb083b3f4a5afc166ec395c0dce 127.0.0.1:6386@16386 myself,slave a5865199238b35ae5c4e83b63e72e5ead59aa9d8 0 1592449704000 7 connected

[root@localhost 6379]# /usr/local/redis/bin/redis-cli --cluster check 127.0.0.1:6385
127.0.0.1:6385 (a5865199...) -> 0 keys | 4096 slots | 1 slaves.
127.0.0.1:6381 (2871951a...) -> 0 keys | 4096 slots | 1 slaves.
127.0.0.1:6380 (b5e14c3a...) -> 0 keys | 4096 slots | 1 slaves.
127.0.0.1:6379 (ea7e2595...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 0 keys in 4 masters.

 

 

猜你喜欢

转载自blog.csdn.net/qq_34556414/article/details/106822008