12 RabbitMQ cluster construction

3. RabbitMQ cluster construction

Abstract: In actual production applications, the cluster scheme of message queue will be adopted. If you choose RabbitMQ, it is necessary to understand the principle of its cluster scheme

Generally speaking, if you just want to learn RabbitMQ or verify the correctness of business logic, you can deploy a single instance in a local or test environment. However, considering issues such as reliability, concurrency, throughput, and message-accumulation capacity, a RabbitMQ cluster solution is generally used in production environments.

3.1 The principle of the cluster scheme

RabbitMQ is a message-queue middleware product written in Erlang, which is inherently distributed (realized by synchronizing the magic cookie of each node in an Erlang cluster), so RabbitMQ naturally supports clustering. This means RabbitMQ does not need to implement an HA scheme itself, nor save cluster metadata through ZooKeeper the way ActiveMQ and Kafka do. Clustering is a way to ensure reliability, and at the same time it can increase message throughput through horizontal scaling.

[External link picture transfer failed, the source site may have an anti-leeching mechanism, it is recommended to save the picture and upload it directly (img-KEaBUyen-1670818771338)(pic/1566073768274.png)]

3.2 Single-machine multi-instance deployment

Due to certain factors, sometimes you have to build a rabbitmq cluster on one machine, which is similar to the stand-alone version of zookeeper. The real production environment still needs to be configured as a multi-machine cluster. For how to configure multi-machine clusters, you can refer to other materials. Here we mainly discuss how to configure multiple rabbitmq instances in a single machine.

Mainly refer to official documents: https://www.rabbitmq.com/clustering.html

First make sure that RabbitMQ is running without problems

[root@super ~]# rabbitmqctl status
Status of node rabbit@super ...
[{pid,10232},
 {running_applications,
     [{rabbitmq_management,"RabbitMQ Management Console","3.6.5"},
      {rabbitmq_web_dispatch,"RabbitMQ Web Dispatcher","3.6.5"},
      {webmachine,"webmachine","1.10.3"},
      {mochiweb,"MochiMedia Web Server","2.13.1"},
      {rabbitmq_management_agent,"RabbitMQ Management Agent","3.6.5"},
      {rabbit,"RabbitMQ","3.6.5"},
      {os_mon,"CPO  CXC 138 46","2.4"},
      {syntax_tools,"Syntax tools","1.7"},
      {inets,"INETS  CXC 138 49","6.2"},
      {amqp_client,"RabbitMQ AMQP Client","3.6.5"},
      {rabbit_common,[],"3.6.5"},
      {ssl,"Erlang/OTP SSL application","7.3"},
      {public_key,"Public key infrastructure","1.1.1"},
      {asn1,"The Erlang ASN1 compiler version 4.0.2","4.0.2"},
      {ranch,"Socket acceptor pool for TCP protocols.","1.2.1"},
      {mnesia,"MNESIA  CXC 138 12","4.13.3"},
      {compiler,"ERTS  CXC 138 10","6.0.3"},
      {crypto,"CRYPTO","3.6.3"},
      {xmerl,"XML parser","1.3.10"},
      {sasl,"SASL  CXC 138 11","2.7"},
      {stdlib,"ERTS  CXC 138 10","2.8"},
      {kernel,"ERTS  CXC 138 10","4.2"}]},
 {os,{unix,linux}},
 {erlang_version,
     "Erlang/OTP 18 [erts-7.3] [source] [64-bit] [async-threads:64] [hipe] [kernel-poll:true]\n"},
 {memory,
     [{total,56066752},
      {connection_readers,0},
      {connection_writers,0},
      {connection_channels,0},
      {connection_other,2680},
      {queue_procs,268248},
      {queue_slave_procs,0},
      {plugins,1131936},
      {other_proc,18144280},
      {mnesia,125304},
      {mgmt_db,921312},
      {msg_index,69440},
      {other_ets,1413664},
      {binary,755736},
      {code,27824046},
      {atom,1000601},
      {other_system,4409505}]},
 {alarms,[]},
 {listeners,[{clustering,25672,"::"},{amqp,5672,"::"}]},
 {vm_memory_high_watermark,0.4},
 {vm_memory_limit,411294105},
 {disk_free_limit,50000000},
 {disk_free,13270233088},
 {file_descriptors,
     [{total_limit,924},{total_used,6},{sockets_limit,829},{sockets_used,0}]},
 {processes,[{limit,1048576},{used,262}]},
 {run_queue,0},
 {uptime,43651},
 {kernel,{net_ticktime,60}}]

stop rabbitmq service

[root@super sbin]# service rabbitmq-server stop
Stopping rabbitmq-server: rabbitmq-server.

Start the first node:

[root@super sbin]# RABBITMQ_NODE_PORT=5673 RABBITMQ_NODENAME=rabbit1 rabbitmq-server start

              RabbitMQ 3.6.5. Copyright (C) 2007-2016 Pivotal Software, Inc.
  ##  ##      Licensed under the MPL.  See http://www.rabbitmq.com/
  ##  ##
  ##########  Logs: /var/log/rabbitmq/rabbit1.log
  ######  ##        /var/log/rabbitmq/rabbit1-sasl.log
  ##########
              Starting broker...
 completed with 6 plugins.

Start the second node:

The first node already occupies the default port of the web management plug-in, so the port used by the second node's web plug-in must also be specified explicitly.

[root@super ~]# RABBITMQ_NODE_PORT=5674 RABBITMQ_SERVER_START_ARGS="-rabbitmq_management listener [{port,15674}]" RABBITMQ_NODENAME=rabbit2 rabbitmq-server start

              RabbitMQ 3.6.5. Copyright (C) 2007-2016 Pivotal Software, Inc.
  ##  ##      Licensed under the MPL.  See http://www.rabbitmq.com/
  ##  ##
  ##########  Logs: /var/log/rabbitmq/rabbit2.log
  ######  ##        /var/log/rabbitmq/rabbit2-sasl.log
  ##########
              Starting broker...
 completed with 6 plugins.

End command:

rabbitmqctl -n rabbit1 stop
rabbitmqctl -n rabbit2 stop

Rabbit1 operates as the master node:

[root@super ~]# rabbitmqctl -n rabbit1 stop_app  
Stopping node rabbit1@super ...
[root@super ~]# rabbitmqctl -n rabbit1 reset     
Resetting node rabbit1@super ...
[root@super ~]# rabbitmqctl -n rabbit1 start_app
Starting node rabbit1@super ...
[root@super ~]# 

rabbit2 operates as a slave node:

[root@super ~]# rabbitmqctl -n rabbit2 stop_app
Stopping node rabbit2@super ...
[root@super ~]# rabbitmqctl -n rabbit2 reset
Resetting node rabbit2@super ...
[root@super ~]# rabbitmqctl -n rabbit2 join_cluster rabbit1@'super'   ### the name inside '' is the hostname — replace it with your own
Clustering node rabbit2@super with rabbit1@super ...
[root@super ~]# rabbitmqctl -n rabbit2 start_app
Starting node rabbit2@super ...

View cluster status:

[root@super ~]# rabbitmqctl cluster_status -n rabbit1
Cluster status of node rabbit1@super ...
[{nodes,[{disc,[rabbit1@super,rabbit2@super]}]},
 {running_nodes,[rabbit2@super,rabbit1@super]},
 {cluster_name,<<"rabbit1@super">>},
 {partitions,[]},
 {alarms,[{rabbit2@super,[]},{rabbit1@super,[]}]}]

web monitoring:

[External link picture transfer failed, the source site may have an anti-leeching mechanism, it is recommended to save the picture and upload it directly (img-TGAqrXj2-1670818771339)(pic\1566065096459.png)]

3.3 Cluster Management

rabbitmqctl join_cluster {cluster_node} [--ram]
joins the node to the specified cluster. It is necessary to stop the RabbitMQ application and reset the node before executing this command.

rabbitmqctl cluster_status
shows the status of the cluster.

rabbitmqctl change_cluster_node_type {disc|ram}
modifies the type of the cluster node. The RabbitMQ application needs to be stopped before this command is executed.

rabbitmqctl forget_cluster_node [--offline]
deletes the node from the cluster, allowing offline execution.

rabbitmqctl update_cluster_nodes {clusternode}

Consults the latest information about the clusternode node and updates the local node's cluster information accordingly, before the node's application is started. Unlike join_cluster, it does not join a cluster. Consider this situation: nodes A and B are both in a cluster; while node A is offline, node C forms a cluster with node B, and then node B leaves the cluster. When A wakes up, it will try to contact node B, but this fails because node B is no longer in its cluster; node A needs update_cluster_nodes to learn the new cluster membership.

rabbitmqctl cancel_sync_queue [-p vhost] {queue}
cancels the operation of queue queue synchronization mirroring.

rabbitmqctl set_cluster_name {name}
sets the cluster name. The cluster name is announced to clients when they connect. The Federation and Shovel plugins also use cluster names. The cluster name defaults to the name of the first node in the cluster, which can be reset through this command.

3.4 RabbitMQ mirror cluster configuration

The default RabbitMQ cluster mode configured above does not guarantee the high availability of queues. Although exchanges and bindings can be replicated to any node in the cluster, queue contents are not. This mode distributes load across nodes, but if the node hosting a queue goes down, that queue becomes unavailable and can only be recovered by restarting the node. Therefore, for a queue to remain usable when its node goes down or fails, the queue contents must be replicated to the other nodes of the cluster — that is, a mirrored queue must be created.

The mirror queue is based on the ordinary cluster mode, and then some policies are added, so you still have to configure the ordinary cluster first, and then you can set up the mirror queue. We will continue with the above cluster.

The mirror queue can be set through Admin->Policies on the management side of the opened web page, or through commands.

rabbitmqctl set_policy my_ha "^" '{"ha-mode":"all"}'

  • Name: Policy name
  • Pattern: the matching rule, if it matches all queues, it is ^.
  • Definition: Use all in ha-mode mode, that is, synchronize all matching queues. Question mark links to help documentation.

3.5 Load Balancing - HAProxy

HAProxy provides high availability, load balancing, and proxy based on TCP and HTTP applications, and supports virtual hosts. It is a free, fast and reliable solution. Many well-known Internet companies including Twitter, Reddit, StackOverflow, and GitHub are in use. HAProxy implements an event-driven, single-process model that supports very large numbers of concurrent connections.

3.5.1 Install HAProxy
# install build dependencies
yum install gcc vim wget
# upload the haproxy source package
# extract it
tar -zxvf haproxy-1.6.5.tar.gz -C /usr/local
# enter the directory, compile and install
cd /usr/local/haproxy-1.6.5
make TARGET=linux31 PREFIX=/usr/local/haproxy
make install PREFIX=/usr/local/haproxy
mkdir /etc/haproxy
# create a dedicated haproxy group and user
groupadd -r -g 149 haproxy
useradd -g haproxy -r -s /sbin/nologin -u 149 haproxy
# create the haproxy configuration file
mkdir /etc/haproxy
vim /etc/haproxy/haproxy.cfg
3.5.2 Configuring HAProxy

Configuration file path: /etc/haproxy/haproxy.cfg

#logging options
global
    log 127.0.0.1 local0 info
    maxconn 5120
    chroot /usr/local/haproxy
    uid 99
    gid 99
    daemon
    quiet
    nbproc 20
    pidfile /var/run/haproxy.pid

defaults
    log global
    
    mode tcp

    option tcplog
    option dontlognull
    retries 3
    option redispatch
    maxconn 2000
    contimeout 5s
   
     clitimeout 60s

     srvtimeout 15s    
#front-end IP for consumers and producters

listen rabbitmq_cluster
    bind 0.0.0.0:5672
    
    mode tcp
    #balance url_param userid
    #balance url_param session_id check_post 64
    #balance hdr(User-Agent)
    #balance hdr(host)
    #balance hdr(Host) use_domain_only
    #balance rdp-cookie
    #balance leastconn
    #balance source //ip
    
    balance roundrobin
    
        server node1 127.0.0.1:5673 check inter 5000 rise 2 fall 2
        server node2 127.0.0.1:5674 check inter 5000 rise 2 fall 2

listen stats
    bind 172.16.98.133:8100
    mode http
    option httplog
    stats enable
    stats uri /rabbitmq-stats
    stats refresh 5s

Start the HAProxy load balancer:

/usr/local/haproxy/sbin/haproxy -f /etc/haproxy/haproxy.cfg
# check the haproxy process status
ps -ef | grep haproxy

Visit the following address to monitor the MQ nodes:
http://172.16.98.133:8100/rabbitmq-stats

In application code, instead of connecting to the MQ cluster nodes directly, connect to the HAProxy address on port 5672.

Guess you like

Origin blog.csdn.net/weixin_68930048/article/details/128285287