Building a TiDB cluster with Docker + Prometheus monitoring

Suppose we intend to deploy a TiDB cluster across 11 hosts:

Hostname   IP            Deployed Service   Data Disk Mount
host1      172.18.0.11   PD1                /data
host2      172.18.0.12   PD2                /data
host3      172.18.0.13   PD3                /data
host4      172.18.0.14   TiKV1              /data
host5      172.18.0.15   TiKV2              /data
host6      172.18.0.16   TiKV3              /data
host7      172.18.0.17   TiDB               -
host8      172.18.0.18   TiKV4              /data
host9      172.18.0.19   Pushgateway        -
host10     172.18.0.20   Prometheus         -
host11     172.18.0.21   Grafana            -

 

Create a new Docker subnet for the cluster:

docker network create --subnet=172.18.0.0/16 shadownet
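
To confirm that the network was created with the intended address range, it can be inspected (the --format filter below is just a convenience):

docker network inspect shadownet --format '{{(index .IPAM.Config 0).Subnet}}'
# expected output: 172.18.0.0/16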

 

/data/tidbconf/pd.toml

[metric]
# Prometheus client push interval; set it to "0s" to disable pushing.
interval = "15s"
# Prometheus Pushgateway address; leave it empty to disable pushing.
address = "172.18.0.19:9091"
job = "pd"

 

/data/tidbconf/tikv.toml

[metric]
# Prometheus client push interval; set it to "0s" to disable pushing.
interval = "15s"
# Prometheus Pushgateway address; leave it empty to disable pushing.
address = "172.18.0.19:9091"
# Prometheus client push job name. Note: a node ID is appended automatically, e.g. "tikv_1".
job = "tikv"

 

 

/data/tidbconf/tidb.toml

[status]
# Whether to enable the status report HTTP service.
report-status = true

# TiDB status port.
#status-port = 10080

# Prometheus Pushgateway address; leave it empty to disable pushing.
metrics-addr = "172.18.0.19:9091"

# Prometheus client push interval in seconds; set it to 0 to disable pushing.
metrics-interval = 15

job = "tidb"

 

 

 

/data/prometheus/prometheus.yml

global:

  scrape_interval: 15s

  scrape_timeout: 10s

  evaluation_interval: 15s

alerting:

  alertmanagers:

  - static_configs:

    - targets: []

    scheme: http

    timeout: 10s

scrape_configs:

- job_name: prometheus

  scrape_interval: 15s

  scrape_timeout: 10s

  metrics_path: /metrics

  scheme: http

  static_configs:

  - targets:

    - localhost:9090

- job_name: 'push-metrics'

  static_configs:

  - targets: ['172.18.0.19:9091']
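
Before starting Prometheus, this file can optionally be validated with promtool. A sketch that assumes promtool is shipped in the quay.io/prometheus/prometheus image used later (adjust if your image differs):

docker run --rm --entrypoint promtool \
  -v /data/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro \
  quay.io/prometheus/prometheus \
  check config /etc/prometheus/prometheus.yml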

 

 

 

docker run -d --name pd1 \

  --network=shadownet --ip=172.18.0.11 \

  --privileged \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/pd:latest \

  --name="pd1" \

  --data-dir="/data/pd1" \

  --client-urls="http://0.0.0.0:2379" \

  --advertise-client-urls="http://172.18.0.11:2379" \

  --peer-urls="http://0.0.0.0:2380" \

  --advertise-peer-urls="http://172.18.0.11:2380" \

  --initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \

  --config="/data/tidbconf/pd.toml" 

 

 

docker run -d --name pd2 \

  --network=shadownet --ip=172.18.0.12 \

  --privileged \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/pd:latest \

  --name="pd2" \

  --data-dir="/data/pd2" \

  --client-urls="http://0.0.0.0:2379" \

  --advertise-client-urls="http://172.18.0.12:2379" \

  --peer-urls="http://0.0.0.0:2380" \

  --advertise-peer-urls="http://172.18.0.12:2380" \

  --initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \

  --config="/data/tidbconf/pd.toml"

 

 

docker run -d --name pd3 \

  --network=shadownet --ip=172.18.0.13 \

  --privileged \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/pd:latest \

  --name="pd3" \

  --data-dir="/data/pd3" \

  --client-urls="http://0.0.0.0:2379" \

  --advertise-client-urls="http://172.18.0.13:2379" \

  --peer-urls="http://0.0.0.0:2380" \

  --advertise-peer-urls="http://172.18.0.13:2380" \

  --initial-cluster="pd1=http://172.18.0.11:2380,pd2=http://172.18.0.12:2380,pd3=http://172.18.0.13:2380" \

  --config="/data/tidbconf/pd.toml"

  

 

 

docker run -d --name tikv1 \

  --network=shadownet --ip=172.18.0.14 \

  --privileged \

  --ulimit nofile=1000000:1000000 \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tikv:latest \

  --addr="0.0.0.0:20160" \

  --advertise-addr="172.18.0.14:20160" \

  --data-dir="/data/tikv1" \

  --pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tikv.toml" 

  

  

 

 

docker run -d --name tikv2 \

  --network=shadownet --ip=172.18.0.15 \

  --privileged \

  --ulimit nofile=1000000:1000000 \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tikv:latest \

  --addr="0.0.0.0:20160" \

  --advertise-addr="172.18.0.15:20160" \

  --data-dir="/data/tikv2" \

  --pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tikv.toml" 

 

 

docker run -d --name tikv3 \

  --network=shadownet --ip=172.18.0.16 \

  --privileged \

  --ulimit nofile=1000000:1000000 \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tikv:latest \

  --addr="0.0.0.0:20160" \

  --advertise-addr="172.18.0.16:20160" \

  --data-dir="/data/tikv3" \

  --pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tikv.toml" 

  

 

docker run -d --name tikv4 \

  --network=shadownet --ip=172.18.0.18 \

  --privileged \

  --ulimit nofile=1000000:1000000 \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tikv:latest \

  --addr="0.0.0.0:20160" \

  --advertise-addr="172.18.0.18:20160" \

  --data-dir="/data/tikv4" \

  --pd="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tikv.toml"

 

 

docker run -d --name tidb1 \

  -p 4000:4000 \

  -p 10080:10080 \

  --network=shadownet --ip=172.18.0.17 \

  --privileged \

  -v /etc/localtime:/etc/localtime:ro \

  -v /data:/data \

  pingcap/tidb:latest \

  --store=tikv \

  --path="172.18.0.11:2379,172.18.0.12:2379,172.18.0.13:2379" \

  --config="/data/tidbconf/tidb.toml"

 

 

Connect to the TiDB test database with a standard MySQL client

 

Log in to host1, make sure the MySQL command-line client is installed, and run:

 

$ mysql -h 127.0.0.1 -P 4000 -u root -D test

mysql> show databases;

+--------------------+

| Database           |

+--------------------+

| INFORMATION_SCHEMA |

| PERFORMANCE_SCHEMA |

| mysql              |

| test               |

+--------------------+

4 rows in set (0.00 sec)

 

SET PASSWORD FOR 'root'@'%' = PASSWORD('test');
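
After changing the password, later connections need the -p option; for example, with the 'test' password set above:

mysql -h 127.0.0.1 -P 4000 -u root -ptest -D test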

 

Optional test container inside the subnet:

#docker run  -it --network=shadownet --ip=172.18.0.26 centos:6.8 /bin/bash

 

docker run --network=shadownet --ip=172.18.0.19 -d --name pushgateway  prom/pushgateway  

docker run --network=shadownet --ip=172.18.0.20 -d --privileged -v /data/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro \

 -p 9090:9090 --name prometheus   quay.io/prometheus/prometheus  

docker run --network=shadownet --ip=172.18.0.21 -d -p 3000:3000 --name grafana grafana/grafana
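
With Pushgateway and Prometheus running, it is worth checking that metrics are actually flowing; a rough sketch (the grep pattern is only illustrative):

# metrics pushed by PD/TiKV/TiDB should appear on the Pushgateway
curl -s http://172.18.0.19:9091/metrics | grep -E 'job="(pd|tikv|tidb)' | head

# Prometheus should report the push-metrics target as up
curl -s http://127.0.0.1:9090/api/v1/targets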

 

 

In Grafana, click the Grafana logo -> Dashboards -> Import -> upload the desired dashboard configuration file -> select the corresponding data source.
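
The Prometheus data source has to exist before the import can use it. It can be added in the Grafana UI, or, as a sketch using the default admin/admin credentials and an assumed data source name "tidb-cluster", via the Grafana HTTP API:

curl -s -X POST http://admin:admin@127.0.0.1:3000/api/datasources \
  -H 'Content-Type: application/json' \
  -d '{"name":"tidb-cluster","type":"prometheus","url":"http://172.18.0.20:9090","access":"proxy","isDefault":true}'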

 

 

 

Commands to clean up the test cluster

echo pd1 |xargs docker stop|xargs docker rm

echo pd2 |xargs docker stop|xargs docker rm

echo pd3 |xargs docker stop|xargs docker rm

echo tikv1 |xargs docker stop|xargs docker rm

echo tikv2 |xargs docker stop|xargs docker rm

echo tikv3 |xargs docker stop|xargs docker rm

echo tikv4 |xargs docker stop|xargs docker rm

echo tidb1 |xargs docker stop|xargs docker rm

 

echo pushgateway |xargs docker stop|xargs docker rm

echo prometheus |xargs docker stop|xargs docker rm

echo grafana |xargs docker stop|xargs docker rm
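
The same cleanup can be done in one pass; a sketch that force-removes every container from this walkthrough and then deletes the network:

for c in pd1 pd2 pd3 tikv1 tikv2 tikv3 tikv4 tidb1 pushgateway prometheus grafana; do
  docker rm -f "$c"
done
docker network rm shadownet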

 

 

References:

https://pingcap.com/docs-cn/op-guide/monitor/

https://github.com/pingcap/tidb/blob/master/config/config.toml.example

https://prometheus.io/docs/prometheus/latest/querying/basics/
