Using Redis as a buffer/cache to relieve pressure on the Elasticsearch service.
The architecture is as follows:
Nginx (JSON logs) -> Filebeat -> Redis -> Logstash -> Elasticsearch <- Kibana
1. Installation Configuration redis
# Install and start Redis, then smoke-test it with a simple SET/GET round trip.
yum -y install redis
systemctl start redis
redis-cli set k1 v1
redis-cli get k1   # should print "v1" — confirms Redis is answering
2. Configure filebeat
# Filebeat: ship Nginx access/error logs into Redis lists.
# NOTE(review): indentation restored — the pasted original was flattened
# and would not parse as YAML.
filebeat.inputs:
  # Access log is written by Nginx in JSON format (see step 3);
  # decode it so the JSON fields land at the event root.
  - type: log
    enabled: true
    paths:
      - /var/log/nginx/access.log
    json.keys_under_root: true
    json.overwrite_keys: true
    tags: ["access"]
  # Error log stays plain text; only tag it for routing.
  - type: log
    enabled: true
    paths:
      - /var/log/nginx/error.log
    tags: ["error"]

# Route events into different Redis lists based on their tag.
output.redis:
  hosts: ["172.16.1.51"]
  keys:
    - key: "nginx_access"
      when.contains:
        tags: "access"
    - key: "nginx_error"
      when.contains:
        tags: "error"

# Template settings are not used by the Redis output, but are kept so the
# same file still works if the output is switched back to Elasticsearch.
setup.template.name: "nginx"
setup.template.pattern: "nginx_*"
setup.template.enabled: false
setup.template.overwrite: true
3. Make sure Nginx writes the access log in JSON format, then generate traffic
# Truncate the access log so the test run starts from a clean slate.
> /var/log/nginx/access.log

# Generate 100 requests (10 concurrent) to produce fresh log entries.
ab -c 10 -n 100 http://localhost/oldxu

# Restart Filebeat so it picks up the config from step 2.
# (fixed typo: original said "systectl", which is not a command)
systemctl restart filebeat

# Inspect Redis: the events should be queued in the "nginx_access" list.
redis-cli
keys *
TYPE nginx_access                # expect "list"
LLEN nginx_access                # expect ~100 queued events
LRANGE nginx_access 1 2          # peek at entries; note LRANGE indices are 0-based
5. Installation Configuration logstash
[root@db01 ~]# cat /etc/logstash/conf.d/redis.conf
input {
  # Pull the events that Filebeat pushed into the Redis lists (step 2).
  redis {
    host      => "172.16.1.51"
    port      => "6379"
    db        => "0"
    key       => "nginx_access"
    data_type => "list"
  }
  redis {
    # NOTE(review): the original used host 10.0.0.51 here while everything
    # else (Filebeat output, the input above) uses 172.16.1.51 — the error
    # events would never be consumed. Changed to match; confirm whether
    # 10.0.0.51 is a second NIC of the same host before deploying.
    host      => "172.16.1.51"
    port      => "6379"
    db        => "0"
    key       => "nginx_error"
    data_type => "list"
  }
}

filter {
  # Cast the Nginx timing fields to floats so Kibana can aggregate on them.
  # Harmless for error events, which simply lack these fields.
  mutate {
    convert => ["upstream_time", "float"]
    convert => ["request_time", "float"]
  }
}

output {
  stdout {}   # console echo for debugging; remove in production

  # Route by the tag Filebeat attached, one monthly index per stream.
  if "access" in [tags] {
    elasticsearch {
      hosts           => "http://localhost:9200"
      manage_template => false
      index           => "nginx_access-%{+yyyy.MM}"
    }
  }
  if "error" in [tags] {
    elasticsearch {
      hosts           => "http://localhost:9200"
      manage_template => false
      index           => "nginx_error-%{+yyyy.MM}"
    }
  }
}
6. Start Logstash
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis.conf
7. Check whether Logstash has consumed (removed) the events from Redis
# The list length should drop toward 0 as Logstash drains the queue.
redis-cli LLEN nginx_access
8. View the new indices in es-head and Kibana