ELK+kafka+Filebeat

1. Architecture diagram
ELK+kafka+Filebeat

The official download link of the package: https://www.elastic.co/cn/downloads/
2. Deploy ElasticSearch

cd /opt/src
tar xf  elasticsearch-7.10.2-linux-x86_64.tar.gz -C /opt/
ln -s elasticsearch-7.10.2 /opt/elasticsearch
cd /opt/elasticsearch

mkdir -p /data/elasticsearch/{data,logs}

Configure elasticsearch.yml

[root@hdss-52 opt]# egrep -v "^#|^$" elasticsearch/config/elasticsearch.yml 

cluster.name: es.kcwl.com
node.name: hdss-52.host.com
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/logs
bootstrap.memory_lock: true
network.host: 172.16.90.52
http.port: 9200
discovery.seed_hosts: ["127.0.0.1"]
cluster.initial_master_nodes: ["hdss-52.host.com"]
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true

Set jvm parameters

elasticsearch]# vi config/jvm.options
# Set according to your environment. -Xms and -Xmx should be the same value; about half of the machine's memory is recommended.
-Xms512m 
-Xmx512m

Create normal user

useradd -s /bin/bash -M es
chown -R es.es /opt/elasticsearch-7.10.2
chown -R es.es /data/elasticsearch/

Adjust file descriptor

vim /etc/security/limits.d/es.conf
es hard nofile 65536
es soft fsize unlimited
es hard memlock unlimited
es soft memlock unlimited

Adjust kernel parameters

sysctl -w vm.max_map_count=262144
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p

Start es service

su -c "/opt/elasticsearch/bin/elasticsearch -d" es

netstat -luntp|grep 9200
tcp6       0      0 172.16.90.52:9200       :::*                    LISTEN      15501/java

Adjust ES log template

# Note: in production use 3 replicas; this ES is a single node, so replicas must stay at 0.
# (The legacy "template" field was removed in ES 7.x; use "index_patterns" only.)
curl -XPUT http://172.16.90.52:9200/_template/k8s -H 'Content-Type: application/json' -d '{
 "index_patterns": ["k8s*"],
 "settings": {
  "number_of_shards": 5,
  "number_of_replicas": 0
 }
}'

3. Deploy Kafka
Before deploying Kafka, first deploy the ZooKeeper service.
Download: https://archive.apache.org/dist/zookeeper/

 tar zxvf /usr/local/src/zookeeper-3.4.14.tar.gz -C /opt/
 ln -s /opt/zookeeper-3.4.14/ /opt/zookeeper
 mkdir -pv /data/zookeeper/data /data/zookeeper/logs

Configuration file modification:

cat /opt/zookeeper/conf/zoo.cfg

tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper/data
dataLogDir=/data/zookeeper/logs
clientPort=2181

Start service

/opt/zookeeper/bin/zkServer.sh start

netstat -lntp| grep 2181
tcp6 0 0 :::2181 :::* LISTEN 108043/java

Deploy Kafka to download the installation package

cd /opt/src
wget https://archive.apache.org/dist/kafka/2.2.0/kafka_2.12-2.2.0.tgz
tar xf kafka_2.12-2.2.0.tgz -C /opt/
ln -s /opt/kafka_2.12-2.2.0/ /opt/kafka
cd /opt/kafka

mkdir /data/kafka/logs -p

Change setting

[root@hdss-52 opt]# egrep -v "^#|^$" kafka/config/server.properties 
broker.id=0
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka/logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.flush.interval.messages=10000
log.flush.interval.ms=1000
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
host.name=hdss-52.host.com

Start kafka

bin/kafka-server-start.sh -daemon config/server.properties

netstat -luntp|grep 9092
tcp6       0      0 172.16.90.52:9092       :::*                    LISTEN      2026/java

4. Deploy logstash (deployed by docker here),
download the official image and push it to your own library.

docker pull docker.elastic.co/logstash/logstash:7.10.2
docker tag  d0a2dac51fcb harbor.china95059.com.cn/infra/logstash:v7.10.2
docker push  harbor.china95059.com.cn/infra/logstash:v7.10.2

Create the configuration directory

mkdir /etc/logstash/

Create project A configuration file

[root@hdss-6 logstash]# cat logstash-sx.conf 
input {
    kafka {
      bootstrap_servers => "172.16.90.52:9092"
      topics => ["shengxian"]
      group_id => "shengxian"
      codec => "json"
  }
}

filter {
  json {
    source => "message"
  }
}

output {
  if [filetype] == "app" {
    elasticsearch {
      hosts => ["http://172.16.90.52:9200"]
      user => "elastic"
      password => "lvcheng@2015"
      index => "sx_app-%{+YYYY.MM}"
    }
  }
  else if [filetype] == "web" {
    elasticsearch {
      hosts => ["http://172.16.90.52:9200"]
      user => "elastic"
      password => "lvcheng@2015"
      index => "sx_web-%{+YYYY.MM}"
    }
  }
}

Start logstash

docker run -it -d  --restart=always --name logstash-sx-prod -v /etc/logstash:/etc/logstash harbor.china95059.com.cn/infra/logstash:v7.10.2 -f /etc/logstash/logstash-sx.conf

If there is a second project, start another logstash container to consume that project's log topic.
ELK+kafka+Filebeat

5. Deploy Kibana

tar zxvf kibana-7.10.2-linux-x86_64.tar.gz 
mv kibana-7.10.2-linux-x86_64 kibana-7.10.2
mv kibana-7.10.2 ../
ln -s /opt/kibana-7.10.2/ /opt/kibana

Configuration file

egrep -v "^#|^$" /opt/kibana/config/kibana.yml 
server.port: 5601
server.host: "172.16.90.6"
elasticsearch.hosts: ["http://172.16.90.52:9200"]
elasticsearch.username: "kibana"
elasticsearch.password: "123456"
i18n.locale: "zh-CN"

Start Kibana

nohup ./bin/kibana --allow-root &

6. Deploy filebeat
Add host name resolution:

cat /etc/hosts

122.224.207.90 hdss-52.host.com

Unzip the installation package

tar zxvf filebeat-7.5.1-linux-x86_64.tar.gz
mv filebeat-7.5.1-linux-x86_64 filebeat-7.5.1
ln -s /opt/filebeat-7.5.1 /opt/filebeat

Configuration file writing

cat /opt/filebeat/filebeat.yml 
filebeat.inputs:
- type: log
  fields_under_root: true
  fields:
    filetype: web
  paths:
    - /home/project/shangye/log/shangye-web*.log
  scan_frequency: 120s
  max_bytes: 10485760
  multiline.pattern: ^\d{2}
  multiline.negate: true
  multiline.match: after
  multiline.max_lines: 100

output.kafka:
  hosts: ["172.16.90.52:9092"]
  topic: shengxian
  version: 2.0.0      
  required_acks: 0
  max_message_bytes: 10485760

Start service

./filebeat -e -c /opt/filebeat/filebeat.yml

7. Create ES user password

[root@node01 elasticsearch-7.7.0]# bin/elasticsearch-setup-passwords interactive
future versions of Elasticsearch will require Java 11; your Java version from [/opt/app/jdk1.8.0_181/jre] does not meet this requirement
Initiating the setup of passwords for reserved users elastic,apm_system,kibana,logstash_system,beats_system,remote_monitoring_user.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N]y

Enter password for [elastic]: 
Reenter password for [elastic]: 
Enter password for [apm_system]: 
Reenter password for [apm_system]: 
Enter password for [kibana]: 
Reenter password for [kibana]: 
Enter password for [logstash_system]: 
Reenter password for [logstash_system]: 
Enter password for [beats_system]: 
Reenter password for [beats_system]: 
Enter password for [remote_monitoring_user]: 
Reenter password for [remote_monitoring_user]: 
Changed password for user [apm_system]
Changed password for user [kibana]
Changed password for user [logstash_system]
Changed password for user [beats_system]
Changed password for user [remote_monitoring_user]
Changed password for user [elastic]

The deployment has been completed here

Now add domain name resolution:

[root@hdss-6 conf.d]# cat kibana.com.cn.conf 
server {
    listen       80;
    server_name  kibana.com.cn;

    client_max_body_size 1000m;

    location / {
        proxy_pass http://172.16.90.6:5601;
    }
}

Browser access: http://kibana.com.cn

Username: elastic
Password: the one you set for the elastic user in step 7

Guess you like

Origin blog.51cto.com/14033037/2607037