filebeat+kafka+logstash+elasticsearch+kibana

References:

1. Install Filebeat

 Install from the tar.gz package: tar -zxvf filebeat-6.4.2-linux-x86_64.tar.gz
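
 If the package is not already on the server, it can be downloaded from the Elastic artifacts site first (URL follows Elastic's standard download layout for this version):

    curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.4.2-linux-x86_64.tar.gz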

2. Configure filebeat.yml and start Filebeat

    sudo -i service filebeat restart    (rpm install)
    sudo -i service filebeat start    (rpm install)
    ./filebeat -e -c filebeat.yml -d "publish"
    nohup ./filebeat -e -c filebeat.yml > filebeat.log &
    /usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat    (rpm install)
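
Before starting, the configuration and the connection to the configured output can be checked with Filebeat's built-in test commands (shown here for the tarball install; for the rpm install use /usr/share/filebeat/bin/filebeat with the paths above):

    ./filebeat test config -c filebeat.yml
    ./filebeat test output -c filebeat.yml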

###################### Filebeat Configuration Example #########################


#=========================== Filebeat inputs =============================

filebeat.inputs:

- type: log

  enabled: true

  paths:
    - /data/log/push-message-*.log
    
  exclude_files: ["20[12][0-9]", "error"]
  multiline.pattern: '^\[{0,1}\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}[.,:]0{0,1}\d{3}'
  multiline.negate: true
  multiline.match: after
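  # The multiline settings merge continuation lines (for example Java stack traces)
  # into the event whose first line starts with a timestamp such as
  # "[2018-10-17 10:15:30.123" (an illustrative example, not taken from the real logs).
  # With negate: true and match: after, lines that do NOT match the pattern are
  # appended to the previous line that did.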

  fields:
    logType: normal

- type: log

  enabled: true

  paths:
    - /data/log/push-message-*-error.log
    - /data/log/push-message-*-error-*.log
    
  multiline.pattern: '^\[{0,1}\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}[.,:]0{0,1}\d{3}'
  multiline.negate: true
  multiline.match: after

  fields:
    logType: error
 

#============================= Filebeat modules ===============================

filebeat.config.modules:
  
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false

name: "10.0.19.141"


#-------------------------- kafka output ------------------------------

output.kafka:
  enabled: true
  hosts: ["10.0.23.105:9092"]
  topic: CREDIT-LOGPROCESSOR-ROCORD-PUSH-MESSAGE
  required_acks: 1
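
To check that Filebeat is actually publishing to Kafka, the topic can be consumed directly on a broker host (the Kafka installation directory is assumed; the broker address and topic name come from the config above):

    bin/kafka-console-consumer.sh --bootstrap-server 10.0.23.105:9092 --topic CREDIT-LOGPROCESSOR-ROCORD-PUSH-MESSAGE --from-beginning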

3. Install Logstash, write the pipeline config, and start it

    bin/logstash -f config/kafka2es.conf
    
    nohup bin/logstash -f config/kafka2es.conf -w 10 -l /var/log/logstash/logstash.log &
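
    The pipeline config can be syntax-checked first with Logstash's built-in test flag:

    bin/logstash -f config/kafka2es.conf --config.test_and_exit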

input{
   kafka{
     topics => ["CREDIT-LOGPROCESSOR-ROCORD-PUSH-MESSAGE"]
     bootstrap_servers => "10.0.23.103:9092,10.0.23.104:9092,10.0.23.105:9092"
     group_id=> "CREDIT-LOGPROCESSOR-PUSHMESSAGE"
     codec => json
     decorate_events => true
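     # decorate_events => true attaches Kafka metadata (topic, consumer group,
     # partition, offset, key) to each event under [@metadata][kafka]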
     
  } 
}


filter {

    if [fields][logType] == "error" {
        grok {
            patterns_dir => ["/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-patterns-core-4.1.2/patterns"]
            match => {
                "message" => [
                    '%{LOG_TIME:logTime}%{ANY}\[ERROR\] %{LOG_CLASS:logClass} %{LOG_LOCATION:logLocation}%{ANY}%{LOG_ID:logId}%{ANY}%{EXCEPTION_TYPE:exceptionType}%{ANY}%{EXCEPTION_PHRASE:exceptionPhrase}',
                    '%{LOG_TIME:logTime}%{ANY}\[ERROR\] %{LOG_CLASS:logClass} %{LOG_LOCATION:logLocation}%{ANY}%{LOG_ID:logId}%{ANY}'
                ]
            }
        }
    }

    if [fields][logType] == "normal" {
        grok {
            patterns_dir => ["/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-patterns-core-4.1.2/patterns"]
            match => {
                "message" => [
                    '%{LOG_TIME:logTime}%{ANY}\[INFO \] %{LOG_CLASS:logClass} %{LOG_LOCATION:logLocation}%{ANY}%{LOG_ID:logId}%{ANY}',
                    '%{LOG_TIME:logTime}%{ANY}\[WARN \] %{LOG_CLASS:logClass} %{LOG_LOCATION:logLocation}%{ANY}%{LOG_ID:logId}%{ANY}'
                ]
            }
        }
    }
}
  

output{

     elasticsearch {
		hosts => ["10.0.19.144:9200","10.0.19.145:9200","10.0.19.146:9200"]
		index => "log-pushmessage-%{+YYYY-MM-dd}"
		
	}

     stdout { codec=> rubydebug }

 }
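
Note: LOG_TIME, ANY, LOG_CLASS, LOG_LOCATION, LOG_ID, EXCEPTION_TYPE and EXCEPTION_PHRASE are not built-in grok patterns; they have to be defined in a file inside the patterns_dir referenced above. The original definitions are not included in this post, so the following is only a rough sketch of what such a patterns file might contain (file name and every regex are illustrative guesses inferred from the log format, not the actual patterns):

# hypothetical patterns file, e.g. <patterns_dir>/push-message (all regexes are guesses)
ANY .*?
LOG_TIME \[?\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}[.,:]\d{3}\]?
LOG_CLASS [A-Za-z0-9_$.]+
LOG_LOCATION \([^)]*\)
LOG_ID [A-Za-z0-9_\-]+
EXCEPTION_TYPE [A-Za-z0-9_$.]+(?:Exception|Error)
EXCEPTION_PHRASE .*

Once Logstash has been running for a while, the daily indices can be checked on any Elasticsearch node:

    curl '10.0.19.144:9200/_cat/indices/log-pushmessage-*?v'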





Reposted from blog.csdn.net/u014534643/article/details/83117491