Go Project in Practice: Building a Highly Concurrent Log Collection System (Part 8)

Recap of previous sections

In the previous sections we completed the basic functions of the log collection system: log monitoring, log collection, hot reloading of the configuration, dynamic startup and shutdown of goroutines, and extended support for managing the collected file paths through etcd.

Goals of this section

This section adds log query and retrieval capabilities. The basic idea is to read the log messages from Kafka and write them into Elasticsearch. Elasticsearch is a distributed, multi-tenant full-text search engine with a web interface through which we can store and query the data. To make retrieval and querying even easier, we pair Elasticsearch with Kibana for visual queries. Kibana is an open-source analysis and visualization platform designed for Elasticsearch.

Source code implementation

We split the functionality that reads logs from Kafka, parses them, and writes them to Elasticsearch into a separate process, independent of the process that monitors the logs and produces data to Kafka.

package main
import (
	"fmt"
	kafconsumer "golang- / logcatchsys / kafconsumer"
	"golang-/logcatchsys/logconfig"
)

func main() {
	v := logconfig.InitVipper()
	if v == nil {
		fmt.Println("vipper init failed!")
		return
	}

	kafconsumer.GetMsgFromKafka()
}

  The main function initializes viper and then calls GetMsgFromKafka, encapsulated in the kafconsumer package, to read messages from Kafka.

func GetMsgFromKafka() {
	fmt.Println("kafka consumer begin ...")
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	var kafkaddr = "localhost:9092"
	kafkaconf, _ := logconfig.ReadConfig(logconfig.InitVipper(), "kafkaconfig.kafkaaddr")
	if kafkaconf != nil {
		kafkaddr = kafkaconf.(string)
	}
	// Create the consumer
	consumer, err := sarama.NewConsumer([]string{kafkaddr}, config)
	if err != nil {
		fmt.Println("consumer create failed, error is ", err.Error())
		return
	}
	defer func(consumer sarama.Consumer) {
		if err := recover(); err != nil {
			fmt.Println("consumer panic error ", err)
		}
		consumer.Close()
		topicSet = nil
		// cancel all reader goroutines
		for _, val := range topicMap {
			for _, valt := range val {
				valt.Cancel()
			}
		}

		topicMap = nil
	}(consumer)
	topicSetTmp := ConstructTopicSet()
	if topicSetTmp == nil {
		fmt.Println("construct topic set error ")
		return
	}
	topicSet = topicSetTmp
	ConsumeTopic(consumer)
}

  GetMsgFromKafka creates the Kafka consumer and then calls ConstructTopicSet to build the topic set from the configuration. topicSet is really a map used as a set, which guarantees that topics are not duplicated. It then calls ConsumeTopic to read the data for those topics from Kafka.
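  The code above also relies on several package-level variables and types (topicSet, topicMap, topicChan, TopicData, TopicPart, and LogData used later in ReadFromEtcd) that are declared elsewhere in the kafconsumer package. Below is a minimal sketch of what those declarations could look like, inferred only from how they are used in this section; the exact definitions in the repository may differ (the channel buffer size, struct tags, and the sarama import path in particular are assumptions).

package kafconsumer

import (
	"context"

	"github.com/Shopify/sarama"
)

// TopicPart identifies one topic/partition pair; a reader goroutine sends it
// on topicChan when it exits so that the parent can restart the reader.
type TopicPart struct {
	Topic     string
	Partition int32
}

// TopicData bundles everything one reader goroutine needs: the partition
// consumer plus a cancelable context the parent uses to shut it down.
type TopicData struct {
	Ctx         context.Context
	Cancel      context.CancelFunc
	KafConsumer sarama.PartitionConsumer
	TPartition  *TopicPart
}

// LogData is the JSON document written to Elasticsearch for every message.
type LogData struct {
	Topic string `json:"topic"`
	Log   string `json:"log"`
	Id    string `json:"id"`
}

var (
	// topicSet holds the topics read from the configuration (map used as a set).
	topicSet map[string]bool
	// topicMap maps topic -> partition -> reader state.
	topicMap = make(map[string]map[int32]*TopicData)
	// topicChan receives the topic/partition pairs of readers that exited.
	topicChan = make(chan *TopicPart, 20)
)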

func ConstructTopicSet() map[string]bool {
	topicSetTmp := make(map[string]bool)
	configtopics, _ := logconfig.ReadConfig(logconfig.InitVipper(), "collectlogs")
	if configtopics == nil {
		goto CONFTOPIC
	}
	for _, configtopic := range configtopics.([]interface{}) {
		confmap := configtopic.(map[interface{}]interface{})
		for key, val := range confmap {
			if key.(string) == "logtopic" {
				topicSetTmp[val.(string)] = true
			}
		}
	}
CONFTOPIC:
	return topicSetTmp
}

  ConstructTopicSet reads the topic list from the configuration and returns the topics packed into a map.
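  For reference, the configuration keys read in this section (collectlogs with a logtopic per entry, kafkaconfig.kafkaaddr, elasticconfig.elasticaddr and elasticconfig.typestr) suggest a YAML layout roughly like the one below. This is only an illustrative sketch of the shape viper would need to produce the []interface{} of maps seen above, not the exact file from the repository; entries may carry additional keys such as the log path used by the collector.

kafkaconfig:
  kafkaaddr: "localhost:9092"

elasticconfig:
  elasticaddr: "localhost:9200"
  typestr: "catlog"

collectlogs:
  - logtopic: "nginxlog"
    logpath: "/var/log/nginx/access.log"
  - logtopic: "syslog"
    logpath: "/var/log/syslog"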

func ConsumeTopic(consumer sarama.Consumer) {

	for key := range topicSet {
		partitionList, err := consumer.Partitions(key)
		if err != nil {
			fmt.Println("get consumer partitions failed")
			fmt.Println("error is ", err.Error())
			continue
		}

		for partition := range partitionList {
			pc, err := consumer.ConsumePartition(key, int32(partition), sarama.OffsetNewest)
			if err != nil {
				fmt.Println("consume partition error is ", err.Error())
				continue
			}
			defer pc.AsyncClose()

			topicData := new(TopicData)
			topicData.Ctx, topicData.Cancel = context.WithCancel(context.Background())
			topicData.KafConsumer = pc
			topicData.TPartition = new(TopicPart)
			topicData.TPartition.Partition = int32(partition)
			topicData.TPartition.Topic = key
			_, okm := topicMap[key]
			if !okm {
				topicMap[key] = make(map[int32]*TopicData)
			}
			topicMap[key][int32(partition)] = topicData
			go ReadFromEtcd(topicData)

		}
	}
	for {
		select {
		case topicpart := <-topicChan:
			fmt.Printf("receive goroutine exited, topic is %s, partition is %d\n",
				topicpart.Topic, topicpart.Partition)
			// restart consumers read data coroutine
			val, ok := topicMap[topicpart.Topic]
			if !ok {
				continue
			}
			tp, ok := val[topicpart.Partition]
			if !ok {
				continue
			}
			tp.Ctx, tp.Cancel = context.WithCancel(context.Background())
			go ReadFromEtcd(tp)
		}

	}
}

  ConsumeTopic iterates over the topics in the topic set, starts a ReadFromEtcd goroutine for every partition of every topic to read messages, and then waits on topicChan so it can restart any reader goroutine that exits.
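  The restart logic deserves a closer look: each reader goroutine recovers from panics in a deferred function and reports its topic/partition on topicChan, and the select loop above then spawns a replacement. The standalone sketch below illustrates this supervise-and-restart pattern in isolation; the names and the simulated crash are illustrative only and are not taken from the project.

package main

import (
	"fmt"
	"time"
)

// exitChan carries the id of a worker that died so the supervisor can restart it.
var exitChan = make(chan int)

// worker stands in for a partition reader; crash controls whether this run panics.
func worker(id int, crash bool) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Printf("worker %d panicked: %v\n", id, err)
			exitChan <- id // report the crash to the supervisor
		}
	}()
	if crash {
		panic("simulated crash")
	}
	fmt.Printf("worker %d running normally\n", id)
}

func main() {
	go worker(1, true)

	// Supervisor loop: restart any worker that reports an exit,
	// and give up after a quiet period so the demo terminates.
	for {
		select {
		case id := <-exitChan:
			fmt.Printf("restarting worker %d\n", id)
			go worker(id, false)
		case <-time.After(2 * time.Second):
			fmt.Println("no more crashes, demo done")
			return
		}
	}
}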

func ReadFromEtcd(topicData *TopicData) {

	fmt.Printf("kafka consumer begin to read message, topic is %s, part is %d\n", topicData.TPartition.Topic,
		topicData.TPartition.Partition)

	logger := log.New(os.Stdout, "LOGCAT", log.LstdFlags|log.Lshortfile)
	elastiaddr, _ := logconfig.ReadConfig(logconfig.InitVipper(), "elasticconfig.elasticaddr")
	if elastiaddr == nil {
		elastiaddr = "localhost:9200"
	}

	esClient, err := elastic.NewClient(elastic.SetURL("http://"+elastiaddr.(string)),
		elastic.SetErrorLog(logger))
	if err != nil {
		// Handle error
		logger.Println("create elestic client error ", err.Error())
		return
	}

	info, code, err := esClient.Ping("http://" + elastiaddr.(string)).Do(context.Background())
	if err != nil {
		logger.Println("elestic search ping error, ", err.Error())
		esClient.Stop()
		esClient = nil
		return
	}
	fmt.Printf("Elasticsearch returned with code %d and version %s\n", code, info.Version.Number)

	esversion, err := esClient.ElasticsearchVersion("http://" + elastiaddr.(string))
	if err != nil {
		fmt.Println("elestic search version get failed, ", err.Error())
		esClient.Stop()
		esClient = nil
		return
	}
	fmt.Printf("Elasticsearch version %s\n", esversion)

	defer func(esClient *elastic.Client) {
		if err := recover(); err != nil {
			fmt.Printf("consumer message panic %s, topic is %s, part is %d\n", err,
				topicData.TPartition.Topic, topicData.TPartition.Partition)
			topicChan <- topicData.TPartition
		}

	}(esClient)

	var typestr = "catlog"
	typeconf, _ := logconfig.ReadConfig(logconfig.InitVipper(), "elasticconfig.typestr")
	if typeconf != nil {
		typestr = typeconf.(string)
	}

	for {
		select {
		case msg, ok := <-topicData.KafConsumer.Messages():
			if !ok {
				fmt.Println("etcd message chan closed ")
				return
			}
			fmt.Printf("%s---Partition:%d, Offset:%d, Key:%s, Value:%s\n",
				msg.Topic, msg.Partition, msg.Offset, string(msg.Key), string(msg.Value))
			idstr := strconv.FormatInt(int64(msg.Partition), 10) + strconv.FormatInt(msg.Offset, 10)
			logdata := &LogData{Topic: msg.Topic, Log: string(msg.Value), Id: idstr}
			createIndex, err := esClient.Index().Index(msg.Topic).Type(typestr).Id(idstr).BodyJson(logdata).Do(context.Background())

			if err != nil {
				logger.Println("create index failed, ", err.Error())
				continue
			}
			fmt.Println("create index success, ", createIndex)

		case <-topicData.Ctx.Done():
			fmt.Println("receive exited from parent goroutine !")
			return
		}
	}
}

  ReadFromEtcd writes the data it reads from Kafka into Elasticsearch. If the goroutine panics, a notification is sent to the parent goroutine, which restarts it.
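  Once documents have been indexed, the pipeline can also be checked from Go by querying them back with the same olivere/elastic client. The sketch below assumes the default address, a topic named logtopic1 and the catlog type used above, plus the v5-era import path that matches the Type() call in ReadFromEtcd; adjust these to your own setup.

package main

import (
	"context"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Address, index name and type string below are assumptions matching
	// the defaults used in ReadFromEtcd.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	res, err := client.Search().
		Index("logtopic1"). // the index name equals the kafka topic
		Type("catlog").     // same type string used when indexing
		Size(10).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("found %d documents\n", res.TotalHits())
	for _, hit := range res.Hits.Hits {
		// hit.Source is a *json.RawMessage in the v5 client.
		fmt.Printf("id=%s source=%s\n", hit.Id, string(*hit.Source))
	}
}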

Results

First start the log monitoring program from the previous sections, then start the message processing program designed in this section.
You can see that as logs are continuously written, the monitoring program sends the changed log lines to Kafka.
Meanwhile, the processing program continuously reads that data from Kafka and writes it into Elasticsearch. We then query the data in Kibana:
(Screenshots: Kibana query results)

Source download

https://github.com/secondtonone1/golang-/tree/master/logcatchsys
Thanks for following my WeChat official account.

Original post: www.cnblogs.com/secondtonone1/p/12205376.html