PyFlink stream/batch combination

A PyFlink job that consumes log lines from Kafka, buffers them in keyed state until a business sequence number (BUSS_SEQ_NO) appears, stamps the buffered lines with that number, and writes the enriched records to Elasticsearch.

# -*- coding: utf-8 -*-
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.functions import RuntimeContext, FlatMapFunction
import json
import re
import logging
import sys
from pyflink.datastream.state import MapStateDescriptor, ListStateDescriptor
from pyflink.datastream.connectors.kafka import FlinkKafkaConsumer
from pyflink.common.typeinfo import Types
from pyflink.datastream.connectors.elasticsearch import Elasticsearch7SinkBuilder, ElasticsearchEmitter, FlushBackoffType
from pyflink.datastream.connectors import DeliveryGuarantee
from pyflink.common.serialization import SimpleStringSchema
from datetime import datetime


logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(asctime)s-%(levelname)s-%(message)s")
logger = logging.getLogger(__name__)

# Create the StreamExecutionEnvironment
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
env.add_jars("file:///root/pyflink/flink-sql-connector-kafka_2.11-1.14.4.jar")
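# NOTE (assumption, not from the original post): connector jars should match the
# cluster's Flink/Scala version. flink-sql-connector-kafka_2.11-1.14.4 targets
# Flink 1.14 / Scala 2.11, while the Elasticsearch connector added further down
# targets Flink 1.16; mixing release lines like this usually fails at runtime.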

TEST_KAFKA_SERVERS = "1.1.146.13:9092,1.1.146.14:9092,1.1.146.16:9092"
TEST_KAFKA_TOPIC = "clpf-gaps-topic"
TEST_GROUP_ID = "clpf_gaps_group"


def get_kafka_customer_properties(kafka_servers: str, group_id: str):
    properties = {
        "bootstrap.servers": kafka_servers,
        "fetch.max.bytes": "67108864",
        "key.deserializer": "org.apache.kafka.common.serialization.StringDeserializer",
        "value.deserializer": "org.apache.kafka.common.serialization.StringDeserializer",
        # Disable Kafka auto-commit; the value must be the string "false",
        # passing a bool here raises an error
        "enable.auto.commit": "false",
        "group.id": group_id,
    }
    return properties


properties = get_kafka_customer_properties(TEST_KAFKA_SERVERS, TEST_GROUP_ID)


class LogEvent:
    """One log record to be written to Elasticsearch."""

    def __init__(self, id, source, fileTag, fileName, serviceCode, appName, timestamp, offset, message, index_name):
        self.id = id                    # global pipeline id (the business sequence number)
        self.source = source            # source ip of the log line
        self.fileTag = fileTag          # process name
        self.fileName = fileName        # file name
        self.serviceCode = serviceCode  # scene code
        self.appName = appName          # system name
        self.timestamp = timestamp      # timestamp (nanoseconds)
        self.offset = offset            # offset within the log file
        self.message = message          # raw log line
        self.index_name = index_name    # target Elasticsearch index

    def to_dict(self):
        return {
            "id": str(self.id),
            "source": str(self.source),
            "fileTag": str(self.fileTag),
            "fileName": str(self.fileName),
            "serviceCode": str(self.serviceCode),
            "appName": str(self.appName),
            "timestamp": self.timestamp,
            "offset": str(self.offset),
            "message": self.message,
            "index_name": self.index_name
        }

    def get_source(self):
        return self.source


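# MyMapFunction buffers log lines per source host in keyed state: once a
# "Start grading log" marker arrives, subsequent lines are collected in a ListState
# until a line carrying <BUSS_SEQ_NO>...</BUSS_SEQ_NO> appears; the extracted
# sequence number is then stamped onto all buffered lines (and remembered in a
# MapState for later lines) before the enriched records are emitted downstream.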
class MyMapFunction(FlatMapFunction):
    def open(self, runtime_context: RuntimeContext):
        self.process_id_to_bus_seq = runtime_context.get_map_state(
            MapStateDescriptor('process_id_map_bus_seq', Types.STRING(), Types.STRING()))
        # The list stores individual log lines, so the element type is STRING
        self.gapslist = runtime_context.get_list_state(ListStateDescriptor('process_list', Types.STRING()))

    def flat_map(self, raw_message):
        try:
            raw_message = raw_message.replace("\n", "")
            out = json.loads(raw_message)
            message = out['message']
            source = out['source']
            fileTag = out['file_tag']
            serviceCode = '00000'
            appName = out['app_name']
            timestamp = str(out.get('time_nano'))
            offset = out.get('offset')
            fileName = out.get('file_name')
        except Exception:
            # Drop lines that are not valid JSON
            return

        # Daily index name, e.g. flink-log-clpf-gaps-20230621
        date_str = datetime.now().strftime("%Y%m%d")
        index_name = 'flink-log-clpf-gaps-' + date_str

        if 'Start grading log' in message:
            # A new grading block begins: reset state and buffer the marker line
            self.process_id_to_bus_seq.clear()
            self.gapslist.clear()
            self.gapslist.add(message)
            return

        # Check whether we are inside a grading block
        # (i.e. the start marker has already been buffered)
        self.has_start = '0'
        for x in self.gapslist.get():
            if 'Start grading log' in x:
                self.has_start = '1'
                break

        # e.g. <BUSS_SEQ_NO>20601020230621010072249201</BUSS_SEQ_NO>
        if "</BUSS_SEQ_NO>" in message:
            pat = re.compile(r"<BUSS_SEQ_NO>(\d+)</BUSS_SEQ_NO>")
            bus_seq = pat.search(message).group(1)
            self.process_id_to_bus_seq.put('id', bus_seq)
            id = bus_seq
            # Flush the buffered lines, now stamped with the sequence number
            for output_message in self.gapslist.get():
                try:
                    log_event = LogEvent(id, source, fileTag, fileName, serviceCode, appName, timestamp, offset, output_message, index_name)
                    yield log_event.to_dict()
                except Exception:
                    return
            self.gapslist.clear()
            self.has_start = '0'
            log_event = LogEvent(id, source, fileTag, fileName, serviceCode, appName, timestamp, offset, message, index_name)
            yield log_event.to_dict()
            return

        if self.has_start == '1':
            # Still inside a grading block: keep buffering until the sequence number arrives
            self.gapslist.add(message)
            return

        # Outside a block: reuse the last seen sequence number, defaulting to '0'
        id = self.process_id_to_bus_seq.get('id')
        if not id:
            id = '0'
        try:
            log_event = LogEvent(id, source, fileTag, fileName, serviceCode, appName, timestamp, offset, message, index_name)
            yield log_event.to_dict()
        except Exception:
            return


data_stream = env.add_source(
    FlinkKafkaConsumer(topics=TEST_KAFKA_TOPIC,
                       properties=properties,
                       deserialization_schema=SimpleStringSchema())
        .set_commit_offsets_on_checkpoints(True)
        .set_start_from_latest()
).name(f"Consume data from topic {TEST_KAFKA_TOPIC}")

env.add_jars("file:///root/pyflink/flink-sql-connector-elasticsearch7-3.0.1-1.16.jar")

es_sink = Elasticsearch7SinkBuilder() \
        .set_bulk_flush_backoff_strategy(FlushBackoffType.EXPONENTIAL, 5, 1000) \
        .set_emitter(ElasticsearchEmitter.dynamic_index('index_name')) \
        .set_hosts(['1.1.101.32:9200','1.1.101.33:9200','1.1.101.38:9200']) \
        .set_delivery_guarantee(DeliveryGuarantee.AT_LEAST_ONCE) \
        .set_bulk_flush_interval(1000) \
        .set_connection_request_timeout(30000) \
        .set_connection_timeout(31000) \
        .set_socket_timeout(32000) \
        .build()
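# ElasticsearchEmitter.dynamic_index('index_name') routes each record to the index
# named by its 'index_name' field, here the daily flink-log-clpf-gaps-YYYYMMDD
# index built in flat_map. AT_LEAST_ONCE delivery means a record can be written
# more than once after a failure, so downstream readers should tolerate duplicates.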

def get_line_key(line):
    message = ''
    try:
        message = line.replace("\n", "")
        source = json.loads(message)['source']
    except:
        source = '999999'
    return source
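# key_by(get_line_key) partitions the stream by source host, so every line from a
# given host is processed by the same subtask and shares that host's keyed state;
# lines that fail to parse all land on the fallback key '999999'.
# e.g. get_line_key('{"source": "1.1.146.13", "message": "..."}') -> '1.1.146.13'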


# To write to Elasticsearch instead of printing, declare the output type and attach the sink:
# data_stream.key_by(get_line_key).flat_map(MyMapFunction(), output_type=Types.MAP(Types.STRING(), Types.STRING())).sink_to(es_sink).set_parallelism(3)
data_stream.key_by(get_line_key).flat_map(MyMapFunction()).print()

# Execute the task
env.execute('flink_elink_midsys')
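# Typical ways to launch this script (the file name is an assumption, not from the post):
#   python flink_kafka_to_es.py            # run on a local mini-cluster
#   flink run -py flink_kafka_to_es.py     # submit to a running Flink cluster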
 

Origin: blog.csdn.net/zhaoyangjian724/article/details/131341186