Syncing MySQL Data with Spring Using Kafka Connect + Debezium

Approach: Kafka Connect + Debezium MySQL Connector

Implementation: Spring Kafka + OOP

1. Deployment Preparation

Kafka uses the latest release: 2.13-3.3.1 (the Scala 2.13 build of Kafka 3.3.1).

The Debezium MySQL Connector plugin is constrained by the JDK version (Debezium 2.x requires Java 11), so the latest release usable here is 1.9.7.Final.

Download the latest Kafka package: kafka_2.13-3.3.1.tgz

Download the latest Debezium package: debezium-connector-mysql-1.9.7.Final-plugin.tar.gz

After downloading, extract debezium-connector-mysql-1.9.7.Final-plugin.tar.gz and place the entire directory under the plugin path. See the configuration below for reference.
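
For reference, a minimal sketch of the extraction step, assuming the archive sits in the current directory and /opt/connectors/ is used as the plugin path (matching the configuration below):

#!/bin/sh
# Create the plugin directory and unpack the whole Debezium plugin into it
mkdir -p /opt/connectors
tar -xzf debezium-connector-mysql-1.9.7.Final-plugin.tar.gz -C /opt/connectors/
# The plugin jars should now live under /opt/connectors/debezium-connector-mysql/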

2. Configuration Files Explained

config/connect-standalone.properties

# Address of the current broker
bootstrap.servers=192.168.26.213:9092

# Keep the default converters
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
# Disable schema info in messages to save bandwidth
key.converter.schemas.enable=false
value.converter.schemas.enable=false
# File where connector offsets are cached
offset.storage.file.filename=/tmp/connect.offsets
# Flush much faster than normal, which is useful for testing/debugging
offset.flush.interval.ms=10000

# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
# Plugin directory. Place the complete Debezium directory under it,
# so the jars end up at e.g. /opt/connectors/xxxx/*.jar
plugin.path=/opt/connectors/

config/mysql-connect.properties

# Connector name; must be unique within Kafka Connect
name=MysqlSync
# Connector implementation class
connector.class=io.debezium.connector.mysql.MySqlConnector
# Database host
database.hostname=192.168.26.198
# Database port
database.port=3306
# Database user
database.user=root
# Database password
database.password=oI3WtMO8h%mSYARp
# Server id; must be unique among the MySQL server's replication clients
database.server.id=1
# Logical server name; note that it becomes the prefix of the Kafka topic names
database.server.name=nucleic_sync
# Databases to include; we only process the hesuan database
database.include.list=hesuan
# Kafka brokers used for the database history
database.history.kafka.bootstrap.servers=192.168.26.213:9092
# Topic that stores the database history
database.history.kafka.topic=nucleic-record
# Snapshot mode; initial is the default. If you run into problems, try schema_only_recovery
snapshot.mode=initial
# Suppress tombstone events so deleting a row does not push an extra event
tombstones.on.delete=false
# Schema handling mode; warn instead of failing outright makes problems easier to spot
inconsistent.schema.handling.mode=warn
# Use a file-based database history
database.history=io.debezium.relational.history.FileDatabaseHistory
# History file; relative paths resolve against the working directory
database.history.file.filename=history.dat
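
With this configuration, Debezium derives per-table topic names from database.server.name using the pattern <server.name>.<db>.<table>. As an illustration (the table name below is hypothetical):

# Topics produced by this connector, assuming a table hesuan.sampling:
#   nucleic_sync                   - schema change topic (database.server.name)
#   nucleic-record                 - database history topic
#   nucleic_sync.hesuan.sampling   - per-table change topic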

3. Start ZooKeeper, Kafka, and Kafka Connect

#!/bin/sh

base=/home/parallels/kafka_2.13-3.3.1

# launch zookeeper
$base/bin/zookeeper-server-start.sh -daemon $base/config/zookeeper.properties

# launch kafka
$base/bin/kafka-server-start.sh -daemon $base/config/server.properties

# launch kafka connect
$base/bin/connect-standalone.sh -daemon $base/config/connect-standalone.properties $base/config/mysql-connect.properties
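
To verify the deployment, a quick sanity check (assuming the same paths and addresses as above; the standalone Connect REST API listens on port 8083 by default):

#!/bin/sh
base=/home/parallels/kafka_2.13-3.3.1

# List topics to confirm the Debezium topics exist
$base/bin/kafka-topics.sh --bootstrap-server 192.168.26.213:9092 --list

# Ask Kafka Connect for the connector status
curl -s http://localhost:8083/connectors/MysqlSync/status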

4. Create Topics with Spring

package com.chinaunicom.system.epi.config;

import com.chinaunicom.system.epi.config.datasource.AnotherDataSourceProperties;
import com.chinaunicom.system.epi.nucleic.sync.KafkaTopics;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.kafka.support.converter.JsonMessageConverter;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.util.backoff.FixedBackOff;

/**
 * Kafka configuration.
 *
 * @author wangyu
 * Declares error handling, topics, and consumption wiring here for unified management.
 */
@Configuration
@Slf4j
public class KafkaConfig {

    /*
     * Kafka error handler: retries twice at 1-second intervals (FixedBackOff),
     * then hands the failed record to DeadLetterPublishingRecoverer, which
     * publishes it to a "<topic>.DLT" dead-letter topic by default.
     */
    @Bean
    public DefaultErrorHandler errorHandler(KafkaOperations<?, ?> template) {
        return new DefaultErrorHandler(
                new DeadLetterPublishingRecoverer(template), new FixedBackOff(1000L, 2));
    }

    @Bean
    @ConfigurationProperties(prefix = "database.another")
    public AnotherDataSourceProperties sourceProperties() {
        return new AnotherDataSourceProperties();
    }

    /**
     * Message converter for record payloads.
     *
     * @return the converter
     */
    @Bean
    public RecordMessageConverter converter() {
        return new JsonMessageConverter();
    }

    /**
     * Schema history topic for the connector.
     *
     * @return the topic
     */
    @Bean
    public NewTopic schemaTopic() {
        return new NewTopic(KafkaTopics.NUCLEIC_SCHEMA, 1, (short) 1);
    }

    /**
     * Server-name topic for the connector node.
     *
     * @return the topic
     */
    @Bean
    public NewTopic nodeTopic() {
        return new NewTopic(KafkaTopics.NUCLEIC_SYNC, 1, (short) 1);
    }

    /**
     * Topic for the nucleic acid sampling record table.
     *
     * @return the topic
     */
    @Bean
    public NewTopic nucleicRecordTopic() {
        return new NewTopic(KafkaTopics.NUCLEIC_RECORD, 3, (short) 1);
    }

    /**
     * Topic for the nucleic acid test result table.
     *
     * @return the topic
     */
    @Bean
    public NewTopic nucleicResultTopic() {
        return new NewTopic(KafkaTopics.NUCLEIC_RESULT, 3, (short) 1);
    }

}
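
For completeness, a minimal application.properties sketch for the consumer side. The values are assumptions matching the addresses used above; since JsonMessageConverter performs the JSON mapping, plain String deserializers are enough:

# application.properties (sketch; values assumed from the setup above)
spring.kafka.bootstrap-servers=192.168.26.213:9092
spring.kafka.consumer.group-id=nucleic-sync
# JsonMessageConverter does the JSON work, so String deserializers suffice
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer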

5. Listen for and Consume Changes with Spring

Object-oriented modeling:

package com.chinaunicom.system.epi.nucleic.sync.entity;

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonValue;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.Getter;

import java.util.Date;

/**
 * Kafka sync message payload (the Debezium change-event envelope).
 *
 * @author wangyu
 */
@Data
public class KafkaSourcePayload<T> {

    private T before;

    private T after;

    private KafkaSourceInfo source;

    private Operation op;

    @JsonProperty("ts_ms")
    private Date time;

    private String transaction;

    @AllArgsConstructor
    @Getter
    public enum Operation {

        CREATE("c", "insert"),
        UPDATE("u", "update"),
        DELETE("d", "delete"),
        // Debezium marks snapshot reads with op "r" (emitted under snapshot.mode=initial);
        // without this constant, snapshot events would fail to deserialize
        READ("r", "snapshot read");

        @JsonValue
        private final String code;

        private final String name;
    }
}
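
For orientation, this is roughly what a change event looks like with schemas disabled. This is a hand-written example; the column names under "after" and the table name are placeholders, not the real table schema:

{
  "before": null,
  "after": { "id": 1, "person_name": "...", "tube_number": "..." },
  "source": {
    "version": "1.9.7.Final", "connector": "mysql", "name": "nucleic_sync",
    "ts_ms": 1674958000000, "snapshot": "false", "db": "hesuan",
    "table": "sampling", "server_id": 1, "file": "binlog.000003",
    "pos": 4567, "row": 0
  },
  "op": "c",
  "ts_ms": 1674958000123,
  "transaction": null
}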

package com.chinaunicom.system.epi.nucleic.sync.entity;

import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;

import java.util.Date;

/**
 * Source metadata of the Kafka message (the Debezium "source" block).
 *
 * @author wangyu
 */
@Data
public class KafkaSourceInfo {

    private String version;

    private String connector;

    private String name;

    @JsonProperty("ts_ms")
    private Date time;

    // Debezium encodes snapshot as a string enum ("true", "last", "false"), so String is safer than Boolean
    private String snapshot;

    private String db;

    private Integer sequence;

    private String table;

    @JsonProperty("server_id")
    private Integer serverId;

    private String gtid;

    private String file;

    private Long pos;

    private Long row;

    private Integer thread;

    private String query;
}

Listen for and consume the data:

package com.chinaunicom.system.epi.nucleic.sync;

import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import com.chinaunicom.system.epi.nucleic.domain.po.EpiNucleicAcid;
import com.chinaunicom.system.epi.nucleic.mapper.EpiNucleicAcidMapper;
import com.chinaunicom.system.epi.nucleic.sync.domain.NucleicAcidSampling;
import com.chinaunicom.system.epi.nucleic.sync.domain.NucleicAcidTesting;
import com.chinaunicom.system.epi.nucleic.sync.entity.KafkaSourcePayload;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.annotation.DltHandler;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

/**
 * Listener for nucleic acid change events.
 *
 * @author wangyu
 */
@Component
@Slf4j
@RequiredArgsConstructor // generates the constructor that injects acidMapper
public class NucleicListener {

    private final EpiNucleicAcidMapper acidMapper;

    /**
     * Syncs sampling records.
     *
     * @param payload the change-event payload
     */
    @KafkaListener(id = "record-consumer", topics = KafkaTopics.NUCLEIC_RECORD, concurrency = "10")
    public void recordSync(KafkaSourcePayload<NucleicAcidSampling> payload) {
        // persist the sampling record
        EpiNucleicAcid po;
        switch (payload.getOp()) {
            case CREATE:
            case READ: // snapshot reads carry full rows, treat them as inserts
                po = buildPo(payload.getAfter());
                acidMapper.insert(po);
                break;
            case UPDATE:
                po = buildPo(payload.getAfter());
                if (acidMapper.updateById(po) == 0) {
                    acidMapper.insert(po);
                }
                break;
            case DELETE:
                // do nothing
                break;
        }
    }

    /**
     * Syncs test results.
     *
     * @param payload the change-event payload
     */
    @KafkaListener(id = "result-consumer", topics = KafkaTopics.NUCLEIC_RESULT, concurrency = "10")
    public void resultSync(KafkaSourcePayload<NucleicAcidTesting> payload) {
        // update the nucleic acid result
        switch (payload.getOp()) {
            case CREATE:
            case READ: // snapshot reads carry full rows, treat them like inserts
            case UPDATE:
                NucleicAcidTesting po = payload.getAfter();
                acidMapper.update(null, Wrappers.<EpiNucleicAcid>lambdaUpdate()
                        .set(EpiNucleicAcid::getNucleicAcidResult, po.getResult())
                        .set(EpiNucleicAcid::getTestingTime, po.getTestingTime())
                        .set(EpiNucleicAcid::getTestingOrg, po.getTestingOrg())
                        .eq(EpiNucleicAcid::getIdCard, po.getIdCard())
                        .eq(EpiNucleicAcid::getTubeNumber, po.getTubeNumber()));
                break;
            case DELETE:
                // do nothing
                break;
        }
    }

    /**
     * Handles records that were dead-lettered after retries were exhausted.
     */
    @DltHandler
    public void handleException(Exception e) {
        log.error(e.getMessage(), e);
    }

    /**
     * Builds the target entity from a sampling record.
     *
     * @param sampling the sampling record
     * @return the entity
     */
    private EpiNucleicAcid buildPo(NucleicAcidSampling sampling) {
        EpiNucleicAcid acid = new EpiNucleicAcid();
        acid.setId(sampling.getId());
        acid.setPersonName(sampling.getPersonName());
        acid.setIdCard(sampling.getIdCard());
        acid.setPersonTel(sampling.getPersonTel());
        acid.setTubeNumber(sampling.getTubeNumber());
        acid.setSamplingTime(sampling.getSamplingTime());
        acid.setSamplingPerson(sampling.getSamplingPerson());
        acid.setSamplingName(sampling.getSamplingName());
        acid.setSamplingAddress(sampling.getSamplingAddress());
        acid.setCreateTime(sampling.getCreateTime());
        return acid;
    }
}
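
To watch the raw events the listeners consume, the console consumer works as a quick check (the topic name here is hypothetical, following the naming pattern described above):

#!/bin/sh
base=/home/parallels/kafka_2.13-3.3.1
$base/bin/kafka-console-consumer.sh --bootstrap-server 192.168.26.213:9092 \
    --topic nucleic_sync.hesuan.sampling --from-beginning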

Reposted from blog.csdn.net/wybaby168/article/details/128852297