Installing and deploying Canal, latest version 1.1.4 (1)

Configuring the MySQL environment

  • Enable binlog

Configure MySQL to produce a binlog, as sketched below.

  • Configure privileges for canal

# Create the user
CREATE USER canal IDENTIFIED BY 'canal';  
# Grant replication privileges
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
-- GRANT ALL PRIVILEGES ON *.* TO 'canal'@'%' ;
# Apply the changes
FLUSH PRIVILEGES;
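
To verify that the grants took effect:

SHOW GRANTS FOR 'canal'@'%';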


# List existing users
SELECT DISTINCT CONCAT('User: ''',user,'''@''',host,''';') AS query FROM mysql.user;

Installation


# Go to the package directory
cd /opt

# Download the package
wget https://github.com/alibaba/canal/releases/download/canal-1.1.4/canal.deployer-1.1.4.tar.gz

# Create the install directory
mkdir canal

# Extract into the target directory
tar zxvf canal.deployer-1.1.4.tar.gz -C ./canal
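
After unpacking you should see the deployer's standard directories (logs appears after the first start); the files edited below live under conf:

ls ./canal
bin  conf  lib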

Configuration

Instance configuration: instance.properties

vim conf/example/instance.properties
#################################################
## mysql serverId , v1.0.26+ will autoGen
# must be unique, like the server_id values in the MySQL cluster configuration
canal.instance.mysql.slaveId=1000

# enable gtid, tagging synced data with global ids to guard against master/slave drift
canal.instance.gtidon=false

# binlog position info
# mysql connection address
canal.instance.master.address=127.0.0.1:3306
# starting binlog file
canal.instance.master.journal.name=
# starting binlog offset
canal.instance.master.position=
# starting binlog timestamp
canal.instance.master.timestamp=
# starting binlog gtid
canal.instance.master.gtid=

# Aliyun rds sso settings
canal.instance.rds.accesskey=
canal.instance.rds.secretkey=
canal.instance.rds.instanceId=

# enable tsdb to record table meta changes
canal.instance.tsdb.enable=true
# where tsdb data is stored
#canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
#canal.instance.tsdb.dbUsername=canal
#canal.instance.tsdb.dbPassword=canal

# standby database; when the master check fails, consumption switches to this node
#canal.instance.standby.address =
#canal.instance.standby.journal.name =
#canal.instance.standby.position =
#canal.instance.standby.timestamp =
#canal.instance.standby.gtid=

# mysql username and password
canal.instance.dbUsername=canal
canal.instance.dbPassword=canal
canal.instance.connectionCharset = UTF-8
# enable druid database password encryption
canal.instance.enableDruid=false
# encryption public key
#canal.instance.pwdPublicKey=MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALK4BUxdDltRRE5/zXpVEVPUgunvscYFtEip3pmLlhrWpacX7y7GCMo2/JM6LeHmiiNdH1FWgGCpUfircSwlWKUCAwEAAQ==

# table regex: tables to process
canal.instance.filter.regex=.*\\..*
# table blacklist regex: tables to skip
canal.instance.filter.black.regex=
# table field filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
#canal.instance.filter.field=test1.t_product:id/subject/keywords,test2.t_company:id/name/contact/ch
# table field black filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
#canal.instance.filter.black.field=test1.t_product:subject/product_image,test2.t_company:id/name/contact/ch

# mq message settings
# mq topic
canal.mq.topic=example
# dynamic topic routing; the topic follows the table name
#canal.mq.dynamicTopic=mytest1.user,mytest2\\..*,.*\\..*
# mq partition
canal.mq.partition=0
# number of hash partitions
#canal.mq.partitionsNum=3
# hash partition key
#canal.mq.partitionHash=test.table:id^name,.*\\..*
#################################################
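
For reference, canal.instance.filter.regex takes comma-separated Perl-style regexes; common patterns from Canal's filter syntax:

.*\\..*              all tables
test\\..*            every table in schema test
test.t_user          one specific table
test\\..*,mysql.t1   several rules combined with commas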

Server configuration: canal.properties

vim conf/canal.properties
#################################################
######### 		common argument		#############
#################################################
# local ip to bind for tcp connections; set to the machine's LAN ip
canal.ip = 127.0.0.1
# local address registered with zookeeper
canal.register.ip = 192.168.11.11
# local listening port
canal.port = 11111
# metrics pull port
canal.metrics.pull.port = 11112
# canal instance user/passwd
# canal.user = canal
# canal.passwd = E3619321C1A937C46A0D8BD1DAC39F93B27D4458

# canal admin console config
#canal.admin.manager = 127.0.0.1:8089
canal.admin.port = 11110
canal.admin.user = admin
canal.admin.passwd = 4ACFE3202A5FF5CF467898FC58AAB1D615029441

# zk persistence settings
canal.zkServers =
# how often data is flushed to zk, ms
canal.zookeeper.flush.period = 1000
# disable netty
canal.withoutNetty = false
# server mode: tcp, kafka, RocketMQ
canal.serverMode = tcp
# flush meta cursor/parse position to file
# file location
canal.file.data.dir = ${canal.conf.dir}
# flush interval
canal.file.flush.period = 1000
# max number of buffered entries; must be a power of 2
canal.instance.memory.buffer.size = 16384
## buffer limits
# buffer block size, 1024 bytes by default; total memory = count * size
canal.instance.memory.buffer.memunit = 1024
# buffer limit mode: MEMSIZE limits by memory size, ITEMSIZE limits by record count
canal.instance.memory.batch.mode = MEMSIZE
# whether to use raw mode for entries
canal.instance.memory.rawEntry = true

## heartbeat checks
# enable heartbeat checks
canal.instance.detecting.enable = false
# heartbeat sql
#canal.instance.detecting.sql = insert into retl.xdual values(1,now()) on duplicate key update x=now()
canal.instance.detecting.sql = select 1
# heartbeat interval, seconds
canal.instance.detecting.interval.time = 3
# retries before the check is considered failed
canal.instance.detecting.retry.threshold = 3
# enable failover to the standby database on failure (requires the standby settings)
canal.instance.detecting.heartbeatHaEnable = false

# number of transactions processed concurrently
canal.instance.transaction.size =  1024
# seconds to rewind the binlog position after a mysql master/standby switch, to avoid losing data during the switch
canal.instance.fallbackIntervalInSeconds = 60

## network settings
# receive buffer, bytes
canal.instance.network.receiveBufferSize = 16384
# send buffer, bytes
canal.instance.network.sendBufferSize = 16384
# timeout for fetching data, seconds
canal.instance.network.soTimeout = 30

# binlog filter settings
# use druid to parse ddl
canal.instance.filter.druid.ddl = true
# ignore dcl statements
canal.instance.filter.query.dcl = false
# ignore dml statements
canal.instance.filter.query.dml = false
# ignore ddl statements
canal.instance.filter.query.ddl = false
# ignore table errors, useful when troubleshooting table issues
canal.instance.filter.table.error = false
# ignore dml row changes (insert/update/delete)
canal.instance.filter.rows = false
# ignore transaction events, e.g. skip TransactionBegin/TransactionEnd when writing to kafka
canal.instance.filter.transaction.entry = false

# supported binlog formats
canal.instance.binlog.format = ROW,STATEMENT,MIXED
# supported binlog row image modes
canal.instance.binlog.image = FULL,MINIMAL,NOBLOB

# process ddl statements in isolation, so out-of-order concurrent processing cannot corrupt data
canal.instance.get.ddl.isolation = false

# parallel processing settings
# enable parallel binlog parsing
canal.instance.parser.parallel = true
## concurrent thread number, default 60% available processors, suggest not to exceed Runtime.getRuntime().availableProcessors()
#canal.instance.parser.parallelThreadSize = 16
## parallel parsing buffer size, must be a power of 2
canal.instance.parser.parallelBufferSize = 256

# enable tsdb for table meta history
canal.instance.tsdb.enable = true
# where table meta change records are stored, an h2 database by default
canal.instance.tsdb.dir = ${canal.file.data.dir:../conf}/${canal.instance.destination:}
# h2 settings
canal.instance.tsdb.url = jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
canal.instance.tsdb.dbUsername = canal
canal.instance.tsdb.dbPassword = canal
# snapshot interval, hours
canal.instance.tsdb.snapshot.interval = 24
# snapshot expiry, hours
canal.instance.tsdb.snapshot.expire = 360

# Aliyun access keys, for Aliyun rds and mq
canal.aliyun.accessKey =
canal.aliyun.secretKey =

#################################################
######### 		destinations		#############
#################################################
# list of instances on this server
canal.destinations = example
# canal configuration directory
canal.conf.dir = ../conf
# auto-scan the conf dir, starting added instances and stopping removed ones
canal.auto.scan = true
# auto-scan interval, seconds
canal.auto.scan.interval = 5

# tsdb spring config, relative to canal.conf.dir
canal.instance.tsdb.spring.xml = classpath:spring/tsdb/h2-tsdb.xml
#canal.instance.tsdb.spring.xml = classpath:spring/tsdb/mysql-tsdb.xml

# how the global config is loaded
canal.instance.global.mode = spring
# lazy-load instances
canal.instance.global.lazy = false
# address the managed config is loaded from
canal.instance.global.manager.address = ${canal.admin.manager}
# global spring config: file for standalone mode, default for cluster mode
#canal.instance.global.spring.xml = classpath:spring/memory-instance.xml 
canal.instance.global.spring.xml = classpath:spring/file-instance.xml
#canal.instance.global.spring.xml = classpath:spring/default-instance.xml

##################################################
######### 		     MQ 		     #############
##################################################
# MQ broker addresses
canal.mq.servers = 127.0.0.1:6667
# connection retry count
canal.mq.retries = 0
# batch size per message send, bytes
canal.mq.batchSize = 16384
# max request size, bytes
canal.mq.maxRequestSize = 1048576
# delay between message sends, ms
canal.mq.lingerMs = 100
# producer buffer memory, bytes
canal.mq.bufferMemory = 33554432
# max number of canal entries per message
canal.mq.canalBatchSize = 50
# timeout for fetching messages, ms
canal.mq.canalGetTimeout = 100
# send messages as flat json
canal.mq.flatMessage = true
# compression
canal.mq.compressionType = none
# ack level
canal.mq.acks = all
# custom mq properties
#canal.mq.properties. =
# producer group
canal.mq.producerGroup = test
# message trace channel; cloud makes traces visible in the Aliyun console
canal.mq.accessChannel = local
# Aliyun namespace
#canal.mq.namespace =

##################################################
#########     Kafka Kerberos Info    #############
##################################################
# kafka kerberos authentication; enabling it requires the two files below
canal.mq.kafka.kerberos.enable = false
canal.mq.kafka.kerberos.krb5FilePath = "../conf/kerberos/krb5.conf"
canal.mq.kafka.kerberos.jaasFilePath = "../conf/kerberos/jaas.conf"
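
For example, to have the server publish straight to Kafka instead of serving TCP clients, only the two related properties above need to change (broker addresses here are placeholders):

canal.serverMode = kafka
canal.mq.servers = 192.168.11.21:9092,192.168.11.22:9092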

Admin UI configuration: canal_local.properties

vim conf/canal_local.properties
# canal server ip
canal.register.ip = 127.0.0.1

# Admin UI settings
canal.admin.manager = 127.0.0.1:8089
canal.admin.port = 11110
canal.admin.user = admin
# password hash; a new value can be generated with select PASSWORD('admin')
canal.admin.passwd = 4ACFE3202A5FF5CF467898FC58AAB1D615029441
# register with the admin automatically
canal.admin.register.auto = true
# cluster name to register under
canal.admin.register.cluster = 
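
When the server is managed through canal-admin, start it with this local config by passing local to the startup script:

sh bin/startup.sh local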

 

Startup

  • Start canal
sh bin/startup.sh

  • View the startup log
tailf logs/canal/canal.log

  • Shut down
sh bin/stop.sh

Testing

  • Connection test: telnet to the service port (e.g. telnet 127.0.0.1 11111) to confirm it accepts connections

  • Test with the client class below
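
The class needs the Canal client library on the classpath; with Maven, the coordinates for this release are:

<dependency>
    <groupId>com.alibaba.otter</groupId>
    <artifactId>canal.client</artifactId>
    <version>1.1.4</version>
</dependency>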
/**
 * @description: todo
 * @author: lizz
 * @date: 2021/1/9 13:36
 */
import java.net.InetSocketAddress;
import java.util.List;


import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.protocol.Message;
import com.alibaba.otter.canal.protocol.CanalEntry.Column;
import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
import com.alibaba.otter.canal.protocol.CanalEntry.RowChange;
import com.alibaba.otter.canal.protocol.CanalEntry.RowData;


public class CanalClientSampleTest {


    public static void main(String args[]) {
        // create the connection and configure its parameters
        CanalConnector connector = CanalConnectors.newSingleConnector(
                new InetSocketAddress("172.x.x.x",11111),
                "example", "", "");
        int batchSize = 1000;
        int emptyCount = 0;
        try {
            connector.connect();
            connector.subscribe(".*\\..*");
            connector.rollback();
            int totalEmptyCount = 120;
            while (emptyCount < totalEmptyCount) {
                Message message = connector.getWithoutAck(batchSize); // fetch up to batchSize entries
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId == -1 || size == 0) {
                    emptyCount++;
                    System.out.println("empty count : " + emptyCount);
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                    }
                } else {
                    emptyCount = 0;
                    // System.out.printf("message[batchId=%s,size=%s] \n", batchId, size);
                    printEntry(message.getEntries());
                }

                connector.ack(batchId); // confirm the batch as processed
                // connector.rollback(batchId); // on failure, roll the batch back
            }

            System.out.println("empty too many times, exit");
        } finally {
            connector.disconnect();
        }
    }

    private static void printEntry(List<Entry> entries) {
        for (Entry entry : entries) {
            if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) {
                continue;
            }

            RowChange rowChange = null;
            try {
                rowChange = RowChange.parseFrom(entry.getStoreValue());
            } catch (Exception e) {
                throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(),
                        e);
            }

            EventType eventType = rowChange.getEventType();
            System.out.println(String.format("================> binlog[%s:%s] , name[%s,%s] , eventType : %s",
                    entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(),
                    entry.getHeader().getSchemaName(), entry.getHeader().getTableName(),
                    eventType));

            for (RowData rowData : rowChange.getRowDatasList()) {
                if (eventType == EventType.DELETE) {
                    printColumn(rowData.getBeforeColumnsList());
                } else if (eventType == EventType.INSERT) {
                    printColumn(rowData.getAfterColumnsList());
                } else {
                    System.out.println("-------> before");
                    printColumn(rowData.getBeforeColumnsList());
                    System.out.println("-------> after");
                    printColumn(rowData.getAfterColumnsList());
                }
            }
        }
    }

    private static void printColumn(List<Column> columns) {
        for (Column column : columns) {
            System.out.println(column.getName() + " : " + column.getValue() + "    update=" + column.getUpdated());
        }
    }

}

After running main, modify some data in MySQL and the client prints the change:

empty count : 1
empty count : 2
empty count : 3
empty count : 4
empty count : 5
# output triggered after modifying data in mysql
================> binlog[mysql-bin.000001:3258] , name[test,t_user] , eventType : UPDATE
-------> before
id : 5    update=false
username : 嘿嘿    update=false
sex : 1    update=false
phone : 138815828282    update=false
createtime : 2021-01-11 14:59:14    update=false
-------> after
id : 5    update=false
username : 嘿嘿1    update=true
sex : 1    update=false
phone : 138815828282    update=false
createtime : 2021-01-11 15:06:11    update=true
empty count : 1
empty count : 2
empty count : 3

 

Origin: blog.csdn.net/lizz861109/article/details/112369812