Canal data format, data structures, and processing code

1. The Canal message data format

1) The types carried in a data packet

import com.alibaba.otter.canal.protocol.CanalEntry.Column;
import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
import com.alibaba.otter.canal.protocol.CanalEntry.Header;
import com.alibaba.otter.canal.protocol.CanalEntry.RowChange;
import com.alibaba.otter.canal.protocol.CanalEntry.RowData;
import com.alibaba.otter.canal.protocol.Message;
import com.google.protobuf.InvalidProtocolBufferException;

import java.util.List;

2) The full data layout

Refer to: https://blog.csdn.net/weixin_41047933/article/details/85293002

Canal serializes its data with Protobuf:

​
Entry=====> RowChange=====> Column

​

Entry
    Header
        logfileName [binlog file name]
        logfileOffset [binlog position]
        executeTime [time the change occurred]
        schemaName 
        tableName
        eventType [insert/update/delete type]
    entryType   [transaction header BEGIN / transaction tail END / data ROWDATA]
    storeValue  [byte data, expandable; deserializes to a RowChange]    


RowChange
    isDdl       [whether this is a DDL change, e.g. create table / drop table]
    sql     [the concrete DDL sql]
    rowDatas    [the changed rows of an insert/update/delete; may hold several rows, since one binlog event can cover multiple changes, e.g. a batch]
        beforeColumns [array of Column]
        afterColumns [array of Column]  

    
Column 
    index       
    sqlType     [jdbc type]
    name        [column name]
    isKey       [whether the column is a primary key]
    updated     [whether the value was changed]
    isNull      [whether the value is null]
    value       [the actual content; note that it is text]

3) The data-processing code (Java)

 

import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.common.utils.AddressUtils;
import com.alibaba.otter.canal.protocol.Message;
import com.tank.RedisObserver;

import java.net.InetSocketAddress;


/**
 * @ClassName: RedisClient
 * @Description: TODO
 * @Author: ****
 * @Data: 2019/5/20 18:14
 * @Version: 1.0
 **/

/**
 * Minimal Canal client: connects to a Canal server, subscribes to every
 * table under the configured destination, and polls binlog entries in an
 * endless loop, acking each batch after it is processed.
 *
 * NOTE(review): host, port and credentials are hard-coded — move them to
 * configuration for real use; do not commit passwords.
 */
public class RedisClient {
    public static void main(String[] args) {
        final String ip = "hadoop";
        final int port = 11111;
        final String destination = "demo2";
        final String username = "canal";
        final String password = "Canal2019!";
        InetSocketAddress socketAddress = new InetSocketAddress(ip, port);
        CanalConnector connector = CanalConnectors.newSingleConnector(socketAddress, destination, username, password);
        int batchSize = 100;
        try {
            connector.connect();
            // Subscribe to all tables of the destination schema ("demo2\..*").
            connector.subscribe(destination + "\\..*");
            // Roll back any un-acked batch left over from a previous run.
            connector.rollback();

            while (true) {
                //获取canal的信息,也即日志文件的数据
                Message message = connector.getWithoutAck(batchSize); // 获取指定数量的数据
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId == -1 || size == 0) {
                    // Empty batch: back off briefly before polling again.
                    // Do NOT ack here — batchId is -1 and there is nothing to confirm.
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag and stop polling cleanly.
                        Thread.currentThread().interrupt();
                        return;
                    }
                } else {
                    System.out.println("处理数据,输出数据!");
                    //RedisObserver.printEntry(message.getEntries());
                    //RedisObserver.printColumn(message.getEntries());

                    //在这个地方写处理逻辑。。。。。处理message。。。。。。。。

                    connector.ack(batchId); // 提交确认 — only after successful processing
                    // connector.rollback(batchId); // 处理失败, 回滚数据
                }
            }

        } finally {
            connector.disconnect();
        }
    }
}
/**
 * Walks every Entry in a Canal Message and reads out all header, row and
 * column fields. The bare getter calls illustrate the structure of a Canal
 * message; they do not print or store anything yet.
 *
 * @param message batch of binlog entries fetched from the Canal server
 * @throws RuntimeException if an entry's storeValue cannot be parsed as a RowChange
 */
public static void showAllMessage(Message message) {
    List<Entry> entries = message.getEntries();
    for (Entry entry : entries) {
        // Transaction BEGIN/END markers carry no RowChange payload — skip them
        // instead of feeding their storeValue to the protobuf parser.
        EntryType entryType = entry.getEntryType();
        if (entryType == EntryType.TRANSACTIONBEGIN || entryType == EntryType.TRANSACTIONEND) {
            continue;
        }
        Header header = entry.getHeader();
        header.getLogfileName();   // binlog file name
        header.getLogfileOffset(); // binlog position
        header.getExecuteTime();   // time the change was executed
        header.getSchemaName();
        header.getEventType();     // insert/update/delete
        try {
            // storeValue is a serialized RowChange protobuf.
            RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
            List<RowData> rowDatas = rowChange.getRowDatasList();
            for (RowData rowData : rowDatas) {
                List<Column> afterColumns = rowData.getAfterColumnsList();   // non-delete operations
                List<Column> beforeColumns = rowData.getBeforeColumnsList(); // delete operations
                for (Column afterColumn : afterColumns) {
                    afterColumn.getIndex();
                    afterColumn.getMysqlType();
                    afterColumn.getName();
                    afterColumn.getIsKey();
                    afterColumn.getUpdated();
                    afterColumn.getIsNull();
                    afterColumn.getValue();
                }
            }
        } catch (InvalidProtocolBufferException e) {
            // Surface the failure with its cause instead of swallowing it
            // with printStackTrace() and continuing on bad data.
            throw new RuntimeException("Failed to parse RowChange from entry storeValue", e);
        }
    }
}

 

 

Related articles

Origin blog.csdn.net/weixin_40126236/article/details/90407401