章节目录
1. 写一个测试客户端,采用流式应用的典型 “consume-transform-produce” 模式
config
spring:
  kafka:
    bootstrap-servers: localhost:9092
    producer:
      # 事务id前缀,有值即开启kafka事务
      transaction-id-prefix: tx-kafka-
      value-serializer: org.springframework.kafka.support.serializer.ToStringSerializer
    consumer:
      group-id: spring-kafka-evo-consumer-004
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # 消费者事务级别:读已提交
      isolation-level: READ_COMMITTED
/**
 * Kafka configuration: declares the two topics used by the
 * consume-transform-produce transaction demo.
 */
@Configuration
@EnableKafka
public class KafkaConfig {

    /** Topic carrying the first transactional event (1 partition, 1 replica). */
    @Bean
    public NewTopic transactionTopic1() {
        return TopicBuilder.name("TRANSACTION-TOPIC-1")
                .partitions(1)
                .replicas(1)
                .build();
    }

    /** Topic carrying the second transactional event (1 partition, 1 replica). */
    @Bean
    public NewTopic transactionTopic2() {
        return TopicBuilder.name("TRANSACTION-TOPIC-2")
                .partitions(1)
                .replicas(1)
                .build();
    }
}
Controller
/**
 * Kicks off the demo: publishes the first event inside a Kafka transaction
 * managed by {@code kafkaTransactionManager}.
 *
 * @param message payload to publish as the first event
 * @return fixed acknowledgement string
 */
@GetMapping("/tx-two")
@Transactional(rollbackFor = Exception.class, transactionManager = "kafkaTransactionManager")
public String sendTransactionTwo(@RequestParam("message") String message) throws InterruptedException {
    // NOTE(review): InterruptedException is declared but nothing here blocks — confirm it is still needed.
    final String ack = "send transaction-two success...";
    log.info("发送消息:{}", message);
    senderService.sendTransactionTwo(message);
    return ack;
}
Service
@Service
@Slf4j
public class SenderService {
@Autowired
private KafkaTemplate<String, String> template;
@Autowired
private ProcessEventRepository processEventRepository;
/**
* 发布第一事件
*
* @param message 消息
*/
@Transactional(rollbackFor = Exception.class)
public void sendTransactionTwo(String message) {
final Iterable<ProcessEventEntity> all = processEventRepository.findAll();
log.info("1-验证数据库事务,查询数据库:{}", all);
// 发起第一个 TOPIC 的事务消息
final ListenableFuture<SendResult<String, String>> result = this.template.send(
TransactionTopicKafkaConsumerGroup.TOPIC, "第一事件:" + message);
// 发送消息回调
result.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
@Override
public void onFailure(Throwable ex) {
log.error("2-事务发送kafka消息异常回调", ex);
}
@Override
public void onSuccess(SendResult<String, String> result) {
log.info("2-事务发送kafka消息成功回调: {}", result);
}
});
}
/**
* 第一事件消费中发布第二事件
*
* @param message 消息
*/
@Transactional(rollbackFor = Exception.class)
public void doEventV1(String message) {
final Iterable<ProcessEventEntity> all = processEventRepository.findAll();
log.info("3-验证数据库事务,查询数据库:{}", all);
// 在消费者发起另一个 TOPIC 的事务消息
final ListenableFuture<SendResult<String, String>> result = this.template.send(
TransactionTopicKafkaConsumerGroupV2.TOPIC, "第二事件:" + message);
// 发送消息回调
result.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
@Override
public void onFailure(Throwable ex) {
log.error("4-事务发送kafka消息异常回调", ex);
}
@Override
public void onSuccess(SendResult<String, String> result) {
log.info("4-事务发送kafka消息成功回调: {}", result);
}
});
}
}
KafkaListener
第一事件消费者
/**
 * Consumer of the first transactional event. On receipt it republishes a
 * second event through {@code SenderService.doEventV1}, completing the
 * consume-transform-produce chain.
 */
@Component
@Slf4j
public class TransactionOneEventListener {

    @Autowired
    private SenderService senderService;

    @Transactional(rollbackFor = Exception.class, transactionManager = "kafkaTransactionManager")
    @KafkaListener(topicPartitions = {
            @TopicPartition(topic = "TRANSACTION-TOPIC-1", partitions = "0")
    })
    public void listenEvent1(String value,
                             @Header(KafkaHeaders.RECEIVED_TOPIC) String sourceTopic,
                             @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int sourcePartition,
                             @Header(KafkaHeaders.OFFSET) long recordOffset) {
        log.info("listenEvent1:接收kafka消息:[{}],from {} @ {}@ {}", value, sourceTopic, sourcePartition, recordOffset);
        // Transform-and-produce: forward the payload as the second event.
        senderService.doEventV1(value);
    }
}
第二事件消费者
/**
 * Terminal consumer of the second transactional event; it only logs the
 * received record.
 *
 * <p>Fix: removed the autowired {@code SenderService} field — it was never
 * used by this listener (dead dependency).
 */
@Component
@Slf4j
public class TransactionTwoEventListener {

    @KafkaListener(topicPartitions = {
            @TopicPartition(topic = "TRANSACTION-TOPIC-2", partitions = "0")
    })
    @Transactional(rollbackFor = Exception.class, transactionManager = "kafkaTransactionManager")
    public void listenEvent2(String value,
                             @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                             @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
                             @Header(KafkaHeaders.OFFSET) long offset) {
        log.info("listenEvent2:接收kafka消息:[{}],from {} @ {}@ {}", value, topic, partition, offset);
    }
}
2. 日志留档
下面不用看了,全都是长篇日志,是用来后续文章对比参照用的。
2.1. 启动日志
2.1.1. Kafka启动日志
关键是打印Kafka节点配置,用于后续源码阅读时查阅对比
关键日志:
Connecting to zookeeper:连接到 Zookeeper
Client environment:Kafka节点的系统环境属性
KafkaConfig values:Kafka节点配置
[GroupCoordinator]:消费者组协调器启动,集群里每个节点都会有,所以启动时自动开启
[TransactionCoordinator]:事务协调器启动,集群里每个节点都会有,所以启动时自动开启
19:34:05: Executing ':core:Kafka.main()'...
> Configure project :
Building project 'core' with Scala version 2.11.12
> Task :clients:compileJava UP-TO-DATE
> Task :clients:processResources NO-SOURCE
> Task :clients:classes UP-TO-DATE
> Task :clients:determineCommitId UP-TO-DATE
> Task :clients:createVersionFile
> Task :clients:jar UP-TO-DATE
> Task :core:compileJava NO-SOURCE
> Task :core:compileScala UP-TO-DATE
> Task :core:processResources UP-TO-DATE
> Task :core:classes UP-TO-DATE
Connected to the target VM, address: 'localhost:53292', transport: 'socket'
> Task :core:Kafka.main()
[2023-03-01 19:34:07,140] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2023-03-01 19:34:07,764] INFO starting (kafka.server.KafkaServer)
[2023-03-01 19:34:07,765] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer)
[2023-03-01 19:34:07,789] INFO [ZooKeeperClient] Initializing a new session to localhost:2181. (kafka.zookeeper.ZooKeeperClient)
[2023-03-01 19:34:07,870] INFO Client environment:zookeeper.version=3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,870] INFO Client environment:host.name=KIT-DP-A0010 (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,870] INFO Client environment:java.version=1.8.0_251 (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,874] INFO Client environment:java.vendor=Oracle Corporation (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,874] INFO Client environment:java.home=C:\Program Files\Java\jdk1.8.0_251\jre (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,874] INFO Client environment:java.class.path=F:\workbox\github\kafka\core\build\classes\java\main;F:\workbox\github\kafka\core\build\classes\scala\main;F:\workbox\github\kafka\core\build\resources\main;F:\workbox\github\kafka\clients\build\libs\kafka-clients-1.1.2-SNAPSHOT.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\com.fasterxml.jackson.core\jackson-databind\2.9.7\e6faad47abd3179666e89068485a1b88a195ceb7\jackson-databind-2.9.7.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\net.sf.jopt-simple\jopt-simple\5.0.4\4fdac2fbe92dfad86aa6e9301736f6b4342a3f5c\jopt-simple-5.0.4.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\com.yammer.metrics\metrics-core\2.2.0\f82c035cfa786d3cbec362c38c22a5f5b1bc8724\metrics-core-2.2.0.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\com.typesafe.scala-logging\scala-logging_2.11\3.8.0\24b01e596b348c5caa195e44f6e22c32dfdd0c84\scala-logging_2.11-3.8.0.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\org.scala-lang\scala-reflect\2.11.12\2bb23c13c527566d9828107ca4108be2a2c06f01\scala-reflect-2.11.12.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\org.scala-lang\scala-library\2.11.12\bf5534e6fec3d665bd6419c952a929a8bdd4b591\scala-library-2.11.12.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\com.101tec\zkclient\0.10\c54d4b5a5e89af75a80b6d5857400165ce5188d0\zkclient-0.10.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\org.apache.zookeeper\zookeeper\3.4.10\8eebdbb7a9df83e02eaa42d0e5da0b57bf2e4da\zookeeper-3.4.10.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\org.slf4j\slf4j-log4j12\1.7.25\110cefe2df103412849d72ef7a67e4e91e4266b4\slf4j-log4j12-1.7.25.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\org.slf4j\slf4j-api\1.7.25\da76ca59f6a57ee3102f8f9bd9cee742973efa8a\slf4j-api-1.7.25.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\org.lz4\lz4-java\1.4.1\ad89b11ac280a2992d65e078af06f6709f1fe2fc\lz4-java-1.4.1.jar;C:\Us
ers\KitmanLee\.gradle\caches\modules-2\files-2.1\org.xerial.snappy\snappy-java\1.1.7.1\d5190b41f3de61e3b83d692322d58630252bc8c3\snappy-java-1.1.7.1.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\com.fasterxml.jackson.core\jackson-annotations\2.9.0\7c10d545325e3a6e72e06381afe469fd40eb701\jackson-annotations-2.9.0.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\com.fasterxml.jackson.core\jackson-core\2.9.7\4b7f0e0dc527fab032e9800ed231080fdc3ac015\jackson-core-2.9.7.jar;C:\Users\KitmanLee\.gradle\caches\modules-2\files-2.1\log4j\log4j\1.2.17\5af35056b4d257e4b64b9e8069c0746e8b08629f\log4j-1.2.17.jar (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,874] INFO Client environment:java.library.path=C:\Program Files\Java\jdk1.8.0_251\bin;C:\Windows\Sun\Java\bin;C:\Windows\system32;C:\Windows;C:\Program Files\Python310\Scripts\;C:\Program Files\Python310\;C:\Program Files\Java\jdk1.8.0_251\bin\;C:\Program Files\Java\jdk1.8.0_251\jre\bin\;D:\Program Files (x86)\NetSarang\Xshell 6\;D:\Program Files (x86)\NetSarang\Xftp 6\;C:\Program Files (x86)\VMware\VMware Workstation\bin\;C:\Program Files (x86)\Common Files\Oracle\Java\javapath;D:\workSoft\apache-maven-3.6.3-bin\apache-maven-3.6.3\bin\;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Windows\System32\OpenSSH\;C:\Program Files\Git\cmd;C:\ProgramData\chocolatey\bin;C:\Program Files\PuTTY\;C:\Program Files\IDM Computer Solutions\UltraEdit;C:\Android;C:\Windows\System32;D:\Program Files (x86)\Namp\;C:\Program Files (x86)\scala\bin;C:\Program Files (x86)\gradle\gradle-8.0\bin;C:\Users\KitmanLee\AppData\Local\Microsoft\WindowsApps;;C:\Users\KitmanLee\AppData\Local\Programs\Microsoft VS Code\bin;C:\Program Files (x86)\Nmap;. (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,875] INFO Client environment:java.io.tmpdir=C:\Users\KITMAN~1\AppData\Local\Temp\ (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,875] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,875] INFO Client environment:os.name=Windows 10 (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,876] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,876] INFO Client environment:os.version=10.0 (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,876] INFO Client environment:user.name=KitmanLee (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,876] INFO Client environment:user.home=C:\Users\KitmanLee (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,876] INFO Client environment:user.dir=F:\workbox\github\kafka (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:07,878] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@4e5ed836 (org.apache.zookeeper.ZooKeeper)
[2023-03-01 19:34:08,328] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2023-03-01 19:34:08,329] INFO Opening socket connection to server ieonline.microsoft.com/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
[2023-03-01 19:34:08,331] INFO Socket connection established to ieonline.microsoft.com/127.0.0.1:2181, initiating session (org.apache.zookeeper.ClientCnxn)
[2023-03-01 19:34:08,378] INFO Session establishment complete on server ieonline.microsoft.com/127.0.0.1:2181, sessionid = 0x100025967820000, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
[2023-03-01 19:34:08,380] INFO [ZooKeeperClient] Connected. (kafka.zookeeper.ZooKeeperClient)
[2023-03-01 19:34:09,077] INFO Cluster ID = AAxocmN2QcaKSWyM8YbHtQ (kafka.server.KafkaServer)
[2023-03-01 19:34:09,079] WARN No meta.properties file under dir F:\tmp\kafka-logs\meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2023-03-01 19:34:09,105] INFO KafkaConfig values:
advertised.host.name = null
advertised.listeners = null
advertised.port = null
alter.config.policy.class.name = null
alter.log.dirs.replication.quota.window.num = 11
alter.log.dirs.replication.quota.window.size.seconds = 1
authorizer.class.name =
auto.create.topics.enable = true
auto.leader.rebalance.enable = true
background.threads = 10
broker.id = 0
broker.id.generation.enable = true
broker.rack = null
compression.type = producer
connections.max.idle.ms = 600000
controlled.shutdown.enable = true
controlled.shutdown.max.retries = 3
controlled.shutdown.retry.backoff.ms = 5000
controller.socket.timeout.ms = 30000
create.topic.policy.class.name = null
default.replication.factor = 1
delegation.token.expiry.check.interval.ms = 3600000
delegation.token.expiry.time.ms = 86400000
delegation.token.master.key = null
delegation.token.max.lifetime.ms = 604800000
delete.records.purgatory.purge.interval.requests = 1
delete.topic.enable = true
fetch.purgatory.purge.interval.requests = 1000
group.initial.rebalance.delay.ms = 0
group.max.session.timeout.ms = 300000
group.min.session.timeout.ms = 6000
host.name =
inter.broker.listener.name = null
inter.broker.protocol.version = 1.1-IV0
leader.imbalance.check.interval.seconds = 300
leader.imbalance.per.broker.percentage = 10
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
listeners = null
log.cleaner.backoff.ms = 15000
log.cleaner.dedupe.buffer.size = 134217728
log.cleaner.delete.retention.ms = 86400000
log.cleaner.enable = true
log.cleaner.io.buffer.load.factor = 0.9
log.cleaner.io.buffer.size = 524288
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
log.cleaner.min.cleanable.ratio = 0.5
log.cleaner.min.compaction.lag.ms = 0
log.cleaner.threads = 1
log.cleanup.policy = [delete]
log.dir = /tmp/kafka-logs
log.dirs = /tmp/kafka-logs
log.flush.interval.messages = 9223372036854775807
log.flush.interval.ms = null
log.flush.offset.checkpoint.interval.ms = 60000
log.flush.scheduler.interval.ms = 9223372036854775807
log.flush.start.offset.checkpoint.interval.ms = 60000
log.index.interval.bytes = 4096
log.index.size.max.bytes = 10485760
log.message.format.version = 1.1-IV0
log.message.timestamp.difference.max.ms = 9223372036854775807
log.message.timestamp.type = CreateTime
log.preallocate = false
log.retention.bytes = -1
log.retention.check.interval.ms = 300000
log.retention.hours = 168
log.retention.minutes = null
log.retention.ms = null
log.roll.hours = 168
log.roll.jitter.hours = 0
log.roll.jitter.ms = null
log.roll.ms = null
log.segment.bytes = 1073741824
log.segment.delete.delay.ms = 60000
max.connections.per.ip = 2147483647
max.connections.per.ip.overrides =
max.incremental.fetch.session.cache.slots = 1000
message.max.bytes = 1000012
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
min.insync.replicas = 1
num.io.threads = 8
num.network.threads = 3
num.partitions = 1
num.recovery.threads.per.data.dir = 1
num.replica.alter.log.dirs.threads = null
num.replica.fetchers = 1
offset.metadata.max.bytes = 4096
offsets.commit.required.acks = -1
offsets.commit.timeout.ms = 5000
offsets.load.buffer.size = 5242880
offsets.retention.check.interval.ms = 600000
offsets.retention.minutes = 1440
offsets.topic.compression.codec = 0
offsets.topic.num.partitions = 50
offsets.topic.replication.factor = 1
offsets.topic.segment.bytes = 104857600
password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
password.encoder.iterations = 4096
password.encoder.key.length = 128
password.encoder.keyfactory.algorithm = null
password.encoder.old.secret = null
password.encoder.secret = null
port = 9092
principal.builder.class = null
producer.purgatory.purge.interval.requests = 1000
queued.max.request.bytes = -1
queued.max.requests = 500
quota.consumer.default = 9223372036854775807
quota.producer.default = 9223372036854775807
quota.window.num = 11
quota.window.size.seconds = 1
replica.fetch.backoff.ms = 1000
replica.fetch.max.bytes = 1048576
replica.fetch.min.bytes = 1
replica.fetch.response.max.bytes = 10485760
replica.fetch.wait.max.ms = 500
replica.high.watermark.checkpoint.interval.ms = 5000
replica.lag.time.max.ms = 10000
replica.socket.receive.buffer.bytes = 65536
replica.socket.timeout.ms = 30000
replication.quota.window.num = 11
replication.quota.window.size.seconds = 1
request.timeout.ms = 30000
reserved.broker.max.id = 1000
sasl.enabled.mechanisms = [GSSAPI]
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.principal.to.local.rules = [DEFAULT]
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism.inter.broker.protocol = GSSAPI
security.inter.broker.protocol = PLAINTEXT
socket.receive.buffer.bytes = 102400
socket.request.max.bytes = 104857600
socket.send.buffer.bytes = 102400
ssl.cipher.suites = []
ssl.client.auth = none
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
transaction.max.timeout.ms = 900000
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
transaction.state.log.load.buffer.size = 5242880
transaction.state.log.min.isr = 1
transaction.state.log.num.partitions = 50
transaction.state.log.replication.factor = 1
transaction.state.log.segment.bytes = 104857600
transactional.id.expiration.ms = 604800000
unclean.leader.election.enable = false
zookeeper.connect = localhost:2181
zookeeper.connection.timeout.ms = 6000
zookeeper.max.in.flight.requests = 10
zookeeper.session.timeout.ms = 6000
zookeeper.set.acl = false
zookeeper.sync.time.ms = 2000
(kafka.server.KafkaConfig)
[2023-03-01 19:34:09,109] INFO KafkaConfig values:
advertised.host.name = null
advertised.listeners = null
advertised.port = null
alter.config.policy.class.name = null
alter.log.dirs.replication.quota.window.num = 11
alter.log.dirs.replication.quota.window.size.seconds = 1
authorizer.class.name =
auto.create.topics.enable = true
auto.leader.rebalance.enable = true
background.threads = 10
broker.id = 0
broker.id.generation.enable = true
broker.rack = null
compression.type = producer
connections.max.idle.ms = 600000
controlled.shutdown.enable = true
controlled.shutdown.max.retries = 3
controlled.shutdown.retry.backoff.ms = 5000
controller.socket.timeout.ms = 30000
create.topic.policy.class.name = null
default.replication.factor = 1
delegation.token.expiry.check.interval.ms = 3600000
delegation.token.expiry.time.ms = 86400000
delegation.token.master.key = null
delegation.token.max.lifetime.ms = 604800000
delete.records.purgatory.purge.interval.requests = 1
delete.topic.enable = true
fetch.purgatory.purge.interval.requests = 1000
group.initial.rebalance.delay.ms = 0
group.max.session.timeout.ms = 300000
group.min.session.timeout.ms = 6000
host.name =
inter.broker.listener.name = null
inter.broker.protocol.version = 1.1-IV0
leader.imbalance.check.interval.seconds = 300
leader.imbalance.per.broker.percentage = 10
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
listeners = null
log.cleaner.backoff.ms = 15000
log.cleaner.dedupe.buffer.size = 134217728
log.cleaner.delete.retention.ms = 86400000
log.cleaner.enable = true
log.cleaner.io.buffer.load.factor = 0.9
log.cleaner.io.buffer.size = 524288
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
log.cleaner.min.cleanable.ratio = 0.5
log.cleaner.min.compaction.lag.ms = 0
log.cleaner.threads = 1
log.cleanup.policy = [delete]
log.dir = /tmp/kafka-logs
log.dirs = /tmp/kafka-logs
log.flush.interval.messages = 9223372036854775807
log.flush.interval.ms = null
log.flush.offset.checkpoint.interval.ms = 60000
log.flush.scheduler.interval.ms = 9223372036854775807
log.flush.start.offset.checkpoint.interval.ms = 60000
log.index.interval.bytes = 4096
log.index.size.max.bytes = 10485760
log.message.format.version = 1.1-IV0
log.message.timestamp.difference.max.ms = 9223372036854775807
log.message.timestamp.type = CreateTime
log.preallocate = false
log.retention.bytes = -1
log.retention.check.interval.ms = 300000
log.retention.hours = 168
log.retention.minutes = null
log.retention.ms = null
log.roll.hours = 168
log.roll.jitter.hours = 0
log.roll.jitter.ms = null
log.roll.ms = null
log.segment.bytes = 1073741824
log.segment.delete.delay.ms = 60000
max.connections.per.ip = 2147483647
max.connections.per.ip.overrides =
max.incremental.fetch.session.cache.slots = 1000
message.max.bytes = 1000012
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
min.insync.replicas = 1
num.io.threads = 8
num.network.threads = 3
num.partitions = 1
num.recovery.threads.per.data.dir = 1
num.replica.alter.log.dirs.threads = null
num.replica.fetchers = 1
offset.metadata.max.bytes = 4096
offsets.commit.required.acks = -1
offsets.commit.timeout.ms = 5000
offsets.load.buffer.size = 5242880
offsets.retention.check.interval.ms = 600000
offsets.retention.minutes = 1440
offsets.topic.compression.codec = 0
offsets.topic.num.partitions = 50
offsets.topic.replication.factor = 1
offsets.topic.segment.bytes = 104857600
password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
password.encoder.iterations = 4096
password.encoder.key.length = 128
password.encoder.keyfactory.algorithm = null
password.encoder.old.secret = null
password.encoder.secret = null
port = 9092
principal.builder.class = null
producer.purgatory.purge.interval.requests = 1000
queued.max.request.bytes = -1
queued.max.requests = 500
quota.consumer.default = 9223372036854775807
quota.producer.default = 9223372036854775807
quota.window.num = 11
quota.window.size.seconds = 1
replica.fetch.backoff.ms = 1000
replica.fetch.max.bytes = 1048576
replica.fetch.min.bytes = 1
replica.fetch.response.max.bytes = 10485760
replica.fetch.wait.max.ms = 500
replica.high.watermark.checkpoint.interval.ms = 5000
replica.lag.time.max.ms = 10000
replica.socket.receive.buffer.bytes = 65536
replica.socket.timeout.ms = 30000
replication.quota.window.num = 11
replication.quota.window.size.seconds = 1
request.timeout.ms = 30000
reserved.broker.max.id = 1000
sasl.enabled.mechanisms = [GSSAPI]
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.principal.to.local.rules = [DEFAULT]
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism.inter.broker.protocol = GSSAPI
security.inter.broker.protocol = PLAINTEXT
socket.receive.buffer.bytes = 102400
socket.request.max.bytes = 104857600
socket.send.buffer.bytes = 102400
ssl.cipher.suites = []
ssl.client.auth = none
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
transaction.max.timeout.ms = 900000
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
transaction.state.log.load.buffer.size = 5242880
transaction.state.log.min.isr = 1
transaction.state.log.num.partitions = 50
transaction.state.log.replication.factor = 1
transaction.state.log.segment.bytes = 104857600
transactional.id.expiration.ms = 604800000
unclean.leader.election.enable = false
zookeeper.connect = localhost:2181
zookeeper.connection.timeout.ms = 6000
zookeeper.max.in.flight.requests = 10
zookeeper.session.timeout.ms = 6000
zookeeper.set.acl = false
zookeeper.sync.time.ms = 2000
(kafka.server.KafkaConfig)
[2023-03-01 19:34:09,122] INFO [ThrottledRequestReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2023-03-01 19:34:09,122] INFO [ThrottledRequestReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2023-03-01 19:34:09,128] INFO [ThrottledRequestReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper)
[2023-03-01 19:34:09,146] INFO Loading logs. (kafka.log.LogManager)
[2023-03-01 19:34:09,150] INFO Logs loading complete in 4 ms. (kafka.log.LogManager)
[2023-03-01 19:34:09,155] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2023-03-01 19:34:09,156] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2023-03-01 19:34:09,345] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor)
[2023-03-01 19:34:09,361] INFO [SocketServer brokerId=0] Started 1 acceptor threads (kafka.network.SocketServer)
[2023-03-01 19:34:09,370] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2023-03-01 19:34:09,370] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2023-03-01 19:34:09,371] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2023-03-01 19:34:09,376] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2023-03-01 19:34:09,392] INFO Creating /brokers/ids/0 (is it secure? false) (kafka.zk.KafkaZkClient)
[2023-03-01 19:34:09,468] INFO Result of znode creation at /brokers/ids/0 is: OK (kafka.zk.KafkaZkClient)
[2023-03-01 19:34:09,468] INFO Registered broker 0 at path /brokers/ids/0 with addresses: ArrayBuffer(EndPoint(KIT-DP-A0010,9092,ListenerName(PLAINTEXT),PLAINTEXT)) (kafka.zk.KafkaZkClient)
[2023-03-01 19:34:09,469] WARN No meta.properties file under dir F:\tmp\kafka-logs\meta.properties (kafka.server.BrokerMetadataCheckpoint)
[2023-03-01 19:34:09,527] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2023-03-01 19:34:09,528] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2023-03-01 19:34:09,528] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2023-03-01 19:34:09,530] INFO Creating /controller (is it secure? false) (kafka.zk.KafkaZkClient)
[2023-03-01 19:34:09,561] INFO Result of znode creation at /controller is: OK (kafka.zk.KafkaZkClient)
[2023-03-01 19:34:09,563] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator)
[2023-03-01 19:34:09,564] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
[2023-03-01 19:34:09,564] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 19:34:09,659] INFO [ProducerId Manager 0]: Acquired new producerId block (brokerId:0,blockStartProducerId:0,blockEndProducerId:999) by writing to Zk with path version 1 (kafka.coordinator.transaction.ProducerIdManager)
[2023-03-01 19:34:09,671] INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
[2023-03-01 19:34:09,672] INFO [Transaction Marker Channel Manager 0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
[2023-03-01 19:34:09,672] INFO [TransactionCoordinator id=0] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
[2023-03-01 19:34:09,718] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
[2023-03-01 19:34:09,752] INFO [SocketServer brokerId=0] Started processors for 1 acceptors (kafka.network.SocketServer)
[2023-03-01 19:34:09,753] INFO Kafka version : 1.1.2-SNAPSHOT (org.apache.kafka.common.utils.AppInfoParser)
[2023-03-01 19:34:09,753] INFO Kafka commitId : cd15202179f339c4 (org.apache.kafka.common.utils.AppInfoParser)
[2023-03-01 19:34:09,754] INFO [KafkaServer id=0] started (kafka.server.KafkaServer)
2.1.2. 客户端启动日志
客户端启动时,会自动连接 Kafka 并对主题、分区、消费者组等配置信息进行注册(初始化)操作
关键日志:
Adding transactional:通过动态代理的方式,给每个@Transactional注解的方法加上事务处理程序
AdminClientConfig values:客户端配置
ConsumerConfig values:消费者配置
Discovered group coordinator:发现消费者组协调器,此时发起FindCoordinatorRequest请求
Found no committed offset for partition:消费者初次启动时,在 Kafka 服务端没有任何提交记录
Resetting offset for partition:消费者初次启动时,重置消费者在对应分区的提交偏移量为0
Connected to the target VM, address: '127.0.0.1:55070', transport: 'socket'
20:04:21.937 [Thread-0] DEBUG org.springframework.boot.devtools.restart.classloader.RestartClassLoader - Created RestartClassLoader org.springframework.boot.devtools.restart.classloader.RestartClassLoader@55dae2c6
. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v2.7.4)
2023-03-01 20:04:22.702 INFO 14708 --- [ restartedMain] c.l.p.e.kafka.SpringKafkaEvoApplication : Starting SpringKafkaEvoApplication using Java 1.8.0_251 on KIT-DP-A0010 with PID 14708 (F:\workbox\termux\spring-kafka-evo\target\classes started by KitmanLee in F:\workbox\termux\spring-kafka-evo)
2023-03-01 20:04:22.704 INFO 14708 --- [ restartedMain] c.l.p.e.kafka.SpringKafkaEvoApplication : No active profile set, falling back to 1 default profile: "default"
2023-03-01 20:04:22.799 INFO 14708 --- [ restartedMain] .e.DevToolsPropertyDefaultsPostProcessor : Devtools property defaults active! Set 'spring.devtools.add-properties' to 'false' to disable
2023-03-01 20:04:22.799 INFO 14708 --- [ restartedMain] .e.DevToolsPropertyDefaultsPostProcessor : For additional web related logging consider setting the 'logging.level.web' property to 'DEBUG'
2023-03-01 20:04:24.807 INFO 14708 --- [ restartedMain] .s.d.r.c.RepositoryConfigurationDelegate : Multiple Spring Data modules found, entering strict repository configuration mode
2023-03-01 20:04:24.810 INFO 14708 --- [ restartedMain] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data JPA repositories in DEFAULT mode.
2023-03-01 20:04:24.898 INFO 14708 --- [ restartedMain] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 76 ms. Found 3 JPA repository interfaces.
2023-03-01 20:04:24.915 INFO 14708 --- [ restartedMain] .s.d.r.c.RepositoryConfigurationDelegate : Multiple Spring Data modules found, entering strict repository configuration mode
2023-03-01 20:04:24.917 INFO 14708 --- [ restartedMain] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data Redis repositories in DEFAULT mode.
2023-03-01 20:04:24.936 INFO 14708 --- [ restartedMain] .RepositoryConfigurationExtensionSupport : Spring Data Redis - Could not safely identify store assignment for repository candidate interface com.leekitman.pangea.evolution.kafka.dao.ProcessEventRepository; If you want this repository to be a Redis repository, consider annotating your entities with one of these annotations: org.springframework.data.redis.core.RedisHash (preferred), or consider extending one of the following types with your repository: org.springframework.data.keyvalue.repository.KeyValueRepository
2023-03-01 20:04:24.938 INFO 14708 --- [ restartedMain] .RepositoryConfigurationExtensionSupport : Spring Data Redis - Could not safely identify store assignment for repository candidate interface com.leekitman.pangea.evolution.kafka.dao.SchedulingTaskRepository; If you want this repository to be a Redis repository, consider annotating your entities with one of these annotations: org.springframework.data.redis.core.RedisHash (preferred), or consider extending one of the following types with your repository: org.springframework.data.keyvalue.repository.KeyValueRepository
2023-03-01 20:04:24.938 INFO 14708 --- [ restartedMain] .RepositoryConfigurationExtensionSupport : Spring Data Redis - Could not safely identify store assignment for repository candidate interface com.leekitman.pangea.evolution.kafka.dao.TenantRepository; If you want this repository to be a Redis repository, consider annotating your entities with one of these annotations: org.springframework.data.redis.core.RedisHash (preferred), or consider extending one of the following types with your repository: org.springframework.data.keyvalue.repository.KeyValueRepository
2023-03-01 20:04:24.938 INFO 14708 --- [ restartedMain] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 11 ms. Found 0 Redis repository interfaces.
2023-03-01 20:04:26.085 INFO 14708 --- [ restartedMain] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http)
2023-03-01 20:04:26.097 INFO 14708 --- [ restartedMain] o.apache.catalina.core.StandardService : Starting service [Tomcat]
2023-03-01 20:04:26.097 INFO 14708 --- [ restartedMain] org.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/9.0.65]
2023-03-01 20:04:26.187 INFO 14708 --- [ restartedMain] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext
2023-03-01 20:04:26.187 INFO 14708 --- [ restartedMain] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 3387 ms
2023-03-01 20:04:26.225 INFO 14708 --- [ restartedMain] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Starting...
2023-03-01 20:04:26.499 INFO 14708 --- [ restartedMain] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Start completed.
2023-03-01 20:04:26.512 INFO 14708 --- [ restartedMain] o.s.b.a.h2.H2ConsoleAutoConfiguration : H2 console available at '/h2-console'. Database available at 'jdbc:h2:mem:3c1e605b-1fc2-4278-9533-d8453cc929a0'
2023-03-01 20:04:26.703 INFO 14708 --- [ restartedMain] o.hibernate.jpa.internal.util.LogHelper : HHH000204: Processing PersistenceUnitInfo [name: default]
2023-03-01 20:04:26.761 INFO 14708 --- [ restartedMain] org.hibernate.Version : HHH000412: Hibernate ORM core version 5.6.11.Final
2023-03-01 20:04:26.926 INFO 14708 --- [ restartedMain] o.hibernate.annotations.common.Version : HCANN000001: Hibernate Commons Annotations {5.1.2.Final}
2023-03-01 20:04:27.085 INFO 14708 --- [ restartedMain] org.hibernate.dialect.Dialect : HHH000400: Using dialect: org.hibernate.dialect.H2Dialect
Hibernate: drop table if exists process_event CASCADE
Hibernate: drop table if exists scheduling_task CASCADE
Hibernate: drop table if exists tenant CASCADE
Hibernate: create table process_event (id varchar(255) not null, exception varchar(255), name varchar(255), status integer, tenant_id varchar(255), topic varchar(255), primary key (id))
Hibernate: create table scheduling_task (id varchar(255) not null, assessed_emp_names varchar(255), event_id varchar(255), status integer, tenant_id varchar(255), title varchar(255), primary key (id))
Hibernate: create table tenant (id varchar(255) not null, name varchar(255), primary key (id))
2023-03-01 20:04:27.620 INFO 14708 --- [ restartedMain] o.h.t.schema.internal.SchemaCreatorImpl : HHH000476: Executing import script 'file:/F:/workbox/termux/spring-kafka-evo/target/classes/import.sql'
Hibernate: INSERT INTO process_event( id, topic, name, status, tenant_id ) VALUES('111', 'PLAN_NODE_FINISHED','计划节点结束111', 0, '111111111111')
Hibernate: INSERT INTO process_event( id, topic, name, status, tenant_id ) VALUES('222', 'PLAN_NODE_FINISHED','计划节点结束222', 0, '222222222222')
Hibernate: INSERT INTO process_event( id, topic, name, status, tenant_id ) VALUES('333', 'PLAN_NODE_FINISHED','计划节点结束333', 0, '333333333333')
Hibernate: INSERT INTO tenant( id, name ) VALUES('111111111111', '微软')
Hibernate: INSERT INTO tenant( id, name ) VALUES('222222222222', '百度')
Hibernate: INSERT INTO tenant( id, name ) VALUES('333333333333', '新浪')
2023-03-01 20:04:27.626 INFO 14708 --- [ restartedMain] o.h.e.t.j.p.i.JtaPlatformInitiator : HHH000490: Using JtaPlatform implementation: [org.hibernate.engine.transaction.jta.platform.internal.NoJtaPlatform]
2023-03-01 20:04:27.634 INFO 14708 --- [ restartedMain] j.LocalContainerEntityManagerFactoryBean : Initialized JPA EntityManagerFactory for persistence unit 'default'
2023-03-01 20:04:27.726 INFO 14708 --- [ restartedMain] c.l.p.e.kafka.config.BeanObserveConfig : 有几个事务管理器=2
2023-03-01 20:04:28.287 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.service.SenderService.doEventV1' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT,-java.lang.Exception
2023-03-01 20:04:28.296 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.service.SenderService.doEventV2' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT,-java.lang.Exception
2023-03-01 20:04:28.296 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.service.SenderService.sendTransactionTwo' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT,-java.lang.Exception
2023-03-01 20:04:28.304 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.consumer.TransactionOneEventListener.listenEvent1' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT; 'kafkaTransactionManager',-java.lang.Exception
2023-03-01 20:04:28.343 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.service.SchedulingTaskService.doTask' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT,-java.lang.Exception
2023-03-01 20:04:28.344 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.service.SchedulingTaskService.generateNodeStartTask' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT,-java.lang.Exception
2023-03-01 20:04:28.391 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.service.ProcessEventService.doEvent' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT,-java.lang.Exception
2023-03-01 20:04:28.392 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.service.ProcessEventService.dlt' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT,-java.lang.Exception
2023-03-01 20:04:28.405 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.controller.SenderController.sendTransactionTwo' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT; 'kafkaTransactionManager',-java.lang.Exception
2023-03-01 20:04:28.405 TRACE 14708 --- [ restartedMain] t.a.AnnotationTransactionAttributeSource : Adding transactional method 'com.leekitman.pangea.evolution.kafka.controller.SenderController.sendTransactionOne' with attribute: PROPAGATION_REQUIRED,ISOLATION_DEFAULT,-java.lang.Exception
2023-03-01 20:04:28.542 WARN 14708 --- [ restartedMain] JpaBaseConfiguration$JpaWebConfiguration : spring.jpa.open-in-view is enabled by default. Therefore, database queries may be performed during view rendering. Explicitly configure spring.jpa.open-in-view to disable this warning
2023-03-01 20:04:28.665 INFO 14708 --- [ restartedMain] o.s.b.a.w.s.WelcomePageHandlerMapping : Adding welcome page template: index
2023-03-01 20:04:29.080 INFO 14708 --- [ restartedMain] o.s.b.d.a.OptionalLiveReloadServer : LiveReload server is running on port 35729
2023-03-01 20:04:29.136 INFO 14708 --- [ restartedMain] o.a.k.clients.admin.AdminClientConfig : AdminClientConfig values:
bootstrap.servers = [localhost:9092]
client.dns.lookup = use_all_dns_ips
client.id =
connections.max.idle.ms = 300000
default.api.timeout.ms = 60000
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
receive.buffer.bytes = 65536
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 2147483647
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.connect.timeout.ms = null
sasl.login.read.timeout.ms = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.login.retry.backoff.max.ms = 10000
sasl.login.retry.backoff.ms = 100
sasl.mechanism = GSSAPI
sasl.oauthbearer.clock.skew.seconds = 30
sasl.oauthbearer.expected.audience = null
sasl.oauthbearer.expected.issuer = null
sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
sasl.oauthbearer.jwks.endpoint.url = null
sasl.oauthbearer.scope.claim.name = scope
sasl.oauthbearer.sub.claim.name = sub
sasl.oauthbearer.token.endpoint.url = null
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
2023-03-01 20:04:29.196 WARN 14708 --- [ restartedMain] o.a.k.clients.admin.AdminClientConfig : The configuration 'max.poll.interval.ms' was supplied but isn't a known config.
2023-03-01 20:04:29.197 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.1.2
2023-03-01 20:04:29.197 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: f8c67dc3ae0a3265
2023-03-01 20:04:29.197 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1677672269196
2023-03-01 20:04:30.071 INFO 14708 --- [| adminclient-1] o.a.kafka.common.utils.AppInfoParser : App info kafka.admin.client for adminclient-1 unregistered
2023-03-01 20:04:30.074 INFO 14708 --- [| adminclient-1] org.apache.kafka.common.metrics.Metrics : Metrics scheduler closed
2023-03-01 20:04:30.074 INFO 14708 --- [| adminclient-1] org.apache.kafka.common.metrics.Metrics : Closing reporter org.apache.kafka.common.metrics.JmxReporter
2023-03-01 20:04:30.075 INFO 14708 --- [| adminclient-1] org.apache.kafka.common.metrics.Metrics : Metrics reporters closed
2023-03-01 20:04:30.095 INFO 14708 --- [ restartedMain] o.a.k.clients.consumer.ConsumerConfig : ConsumerConfig values:
allow.auto.create.topics = true
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [localhost:9092]
check.crcs = true
client.dns.lookup = use_all_dns_ips
client.id = consumer-spring-kafka-evo-consumer-004-1
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = spring-kafka-evo-consumer-004
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
internal.throw.on.fetch.stable.offset.unsupported = false
isolation.level = read_committed
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 10000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
receive.buffer.bytes = 65536
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.connect.timeout.ms = null
sasl.login.read.timeout.ms = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.login.retry.backoff.max.ms = 10000
sasl.login.retry.backoff.ms = 100
sasl.mechanism = GSSAPI
sasl.oauthbearer.clock.skew.seconds = 30
sasl.oauthbearer.expected.audience = null
sasl.oauthbearer.expected.issuer = null
sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
sasl.oauthbearer.jwks.endpoint.url = null
sasl.oauthbearer.scope.claim.name = scope
sasl.oauthbearer.sub.claim.name = sub
sasl.oauthbearer.token.endpoint.url = null
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
session.timeout.ms = 45000
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
2023-03-01 20:04:30.124 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.1.2
2023-03-01 20:04:30.124 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: f8c67dc3ae0a3265
2023-03-01 20:04:30.124 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1677672270124
2023-03-01 20:04:30.125 INFO 14708 --- [ restartedMain] o.a.k.clients.consumer.KafkaConsumer : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-1, groupId=spring-kafka-evo-consumer-004] Subscribed to partition(s): TRANSACTION-TOPIC-1-0
2023-03-01 20:04:30.132 INFO 14708 --- [ restartedMain] o.a.k.clients.consumer.ConsumerConfig : ConsumerConfig values:
allow.auto.create.topics = true
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [localhost:9092]
check.crcs = true
client.dns.lookup = use_all_dns_ips
client.id = consumer-spring-kafka-evo-consumer-004-2
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = spring-kafka-evo-consumer-004
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
internal.throw.on.fetch.stable.offset.unsupported = false
isolation.level = read_committed
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 10000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor, class org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
receive.buffer.bytes = 65536
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.connect.timeout.ms = null
sasl.login.read.timeout.ms = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.login.retry.backoff.max.ms = 10000
sasl.login.retry.backoff.ms = 100
sasl.mechanism = GSSAPI
sasl.oauthbearer.clock.skew.seconds = 30
sasl.oauthbearer.expected.audience = null
sasl.oauthbearer.expected.issuer = null
sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
sasl.oauthbearer.jwks.endpoint.url = null
sasl.oauthbearer.scope.claim.name = scope
sasl.oauthbearer.sub.claim.name = sub
sasl.oauthbearer.token.endpoint.url = null
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
session.timeout.ms = 45000
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
2023-03-01 20:04:30.136 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.1.2
2023-03-01 20:04:30.137 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: f8c67dc3ae0a3265
2023-03-01 20:04:30.137 INFO 14708 --- [ restartedMain] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1677672270136
2023-03-01 20:04:30.137 INFO 14708 --- [ restartedMain] o.a.k.clients.consumer.KafkaConsumer : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-2, groupId=spring-kafka-evo-consumer-004] Subscribed to partition(s): TRANSACTION-TOPIC-2-0
2023-03-01 20:04:30.148 INFO 14708 --- [ restartedMain] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path ''
2023-03-01 20:04:30.157 INFO 14708 --- [ntainer#0-0-C-1] org.apache.kafka.clients.Metadata : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-1, groupId=spring-kafka-evo-consumer-004] Cluster ID: KtSoJctvTiyoVntH9E-rzA
2023-03-01 20:04:30.157 INFO 14708 --- [ntainer#1-0-C-1] org.apache.kafka.clients.Metadata : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-2, groupId=spring-kafka-evo-consumer-004] Cluster ID: KtSoJctvTiyoVntH9E-rzA
2023-03-01 20:04:30.161 INFO 14708 --- [ restartedMain] c.l.p.e.kafka.SpringKafkaEvoApplication : Started SpringKafkaEvoApplication in 8.211 seconds (JVM running for 9.466)
2023-03-01 20:04:34.600 INFO 14708 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-1, groupId=spring-kafka-evo-consumer-004] Discovered group coordinator KIT-DP-A0010:9092 (id: 2147483647 rack: null)
2023-03-01 20:04:34.607 INFO 14708 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-1, groupId=spring-kafka-evo-consumer-004] Found no committed offset for partition TRANSACTION-TOPIC-1-0
2023-03-01 20:04:34.613 INFO 14708 --- [ntainer#0-0-C-1] o.a.k.c.c.internals.SubscriptionState : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-1, groupId=spring-kafka-evo-consumer-004] Resetting offset for partition TRANSACTION-TOPIC-1-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[KIT-DP-A0010:9092 (id: 0 rack: null)], epoch=absent}}.
2023-03-01 20:04:34.614 INFO 14708 --- [ntainer#1-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-2, groupId=spring-kafka-evo-consumer-004] Discovered group coordinator KIT-DP-A0010:9092 (id: 2147483647 rack: null)
2023-03-01 20:04:34.616 INFO 14708 --- [ntainer#1-0-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-2, groupId=spring-kafka-evo-consumer-004] Found no committed offset for partition TRANSACTION-TOPIC-2-0
2023-03-01 20:04:34.617 INFO 14708 --- [ntainer#1-0-C-1] o.a.k.c.c.internals.SubscriptionState : [Consumer clientId=consumer-spring-kafka-evo-consumer-004-2, groupId=spring-kafka-evo-consumer-004] Resetting offset for partition TRANSACTION-TOPIC-2-0 to position FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[KIT-DP-A0010:9092 (id: 0 rack: null)], epoch=absent}}.
2.1.3. 客户端启动时Kafka日志
关键日志:
Topic creation Map:创建主题及分区,包括客户自定义主题以及 Kafka 内部主题
Replica loaded for partition:给每个分区分配副本节点
starts at Leader:确定分区副本的 leader 节点
Created log for partition:创建分区日志,可以根据具体日志打开物理目录 ~\tmp\kafka-logs,
查看创建好的分区日志文件,并用 kafka.tools.DumpLogSegments
程序进行物理文件解码查看日志内容
[2023-03-01 20:04:29,576] INFO Topic creation Map(TRANSACTION-TOPIC-2-0 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient)
[2023-03-01 20:04:29,692] INFO Topic creation Map(TRANSACTION-TOPIC-1-0 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient)
[2023-03-01 20:04:29,804] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions TRANSACTION-TOPIC-2-0 (kafka.server.ReplicaFetcherManager)
[2023-03-01 20:04:29,875] INFO [Log partition=TRANSACTION-TOPIC-2-0, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:29,878] INFO [Log partition=TRANSACTION-TOPIC-2-0, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 29 ms (kafka.log.Log)
[2023-03-01 20:04:29,879] INFO Created log for partition TRANSACTION-TOPIC-2-0 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:29,879] INFO [Partition TRANSACTION-TOPIC-2-0 broker=0] No checkpointed highwatermark is found for partition TRANSACTION-TOPIC-2-0 (kafka.cluster.Partition)
[2023-03-01 20:04:29,880] INFO Replica loaded for partition TRANSACTION-TOPIC-2-0 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:29,881] INFO [Partition TRANSACTION-TOPIC-2-0 broker=0] TRANSACTION-TOPIC-2-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:29,949] INFO [ReplicaAlterLogDirsManager on broker 0] Added fetcher for partitions List() (kafka.server.ReplicaAlterLogDirsManager)
[2023-03-01 20:04:29,953] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions TRANSACTION-TOPIC-1-0 (kafka.server.ReplicaFetcherManager)
[2023-03-01 20:04:29,967] INFO [Log partition=TRANSACTION-TOPIC-1-0, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:29,968] INFO [Log partition=TRANSACTION-TOPIC-1-0, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 13 ms (kafka.log.Log)
[2023-03-01 20:04:29,968] INFO Created log for partition TRANSACTION-TOPIC-1-0 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> [delete], flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 1073741824, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:30,009] INFO [Partition TRANSACTION-TOPIC-1-0 broker=0] No checkpointed highwatermark is found for partition TRANSACTION-TOPIC-1-0 (kafka.cluster.Partition)
[2023-03-01 20:04:30,009] INFO Replica loaded for partition TRANSACTION-TOPIC-1-0 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:30,009] INFO [Partition TRANSACTION-TOPIC-1-0 broker=0] TRANSACTION-TOPIC-1-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:30,066] INFO [ReplicaAlterLogDirsManager on broker 0] Added fetcher for partitions List() (kafka.server.ReplicaAlterLogDirsManager)
[2023-03-01 20:04:30,224] INFO Topic creation Map(__consumer_offsets-22 -> ArrayBuffer(0), __consumer_offsets-30 -> ArrayBuffer(0), __consumer_offsets-8 -> ArrayBuffer(0), __consumer_offsets-21 -> ArrayBuffer(0), __consumer_offsets-4 -> ArrayBuffer(0), __consumer_offsets-27 -> ArrayBuffer(0), __consumer_offsets-7 -> ArrayBuffer(0), __consumer_offsets-9 -> ArrayBuffer(0), __consumer_offsets-46 -> ArrayBuffer(0), __consumer_offsets-25 -> ArrayBuffer(0), __consumer_offsets-35 -> ArrayBuffer(0), __consumer_offsets-41 -> ArrayBuffer(0), __consumer_offsets-33 -> ArrayBuffer(0), __consumer_offsets-23 -> ArrayBuffer(0), __consumer_offsets-49 -> ArrayBuffer(0), __consumer_offsets-47 -> ArrayBuffer(0), __consumer_offsets-16 -> ArrayBuffer(0), __consumer_offsets-28 -> ArrayBuffer(0), __consumer_offsets-31 -> ArrayBuffer(0), __consumer_offsets-36 -> ArrayBuffer(0), __consumer_offsets-42 -> ArrayBuffer(0), __consumer_offsets-3 -> ArrayBuffer(0), __consumer_offsets-18 -> ArrayBuffer(0), __consumer_offsets-37 -> ArrayBuffer(0), __consumer_offsets-15 -> ArrayBuffer(0), __consumer_offsets-24 -> ArrayBuffer(0), __consumer_offsets-38 -> ArrayBuffer(0), __consumer_offsets-17 -> ArrayBuffer(0), __consumer_offsets-48 -> ArrayBuffer(0), __consumer_offsets-19 -> ArrayBuffer(0), __consumer_offsets-11 -> ArrayBuffer(0), __consumer_offsets-13 -> ArrayBuffer(0), __consumer_offsets-2 -> ArrayBuffer(0), __consumer_offsets-43 -> ArrayBuffer(0), __consumer_offsets-6 -> ArrayBuffer(0), __consumer_offsets-14 -> ArrayBuffer(0), __consumer_offsets-20 -> ArrayBuffer(0), __consumer_offsets-0 -> ArrayBuffer(0), __consumer_offsets-44 -> ArrayBuffer(0), __consumer_offsets-39 -> ArrayBuffer(0), __consumer_offsets-12 -> ArrayBuffer(0), __consumer_offsets-45 -> ArrayBuffer(0), __consumer_offsets-1 -> ArrayBuffer(0), __consumer_offsets-5 -> ArrayBuffer(0), __consumer_offsets-26 -> ArrayBuffer(0), __consumer_offsets-29 -> ArrayBuffer(0), __consumer_offsets-34 -> ArrayBuffer(0), __consumer_offsets-10 -> 
ArrayBuffer(0), __consumer_offsets-32 -> ArrayBuffer(0), __consumer_offsets-40 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient)
[2023-03-01 20:04:30,282] INFO [KafkaApi-0] Auto creation of topic __consumer_offsets with 50 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2023-03-01 20:04:30,312] INFO Topic creation Map(__consumer_offsets-22 -> ArrayBuffer(0), __consumer_offsets-30 -> ArrayBuffer(0), __consumer_offsets-8 -> ArrayBuffer(0), __consumer_offsets-21 -> ArrayBuffer(0), __consumer_offsets-4 -> ArrayBuffer(0), __consumer_offsets-27 -> ArrayBuffer(0), __consumer_offsets-7 -> ArrayBuffer(0), __consumer_offsets-9 -> ArrayBuffer(0), __consumer_offsets-46 -> ArrayBuffer(0), __consumer_offsets-25 -> ArrayBuffer(0), __consumer_offsets-35 -> ArrayBuffer(0), __consumer_offsets-41 -> ArrayBuffer(0), __consumer_offsets-33 -> ArrayBuffer(0), __consumer_offsets-23 -> ArrayBuffer(0), __consumer_offsets-49 -> ArrayBuffer(0), __consumer_offsets-47 -> ArrayBuffer(0), __consumer_offsets-16 -> ArrayBuffer(0), __consumer_offsets-28 -> ArrayBuffer(0), __consumer_offsets-31 -> ArrayBuffer(0), __consumer_offsets-36 -> ArrayBuffer(0), __consumer_offsets-42 -> ArrayBuffer(0), __consumer_offsets-3 -> ArrayBuffer(0), __consumer_offsets-18 -> ArrayBuffer(0), __consumer_offsets-37 -> ArrayBuffer(0), __consumer_offsets-15 -> ArrayBuffer(0), __consumer_offsets-24 -> ArrayBuffer(0), __consumer_offsets-38 -> ArrayBuffer(0), __consumer_offsets-17 -> ArrayBuffer(0), __consumer_offsets-48 -> ArrayBuffer(0), __consumer_offsets-19 -> ArrayBuffer(0), __consumer_offsets-11 -> ArrayBuffer(0), __consumer_offsets-13 -> ArrayBuffer(0), __consumer_offsets-2 -> ArrayBuffer(0), __consumer_offsets-43 -> ArrayBuffer(0), __consumer_offsets-6 -> ArrayBuffer(0), __consumer_offsets-14 -> ArrayBuffer(0), __consumer_offsets-20 -> ArrayBuffer(0), __consumer_offsets-0 -> ArrayBuffer(0), __consumer_offsets-44 -> ArrayBuffer(0), __consumer_offsets-39 -> ArrayBuffer(0), __consumer_offsets-12 -> ArrayBuffer(0), __consumer_offsets-45 -> ArrayBuffer(0), __consumer_offsets-1 -> ArrayBuffer(0), __consumer_offsets-5 -> ArrayBuffer(0), __consumer_offsets-26 -> ArrayBuffer(0), __consumer_offsets-29 -> ArrayBuffer(0), __consumer_offsets-34 -> ArrayBuffer(0), __consumer_offsets-10 -> 
ArrayBuffer(0), __consumer_offsets-32 -> ArrayBuffer(0), __consumer_offsets-40 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient)
[2023-03-01 20:04:31,087] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions __consumer_offsets-22,__consumer_offsets-30,__consumer_offsets-8,__consumer_offsets-21,__consumer_offsets-4,__consumer_offsets-27,__consumer_offsets-7,__consumer_offsets-9,__consumer_offsets-46,__consumer_offsets-25,__consumer_offsets-35,__consumer_offsets-41,__consumer_offsets-33,__consumer_offsets-23,__consumer_offsets-49,__consumer_offsets-47,__consumer_offsets-16,__consumer_offsets-28,__consumer_offsets-31,__consumer_offsets-36,__consumer_offsets-42,__consumer_offsets-3,__consumer_offsets-18,__consumer_offsets-37,__consumer_offsets-15,__consumer_offsets-24,__consumer_offsets-38,__consumer_offsets-17,__consumer_offsets-48,__consumer_offsets-19,__consumer_offsets-11,__consumer_offsets-13,__consumer_offsets-2,__consumer_offsets-43,__consumer_offsets-6,__consumer_offsets-14,__consumer_offsets-20,__consumer_offsets-0,__consumer_offsets-44,__consumer_offsets-39,__consumer_offsets-12,__consumer_offsets-45,__consumer_offsets-1,__consumer_offsets-5,__consumer_offsets-26,__consumer_offsets-29,__consumer_offsets-34,__consumer_offsets-10,__consumer_offsets-32,__consumer_offsets-40 (kafka.server.ReplicaFetcherManager)
[2023-03-01 20:04:31,090] INFO [Log partition=__consumer_offsets-0, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,090] INFO [Log partition=__consumer_offsets-0, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:31,090] INFO Created log for partition __consumer_offsets-0 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,091] INFO [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition)
[2023-03-01 20:04:31,091] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,091] INFO [Partition __consumer_offsets-0 broker=0] __consumer_offsets-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,149] INFO [Log partition=__consumer_offsets-29, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,150] INFO [Log partition=__consumer_offsets-29, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:31,150] INFO Created log for partition __consumer_offsets-29 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,150] INFO [Partition __consumer_offsets-29 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition)
[2023-03-01 20:04:31,150] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,150] INFO [Partition __consumer_offsets-29 broker=0] __consumer_offsets-29 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,224] INFO [Log partition=__consumer_offsets-48, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,225] INFO [Log partition=__consumer_offsets-48, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:31,225] INFO Created log for partition __consumer_offsets-48 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,225] INFO [Partition __consumer_offsets-48 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition)
[2023-03-01 20:04:31,225] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,225] INFO [Partition __consumer_offsets-48 broker=0] __consumer_offsets-48 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,299] INFO [Log partition=__consumer_offsets-10, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,299] INFO [Log partition=__consumer_offsets-10, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:31,299] INFO Created log for partition __consumer_offsets-10 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,300] INFO [Partition __consumer_offsets-10 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition)
[2023-03-01 20:04:31,300] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,300] INFO [Partition __consumer_offsets-10 broker=0] __consumer_offsets-10 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,357] INFO [Log partition=__consumer_offsets-45, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,357] INFO [Log partition=__consumer_offsets-45, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:31,357] INFO Created log for partition __consumer_offsets-45 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,358] INFO [Partition __consumer_offsets-45 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition)
[2023-03-01 20:04:31,358] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,358] INFO [Partition __consumer_offsets-45 broker=0] __consumer_offsets-45 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,425] INFO [Log partition=__consumer_offsets-26, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,425] INFO [Log partition=__consumer_offsets-26, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:31,426] INFO Created log for partition __consumer_offsets-26 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,426] INFO [Partition __consumer_offsets-26 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition)
[2023-03-01 20:04:31,426] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,426] INFO [Partition __consumer_offsets-26 broker=0] __consumer_offsets-26 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,499] INFO [Log partition=__consumer_offsets-7, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,499] INFO [Log partition=__consumer_offsets-7, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:31,499] INFO Created log for partition __consumer_offsets-7 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,499] INFO [Partition __consumer_offsets-7 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition)
[2023-03-01 20:04:31,499] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,499] INFO [Partition __consumer_offsets-7 broker=0] __consumer_offsets-7 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,564] INFO [Log partition=__consumer_offsets-42, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,565] INFO [Log partition=__consumer_offsets-42, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:31,565] INFO Created log for partition __consumer_offsets-42 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,565] INFO [Partition __consumer_offsets-42 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition)
[2023-03-01 20:04:31,565] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,565] INFO [Partition __consumer_offsets-42 broker=0] __consumer_offsets-42 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,631] INFO [Log partition=__consumer_offsets-4, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,631] INFO [Log partition=__consumer_offsets-4, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:31,631] INFO Created log for partition __consumer_offsets-4 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,632] INFO [Partition __consumer_offsets-4 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition)
[2023-03-01 20:04:31,632] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,632] INFO [Partition __consumer_offsets-4 broker=0] __consumer_offsets-4 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,698] INFO [Log partition=__consumer_offsets-23, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,698] INFO [Log partition=__consumer_offsets-23, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:31,698] INFO Created log for partition __consumer_offsets-23 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,699] INFO [Partition __consumer_offsets-23 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition)
[2023-03-01 20:04:31,699] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,699] INFO [Partition __consumer_offsets-23 broker=0] __consumer_offsets-23 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,772] INFO [Log partition=__consumer_offsets-1, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,773] INFO [Log partition=__consumer_offsets-1, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:31,773] INFO Created log for partition __consumer_offsets-1 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,773] INFO [Partition __consumer_offsets-1 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,773] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,773] INFO [Partition __consumer_offsets-1 broker=0] __consumer_offsets-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:31,847] INFO [Log partition=__consumer_offsets-20, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:31,847] INFO [Log partition=__consumer_offsets-20, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:31,848] INFO Created log for partition __consumer_offsets-20 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:31,848] INFO [Partition __consumer_offsets-20 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition)
[2023-03-01 20:04:31,848] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:31,848] INFO [Partition __consumer_offsets-20 broker=0] __consumer_offsets-20 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,022] INFO [Log partition=__consumer_offsets-39, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,023] INFO [Log partition=__consumer_offsets-39, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:32,023] INFO Created log for partition __consumer_offsets-39 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,023] INFO [Partition __consumer_offsets-39 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition)
[2023-03-01 20:04:32,023] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,023] INFO [Partition __consumer_offsets-39 broker=0] __consumer_offsets-39 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,096] INFO [Log partition=__consumer_offsets-17, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,096] INFO [Log partition=__consumer_offsets-17, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:32,097] INFO Created log for partition __consumer_offsets-17 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,097] INFO [Partition __consumer_offsets-17 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition)
[2023-03-01 20:04:32,097] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,097] INFO [Partition __consumer_offsets-17 broker=0] __consumer_offsets-17 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,164] INFO [Log partition=__consumer_offsets-36, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,164] INFO [Log partition=__consumer_offsets-36, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:32,164] INFO Created log for partition __consumer_offsets-36 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,164] INFO [Partition __consumer_offsets-36 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition)
[2023-03-01 20:04:32,164] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,164] INFO [Partition __consumer_offsets-36 broker=0] __consumer_offsets-36 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,222] INFO [Log partition=__consumer_offsets-14, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,222] INFO [Log partition=__consumer_offsets-14, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:32,222] INFO Created log for partition __consumer_offsets-14 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,223] INFO [Partition __consumer_offsets-14 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition)
[2023-03-01 20:04:32,223] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,223] INFO [Partition __consumer_offsets-14 broker=0] __consumer_offsets-14 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,287] INFO [Log partition=__consumer_offsets-33, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,287] INFO [Log partition=__consumer_offsets-33, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:32,288] INFO Created log for partition __consumer_offsets-33 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,288] INFO [Partition __consumer_offsets-33 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition)
[2023-03-01 20:04:32,288] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,288] INFO [Partition __consumer_offsets-33 broker=0] __consumer_offsets-33 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,346] INFO [Log partition=__consumer_offsets-49, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,346] INFO [Log partition=__consumer_offsets-49, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:32,347] INFO Created log for partition __consumer_offsets-49 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,347] INFO [Partition __consumer_offsets-49 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition)
[2023-03-01 20:04:32,347] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,347] INFO [Partition __consumer_offsets-49 broker=0] __consumer_offsets-49 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,420] INFO [Log partition=__consumer_offsets-11, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,421] INFO [Log partition=__consumer_offsets-11, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:32,421] INFO Created log for partition __consumer_offsets-11 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,421] INFO [Partition __consumer_offsets-11 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition)
[2023-03-01 20:04:32,421] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,421] INFO [Partition __consumer_offsets-11 broker=0] __consumer_offsets-11 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,487] INFO [Log partition=__consumer_offsets-30, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,488] INFO [Log partition=__consumer_offsets-30, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:32,488] INFO Created log for partition __consumer_offsets-30 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,488] INFO [Partition __consumer_offsets-30 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition)
[2023-03-01 20:04:32,488] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,488] INFO [Partition __consumer_offsets-30 broker=0] __consumer_offsets-30 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,563] INFO [Log partition=__consumer_offsets-46, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,563] INFO [Log partition=__consumer_offsets-46, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:32,564] INFO Created log for partition __consumer_offsets-46 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,564] INFO [Partition __consumer_offsets-46 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition)
[2023-03-01 20:04:32,564] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,564] INFO [Partition __consumer_offsets-46 broker=0] __consumer_offsets-46 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,629] INFO [Log partition=__consumer_offsets-27, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,629] INFO [Log partition=__consumer_offsets-27, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:32,630] INFO Created log for partition __consumer_offsets-27 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,630] INFO [Partition __consumer_offsets-27 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition)
[2023-03-01 20:04:32,630] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,630] INFO [Partition __consumer_offsets-27 broker=0] __consumer_offsets-27 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,695] INFO [Log partition=__consumer_offsets-8, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,695] INFO [Log partition=__consumer_offsets-8, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:32,696] INFO Created log for partition __consumer_offsets-8 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,696] INFO [Partition __consumer_offsets-8 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition)
[2023-03-01 20:04:32,696] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,696] INFO [Partition __consumer_offsets-8 broker=0] __consumer_offsets-8 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,753] INFO [Log partition=__consumer_offsets-24, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,753] INFO [Log partition=__consumer_offsets-24, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:32,754] INFO Created log for partition __consumer_offsets-24 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,754] INFO [Partition __consumer_offsets-24 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition)
[2023-03-01 20:04:32,754] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,754] INFO [Partition __consumer_offsets-24 broker=0] __consumer_offsets-24 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,811] INFO [Log partition=__consumer_offsets-43, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,812] INFO [Log partition=__consumer_offsets-43, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:32,812] INFO Created log for partition __consumer_offsets-43 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,812] INFO [Partition __consumer_offsets-43 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition)
[2023-03-01 20:04:32,812] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,812] INFO [Partition __consumer_offsets-43 broker=0] __consumer_offsets-43 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,878] INFO [Log partition=__consumer_offsets-5, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,879] INFO [Log partition=__consumer_offsets-5, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:32,879] INFO Created log for partition __consumer_offsets-5 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,879] INFO [Partition __consumer_offsets-5 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition)
[2023-03-01 20:04:32,879] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,879] INFO [Partition __consumer_offsets-5 broker=0] __consumer_offsets-5 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:32,945] INFO [Log partition=__consumer_offsets-21, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:32,945] INFO [Log partition=__consumer_offsets-21, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:32,945] INFO Created log for partition __consumer_offsets-21 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:32,945] INFO [Partition __consumer_offsets-21 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition)
[2023-03-01 20:04:32,946] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:32,946] INFO [Partition __consumer_offsets-21 broker=0] __consumer_offsets-21 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,011] INFO [Log partition=__consumer_offsets-2, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,011] INFO [Log partition=__consumer_offsets-2, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:33,011] INFO Created log for partition __consumer_offsets-2 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,011] INFO [Partition __consumer_offsets-2 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition)
[2023-03-01 20:04:33,011] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,012] INFO [Partition __consumer_offsets-2 broker=0] __consumer_offsets-2 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,086] INFO [Log partition=__consumer_offsets-40, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,086] INFO [Log partition=__consumer_offsets-40, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,086] INFO Created log for partition __consumer_offsets-40 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,087] INFO [Partition __consumer_offsets-40 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition)
[2023-03-01 20:04:33,087] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,087] INFO [Partition __consumer_offsets-40 broker=0] __consumer_offsets-40 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,144] INFO [Log partition=__consumer_offsets-37, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,144] INFO [Log partition=__consumer_offsets-37, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,144] INFO Created log for partition __consumer_offsets-37 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,145] INFO [Partition __consumer_offsets-37 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition)
[2023-03-01 20:04:33,145] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,145] INFO [Partition __consumer_offsets-37 broker=0] __consumer_offsets-37 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,210] INFO [Log partition=__consumer_offsets-18, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,210] INFO [Log partition=__consumer_offsets-18, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,210] INFO Created log for partition __consumer_offsets-18 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,210] INFO [Partition __consumer_offsets-18 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition)
[2023-03-01 20:04:33,210] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,210] INFO [Partition __consumer_offsets-18 broker=0] __consumer_offsets-18 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,268] INFO [Log partition=__consumer_offsets-34, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,269] INFO [Log partition=__consumer_offsets-34, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:33,269] INFO Created log for partition __consumer_offsets-34 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,269] INFO [Partition __consumer_offsets-34 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition)
[2023-03-01 20:04:33,269] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,269] INFO [Partition __consumer_offsets-34 broker=0] __consumer_offsets-34 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,327] INFO [Log partition=__consumer_offsets-15, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,327] INFO [Log partition=__consumer_offsets-15, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,327] INFO Created log for partition __consumer_offsets-15 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,327] INFO [Partition __consumer_offsets-15 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition)
[2023-03-01 20:04:33,327] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,327] INFO [Partition __consumer_offsets-15 broker=0] __consumer_offsets-15 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,393] INFO [Log partition=__consumer_offsets-12, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,393] INFO [Log partition=__consumer_offsets-12, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,393] INFO Created log for partition __consumer_offsets-12 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,393] INFO [Partition __consumer_offsets-12 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition)
[2023-03-01 20:04:33,393] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,394] INFO [Partition __consumer_offsets-12 broker=0] __consumer_offsets-12 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,451] INFO [Log partition=__consumer_offsets-31, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,451] INFO [Log partition=__consumer_offsets-31, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:33,451] INFO Created log for partition __consumer_offsets-31 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,451] INFO [Partition __consumer_offsets-31 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition)
[2023-03-01 20:04:33,451] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,451] INFO [Partition __consumer_offsets-31 broker=0] __consumer_offsets-31 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,517] INFO [Log partition=__consumer_offsets-9, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,517] INFO [Log partition=__consumer_offsets-9, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,518] INFO Created log for partition __consumer_offsets-9 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,518] INFO [Partition __consumer_offsets-9 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition)
[2023-03-01 20:04:33,518] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,518] INFO [Partition __consumer_offsets-9 broker=0] __consumer_offsets-9 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,584] INFO [Log partition=__consumer_offsets-47, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,584] INFO [Log partition=__consumer_offsets-47, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,584] INFO Created log for partition __consumer_offsets-47 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,585] INFO [Partition __consumer_offsets-47 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition)
[2023-03-01 20:04:33,585] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,585] INFO [Partition __consumer_offsets-47 broker=0] __consumer_offsets-47 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,643] INFO [Log partition=__consumer_offsets-19, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,643] INFO [Log partition=__consumer_offsets-19, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:33,643] INFO Created log for partition __consumer_offsets-19 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,644] INFO [Partition __consumer_offsets-19 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition)
[2023-03-01 20:04:33,644] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,644] INFO [Partition __consumer_offsets-19 broker=0] __consumer_offsets-19 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,708] INFO [Log partition=__consumer_offsets-28, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,708] INFO [Log partition=__consumer_offsets-28, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,708] INFO Created log for partition __consumer_offsets-28 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,709] INFO [Partition __consumer_offsets-28 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition)
[2023-03-01 20:04:33,709] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,709] INFO [Partition __consumer_offsets-28 broker=0] __consumer_offsets-28 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,775] INFO [Log partition=__consumer_offsets-38, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,776] INFO [Log partition=__consumer_offsets-38, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:33,776] INFO Created log for partition __consumer_offsets-38 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,776] INFO [Partition __consumer_offsets-38 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition)
[2023-03-01 20:04:33,776] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,776] INFO [Partition __consumer_offsets-38 broker=0] __consumer_offsets-38 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,843] INFO [Log partition=__consumer_offsets-35, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,843] INFO [Log partition=__consumer_offsets-35, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:33,844] INFO Created log for partition __consumer_offsets-35 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,844] INFO [Partition __consumer_offsets-35 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition)
[2023-03-01 20:04:33,844] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,844] INFO [Partition __consumer_offsets-35 broker=0] __consumer_offsets-35 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,917] INFO [Log partition=__consumer_offsets-44, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,917] INFO [Log partition=__consumer_offsets-44, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:33,918] INFO Created log for partition __consumer_offsets-44 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,918] INFO [Partition __consumer_offsets-44 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition)
[2023-03-01 20:04:33,918] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,918] INFO [Partition __consumer_offsets-44 broker=0] __consumer_offsets-44 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:33,983] INFO [Log partition=__consumer_offsets-6, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:33,983] INFO [Log partition=__consumer_offsets-6, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:33,983] INFO Created log for partition __consumer_offsets-6 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:33,983] INFO [Partition __consumer_offsets-6 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition)
[2023-03-01 20:04:33,983] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:33,983] INFO [Partition __consumer_offsets-6 broker=0] __consumer_offsets-6 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:34,041] INFO [Log partition=__consumer_offsets-25, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:34,041] INFO [Log partition=__consumer_offsets-25, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:34,045] INFO Created log for partition __consumer_offsets-25 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:34,045] INFO [Partition __consumer_offsets-25 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition)
[2023-03-01 20:04:34,045] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:34,045] INFO [Partition __consumer_offsets-25 broker=0] __consumer_offsets-25 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:34,116] INFO [Log partition=__consumer_offsets-16, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:34,116] INFO [Log partition=__consumer_offsets-16, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:34,116] INFO Created log for partition __consumer_offsets-16 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:34,116] INFO [Partition __consumer_offsets-16 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition)
[2023-03-01 20:04:34,116] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:34,117] INFO [Partition __consumer_offsets-16 broker=0] __consumer_offsets-16 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:34,175] INFO [Log partition=__consumer_offsets-22, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:34,175] INFO [Log partition=__consumer_offsets-22, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:34,176] INFO Created log for partition __consumer_offsets-22 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:34,176] INFO [Partition __consumer_offsets-22 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition)
[2023-03-01 20:04:34,176] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:34,176] INFO [Partition __consumer_offsets-22 broker=0] __consumer_offsets-22 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:34,249] INFO [Log partition=__consumer_offsets-41, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:34,250] INFO [Log partition=__consumer_offsets-41, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:34,250] INFO Created log for partition __consumer_offsets-41 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:34,250] INFO [Partition __consumer_offsets-41 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition)
[2023-03-01 20:04:34,250] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:34,250] INFO [Partition __consumer_offsets-41 broker=0] __consumer_offsets-41 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:34,315] INFO [Log partition=__consumer_offsets-32, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:34,316] INFO [Log partition=__consumer_offsets-32, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:34,316] INFO Created log for partition __consumer_offsets-32 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:34,316] INFO [Partition __consumer_offsets-32 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition)
[2023-03-01 20:04:34,316] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:34,316] INFO [Partition __consumer_offsets-32 broker=0] __consumer_offsets-32 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:34,382] INFO [Log partition=__consumer_offsets-3, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:34,382] INFO [Log partition=__consumer_offsets-3, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:04:34,383] INFO Created log for partition __consumer_offsets-3 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:34,383] INFO [Partition __consumer_offsets-3 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition)
[2023-03-01 20:04:34,383] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:34,383] INFO [Partition __consumer_offsets-3 broker=0] __consumer_offsets-3 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:34,456] INFO [Log partition=__consumer_offsets-13, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:04:34,457] INFO [Log partition=__consumer_offsets-13, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:04:34,457] INFO Created log for partition __consumer_offsets-13 in F:\tmp\kafka-logs with properties {compression.type -> producer, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:04:34,457] INFO [Partition __consumer_offsets-13 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition)
[2023-03-01 20:04:34,457] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:04:34,457] INFO [Partition __consumer_offsets-13 broker=0] __consumer_offsets-13 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:04:34,522] INFO [ReplicaAlterLogDirsManager on broker 0] Added fetcher for partitions List() (kafka.server.ReplicaAlterLogDirsManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,523] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,524] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,528] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-22 in 5 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-25 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-28 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-31 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-34 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-37 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-40 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-43 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-46 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-49 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,529] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-41 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-44 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-47 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-1 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-4 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-7 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-10 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-13 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-16 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-19 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-2 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-5 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-8 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-11 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-14 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-17 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-20 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-23 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-26 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,530] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-29 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-32 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-35 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-38 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-0 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-3 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-6 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-9 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-12 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-15 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-18 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-21 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-24 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-27 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-30 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-33 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-36 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-39 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-42 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-45 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2023-03-01 20:04:34,531] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-48 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
2.2. 第一次发送事务消息日志
第一次发送事务消息时,会有查找事务协调器所在节点、初始化 Kafka 内部主题 __transaction_state 及其分区、初始化 epoch = 0 的生产者 ID(PID)等操作。
对比再次发送事务消息的日志可以发现,第一次发送事务消息时多出来的这些初始化操作,并不会在之后每次发送事务消息时重复出现。
2.2.1. 客户端日志
2023-03-01 20:06:00.317 INFO 14708 --- [nio-8080-exec-2] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring DispatcherServlet 'dispatcherServlet'
2023-03-01 20:06:00.317 INFO 14708 --- [nio-8080-exec-2] o.s.web.servlet.DispatcherServlet : Initializing Servlet 'dispatcherServlet'
2023-03-01 20:06:00.318 INFO 14708 --- [nio-8080-exec-2] o.s.web.servlet.DispatcherServlet : Completed initialization in 1 ms
2023-03-01 20:06:00.350 DEBUG 14708 --- [nio-8080-exec-2] o.s.k.t.KafkaTransactionManager : Creating new transaction with name [com.leekitman.pangea.evolution.kafka.controller.SenderController.sendTransactionTwo]: PROPAGATION_REQUIRED,ISOLATION_DEFAULT; 'kafkaTransactionManager',-java.lang.Exception
2023-03-01 20:06:00.356 INFO 14708 --- [nio-8080-exec-2] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = -1
batch.size = 16384
bootstrap.servers = [localhost:9092]
buffer.memory = 33554432
client.dns.lookup = use_all_dns_ips
client.id = producer-tx-kafka-0
compression.type = none
connections.max.idle.ms = 540000
delivery.timeout.ms = 120000
enable.idempotence = true
interceptor.classes = []
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.max.age.ms = 300000
metadata.max.idle.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 2147483647
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.connect.timeout.ms = null
sasl.login.read.timeout.ms = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.login.retry.backoff.max.ms = 10000
sasl.login.retry.backoff.ms = 100
sasl.mechanism = GSSAPI
sasl.oauthbearer.clock.skew.seconds = 30
sasl.oauthbearer.expected.audience = null
sasl.oauthbearer.expected.issuer = null
sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
sasl.oauthbearer.jwks.endpoint.url = null
sasl.oauthbearer.scope.claim.name = scope
sasl.oauthbearer.sub.claim.name = sub
sasl.oauthbearer.token.endpoint.url = null
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = tx-kafka-0
value.serializer = class org.springframework.kafka.support.serializer.ToStringSerializer
2023-03-01 20:06:00.366 INFO 14708 --- [nio-8080-exec-2] o.a.k.clients.producer.KafkaProducer : [Producer clientId=producer-tx-kafka-0, transactionalId=tx-kafka-0] Instantiated a transactional producer.
2023-03-01 20:06:00.384 WARN 14708 --- [nio-8080-exec-2] o.a.k.clients.producer.ProducerConfig : The configuration 'max.poll.interval.ms' was supplied but isn't a known config.
2023-03-01 20:06:00.384 INFO 14708 --- [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.1.2
2023-03-01 20:06:00.384 INFO 14708 --- [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: f8c67dc3ae0a3265
2023-03-01 20:06:00.384 INFO 14708 --- [nio-8080-exec-2] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1677672360384
2023-03-01 20:06:00.385 INFO 14708 --- [nio-8080-exec-2] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-0, transactionalId=tx-kafka-0] Invoking InitProducerId for the first time in order to acquire a producer ID
2023-03-01 20:06:00.388 INFO 14708 --- [ucer-tx-kafka-0] org.apache.kafka.clients.Metadata : [Producer clientId=producer-tx-kafka-0, transactionalId=tx-kafka-0] Cluster ID: KtSoJctvTiyoVntH9E-rzA
2023-03-01 20:06:04.812 INFO 14708 --- [ucer-tx-kafka-0] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-0, transactionalId=tx-kafka-0] Discovered transaction coordinator KIT-DP-A0010:9092 (id: 0 rack: null)
2023-03-01 20:06:04.938 INFO 14708 --- [ucer-tx-kafka-0] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-0, transactionalId=tx-kafka-0] ProducerId set to 0 with epoch 0
2023-03-01 20:06:04.940 DEBUG 14708 --- [nio-8080-exec-2] o.s.k.t.KafkaTransactionManager : Created Kafka transaction on producer [CloseSafeProducer [delegate=org.apache.kafka.clients.producer.KafkaProducer@38717385]]
2023-03-01 20:06:04.940 TRACE 14708 --- [nio-8080-exec-2] o.s.t.i.TransactionInterceptor : Getting transaction for [com.leekitman.pangea.evolution.kafka.controller.SenderController.sendTransactionTwo]
2023-03-01 20:06:04.944 INFO 14708 --- [nio-8080-exec-2] c.l.p.e.k.controller.SenderController : 发送消息:success-success
2023-03-01 20:06:04.946 TRACE 14708 --- [nio-8080-exec-2] o.s.t.i.TransactionInterceptor : Getting transaction for [com.leekitman.pangea.evolution.kafka.service.SenderService.sendTransactionTwo]
2023-03-01 20:06:04.953 TRACE 14708 --- [nio-8080-exec-2] o.s.t.i.TransactionInterceptor : Getting transaction for [org.springframework.data.jpa.repository.support.SimpleJpaRepository.findAll]
Hibernate: select processeve0_.id as id1_0_, processeve0_.exception as exceptio2_0_, processeve0_.name as name3_0_, processeve0_.status as status4_0_, processeve0_.tenant_id as tenant_i5_0_, processeve0_.topic as topic6_0_ from process_event processeve0_
2023-03-01 20:06:05.016 TRACE 14708 --- [nio-8080-exec-2] o.s.t.i.TransactionInterceptor : Completing transaction for [org.springframework.data.jpa.repository.support.SimpleJpaRepository.findAll]
2023-03-01 20:06:05.016 INFO 14708 --- [nio-8080-exec-2] c.l.p.e.kafka.service.SenderService : 1-验证数据库事务,查询数据库:[ProcessEventEntity(id=111, topic=PLAN_NODE_FINISHED, name=计划节点结束111, status=0, exception=null, tenantId=111111111111), ProcessEventEntity(id=222, topic=PLAN_NODE_FINISHED, name=计划节点结束222, status=0, exception=null, tenantId=222222222222), ProcessEventEntity(id=333, topic=PLAN_NODE_FINISHED, name=计划节点结束333, status=0, exception=null, tenantId=333333333333)]
2023-03-01 20:06:05.031 TRACE 14708 --- [nio-8080-exec-2] o.s.t.i.TransactionInterceptor : Completing transaction for [com.leekitman.pangea.evolution.kafka.service.SenderService.sendTransactionTwo]
2023-03-01 20:06:05.042 TRACE 14708 --- [nio-8080-exec-2] o.s.t.i.TransactionInterceptor : Completing transaction for [com.leekitman.pangea.evolution.kafka.controller.SenderController.sendTransactionTwo]
2023-03-01 20:06:05.042 DEBUG 14708 --- [nio-8080-exec-2] o.s.k.t.KafkaTransactionManager : Initiating transaction commit
2023-03-01 20:06:05.067 INFO 14708 --- [ucer-tx-kafka-0] c.l.p.e.kafka.service.SenderService : 2-事务发送kafka消息成功回调: SendResult [producerRecord=ProducerRecord(topic=TRANSACTION-TOPIC-1, partition=null, headers=RecordHeaders(headers = [RecordHeader(key = spring.message.value.type, value = [106, 97, 118, 97, 46, 108, 97, 110, 103, 46, 83, 116, 114, 105, 110, 103])], isReadOnly = true), key=null, value=第一事件:success-success, timestamp=null), recordMetadata=TRANSACTION-TOPIC-1-0@0]
2023-03-01 20:06:05.089 DEBUG 14708 --- [ntainer#0-0-C-1] o.s.k.t.KafkaTransactionManager : Creating new transaction with name [null]: PROPAGATION_REQUIRED,ISOLATION_DEFAULT
2023-03-01 20:06:05.089 INFO 14708 --- [ntainer#0-0-C-1] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = -1
batch.size = 16384
bootstrap.servers = [localhost:9092]
buffer.memory = 33554432
client.dns.lookup = use_all_dns_ips
client.id = producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0
compression.type = none
connections.max.idle.ms = 540000
delivery.timeout.ms = 120000
enable.idempotence = true
interceptor.classes = []
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.max.age.ms = 300000
metadata.max.idle.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 2147483647
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.connect.timeout.ms = null
sasl.login.read.timeout.ms = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.login.retry.backoff.max.ms = 10000
sasl.login.retry.backoff.ms = 100
sasl.mechanism = GSSAPI
sasl.oauthbearer.clock.skew.seconds = 30
sasl.oauthbearer.expected.audience = null
sasl.oauthbearer.expected.issuer = null
sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
sasl.oauthbearer.jwks.endpoint.url = null
sasl.oauthbearer.scope.claim.name = scope
sasl.oauthbearer.sub.claim.name = sub
sasl.oauthbearer.token.endpoint.url = null
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0
value.serializer = class org.springframework.kafka.support.serializer.ToStringSerializer
2023-03-01 20:06:05.090 INFO 14708 --- [ntainer#0-0-C-1] o.a.k.clients.producer.KafkaProducer : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0] Instantiated a transactional producer.
2023-03-01 20:06:05.092 WARN 14708 --- [ntainer#0-0-C-1] o.a.k.clients.producer.ProducerConfig : The configuration 'max.poll.interval.ms' was supplied but isn't a known config.
2023-03-01 20:06:05.092 INFO 14708 --- [ntainer#0-0-C-1] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.1.2
2023-03-01 20:06:05.092 INFO 14708 --- [ntainer#0-0-C-1] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: f8c67dc3ae0a3265
2023-03-01 20:06:05.092 INFO 14708 --- [ntainer#0-0-C-1] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1677672365092
2023-03-01 20:06:05.092 INFO 14708 --- [ntainer#0-0-C-1] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0] Invoking InitProducerId for the first time in order to acquire a producer ID
2023-03-01 20:06:05.094 INFO 14708 --- [CTION-TOPIC-1.0] org.apache.kafka.clients.Metadata : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0] Cluster ID: KtSoJctvTiyoVntH9E-rzA
2023-03-01 20:06:05.095 INFO 14708 --- [CTION-TOPIC-1.0] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0] Discovered transaction coordinator KIT-DP-A0010:9092 (id: 0 rack: null)
2023-03-01 20:06:05.198 INFO 14708 --- [CTION-TOPIC-1.0] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0] ProducerId set to 1 with epoch 0
2023-03-01 20:06:05.198 DEBUG 14708 --- [ntainer#0-0-C-1] o.s.k.t.KafkaTransactionManager : Created Kafka transaction on producer [CloseSafeProducer [delegate=org.apache.kafka.clients.producer.KafkaProducer@7c443754]]
2023-03-01 20:06:05.202 DEBUG 14708 --- [ntainer#0-0-C-1] o.s.k.t.KafkaTransactionManager : Participating in existing transaction
2023-03-01 20:06:05.202 TRACE 14708 --- [ntainer#0-0-C-1] o.s.t.i.TransactionInterceptor : Getting transaction for [com.leekitman.pangea.evolution.kafka.consumer.TransactionOneEventListener.listenEvent1]
2023-03-01 20:06:05.205 INFO 14708 --- [ntainer#0-0-C-1] c.l.p.e.k.c.TransactionOneEventListener : listenEvent1:接收kafka消息:[第一事件:success-success],from TRANSACTION-TOPIC-1 @ 0@ 0
2023-03-01 20:06:05.206 TRACE 14708 --- [ntainer#0-0-C-1] o.s.t.i.TransactionInterceptor : Getting transaction for [com.leekitman.pangea.evolution.kafka.service.SenderService.doEventV1]
2023-03-01 20:06:05.206 TRACE 14708 --- [ntainer#0-0-C-1] o.s.t.i.TransactionInterceptor : Getting transaction for [org.springframework.data.jpa.repository.support.SimpleJpaRepository.findAll]
Hibernate: select processeve0_.id as id1_0_, processeve0_.exception as exceptio2_0_, processeve0_.name as name3_0_, processeve0_.status as status4_0_, processeve0_.tenant_id as tenant_i5_0_, processeve0_.topic as topic6_0_ from process_event processeve0_
2023-03-01 20:06:05.206 TRACE 14708 --- [ntainer#0-0-C-1] o.s.t.i.TransactionInterceptor : Completing transaction for [org.springframework.data.jpa.repository.support.SimpleJpaRepository.findAll]
2023-03-01 20:06:05.206 INFO 14708 --- [ntainer#0-0-C-1] c.l.p.e.kafka.service.SenderService : 3-验证数据库事务,查询数据库:[ProcessEventEntity(id=111, topic=PLAN_NODE_FINISHED, name=计划节点结束111, status=0, exception=null, tenantId=111111111111), ProcessEventEntity(id=222, topic=PLAN_NODE_FINISHED, name=计划节点结束222, status=0, exception=null, tenantId=222222222222), ProcessEventEntity(id=333, topic=PLAN_NODE_FINISHED, name=计划节点结束333, status=0, exception=null, tenantId=333333333333)]
2023-03-01 20:06:05.208 TRACE 14708 --- [ntainer#0-0-C-1] o.s.t.i.TransactionInterceptor : Completing transaction for [com.leekitman.pangea.evolution.kafka.service.SenderService.doEventV1]
2023-03-01 20:06:05.208 TRACE 14708 --- [ntainer#0-0-C-1] o.s.t.i.TransactionInterceptor : Completing transaction for [com.leekitman.pangea.evolution.kafka.consumer.TransactionOneEventListener.listenEvent1]
2023-03-01 20:06:05.210 INFO 14708 --- [CTION-TOPIC-1.0] c.l.p.e.kafka.service.SenderService : 4-事务发送kafka消息成功回调: SendResult [producerRecord=ProducerRecord(topic=TRANSACTION-TOPIC-2, partition=null, headers=RecordHeaders(headers = [RecordHeader(key = spring.message.value.type, value = [106, 97, 118, 97, 46, 108, 97, 110, 103, 46, 83, 116, 114, 105, 110, 103])], isReadOnly = true), key=null, value=第二事件:第一事件:success-success, timestamp=null), recordMetadata=TRANSACTION-TOPIC-2-0@0]
2023-03-01 20:06:05.215 INFO 14708 --- [CTION-TOPIC-1.0] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-1.0] Discovered group coordinator KIT-DP-A0010:9092 (id: 0 rack: null)
2023-03-01 20:06:05.323 DEBUG 14708 --- [ntainer#0-0-C-1] o.s.k.t.KafkaTransactionManager : Initiating transaction commit
2023-03-01 20:06:05.326 DEBUG 14708 --- [ntainer#1-0-C-1] o.s.k.t.KafkaTransactionManager : Creating new transaction with name [null]: PROPAGATION_REQUIRED,ISOLATION_DEFAULT
2023-03-01 20:06:05.327 INFO 14708 --- [ntainer#1-0-C-1] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = -1
batch.size = 16384
bootstrap.servers = [localhost:9092]
buffer.memory = 33554432
client.dns.lookup = use_all_dns_ips
client.id = producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0
compression.type = none
connections.max.idle.ms = 540000
delivery.timeout.ms = 120000
enable.idempotence = true
interceptor.classes = []
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.max.age.ms = 300000
metadata.max.idle.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 2147483647
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.connect.timeout.ms = null
sasl.login.read.timeout.ms = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.login.retry.backoff.max.ms = 10000
sasl.login.retry.backoff.ms = 100
sasl.mechanism = GSSAPI
sasl.oauthbearer.clock.skew.seconds = 30
sasl.oauthbearer.expected.audience = null
sasl.oauthbearer.expected.issuer = null
sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000
sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000
sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100
sasl.oauthbearer.jwks.endpoint.url = null
sasl.oauthbearer.scope.claim.name = scope
sasl.oauthbearer.sub.claim.name = sub
sasl.oauthbearer.token.endpoint.url = null
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0
value.serializer = class org.springframework.kafka.support.serializer.ToStringSerializer
2023-03-01 20:06:05.328 INFO 14708 --- [ntainer#1-0-C-1] o.a.k.clients.producer.KafkaProducer : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0] Instantiated a transactional producer.
2023-03-01 20:06:05.331 WARN 14708 --- [ntainer#1-0-C-1] o.a.k.clients.producer.ProducerConfig : The configuration 'max.poll.interval.ms' was supplied but isn't a known config.
2023-03-01 20:06:05.331 INFO 14708 --- [ntainer#1-0-C-1] o.a.kafka.common.utils.AppInfoParser : Kafka version: 3.1.2
2023-03-01 20:06:05.331 INFO 14708 --- [ntainer#1-0-C-1] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: f8c67dc3ae0a3265
2023-03-01 20:06:05.331 INFO 14708 --- [ntainer#1-0-C-1] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1677672365331
2023-03-01 20:06:05.331 INFO 14708 --- [ntainer#1-0-C-1] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0] Invoking InitProducerId for the first time in order to acquire a producer ID
2023-03-01 20:06:05.333 INFO 14708 --- [CTION-TOPIC-2.0] org.apache.kafka.clients.Metadata : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0] Cluster ID: KtSoJctvTiyoVntH9E-rzA
2023-03-01 20:06:05.333 INFO 14708 --- [CTION-TOPIC-2.0] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0] Discovered transaction coordinator KIT-DP-A0010:9092 (id: 0 rack: null)
2023-03-01 20:06:05.436 INFO 14708 --- [CTION-TOPIC-2.0] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0] ProducerId set to 2 with epoch 0
2023-03-01 20:06:05.436 DEBUG 14708 --- [ntainer#1-0-C-1] o.s.k.t.KafkaTransactionManager : Created Kafka transaction on producer [CloseSafeProducer [delegate=org.apache.kafka.clients.producer.KafkaProducer@2f9b816d]]
2023-03-01 20:06:05.436 INFO 14708 --- [ntainer#1-0-C-1] c.l.p.e.k.c.TransactionTwoEventListener : listenEvent2:接收kafka消息:[第二事件:第一事件:success-success],from TRANSACTION-TOPIC-2 @ 0@ 0
2023-03-01 20:06:05.437 INFO 14708 --- [CTION-TOPIC-2.0] o.a.k.c.p.internals.TransactionManager : [Producer clientId=producer-tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0, transactionalId=tx-kafka-spring-kafka-evo-consumer-004.TRANSACTION-TOPIC-2.0] Discovered group coordinator KIT-DP-A0010:9092 (id: 0 rack: null)
2023-03-01 20:06:05.539 DEBUG 14708 --- [ntainer#1-0-C-1] o.s.k.t.KafkaTransactionManager : Initiating transaction commit
2.2.2. Kafka日志
[2023-03-01 20:06:00,493] INFO Topic creation Map(__transaction_state-42 -> ArrayBuffer(0), __transaction_state-31 -> ArrayBuffer(0), __transaction_state-45 -> ArrayBuffer(0), __transaction_state-15 -> ArrayBuffer(0), __transaction_state-12 -> ArrayBuffer(0), __transaction_state-7 -> ArrayBuffer(0), __transaction_state-46 -> ArrayBuffer(0), __transaction_state-48 -> ArrayBuffer(0), __transaction_state-49 -> ArrayBuffer(0), __transaction_state-28 -> ArrayBuffer(0), __transaction_state-2 -> ArrayBuffer(0), __transaction_state-20 -> ArrayBuffer(0), __transaction_state-24 -> ArrayBuffer(0), __transaction_state-13 -> ArrayBuffer(0), __transaction_state-0 -> ArrayBuffer(0), __transaction_state-37 -> ArrayBuffer(0), __transaction_state-3 -> ArrayBuffer(0), __transaction_state-21 -> ArrayBuffer(0), __transaction_state-29 -> ArrayBuffer(0), __transaction_state-39 -> ArrayBuffer(0), __transaction_state-38 -> ArrayBuffer(0), __transaction_state-6 -> ArrayBuffer(0), __transaction_state-14 -> ArrayBuffer(0), __transaction_state-10 -> ArrayBuffer(0), __transaction_state-44 -> ArrayBuffer(0), __transaction_state-9 -> ArrayBuffer(0), __transaction_state-22 -> ArrayBuffer(0), __transaction_state-43 -> ArrayBuffer(0), __transaction_state-4 -> ArrayBuffer(0), __transaction_state-30 -> ArrayBuffer(0), __transaction_state-33 -> ArrayBuffer(0), __transaction_state-32 -> ArrayBuffer(0), __transaction_state-25 -> ArrayBuffer(0), __transaction_state-17 -> ArrayBuffer(0), __transaction_state-23 -> ArrayBuffer(0), __transaction_state-47 -> ArrayBuffer(0), __transaction_state-18 -> ArrayBuffer(0), __transaction_state-26 -> ArrayBuffer(0), __transaction_state-36 -> ArrayBuffer(0), __transaction_state-5 -> ArrayBuffer(0), __transaction_state-8 -> ArrayBuffer(0), __transaction_state-16 -> ArrayBuffer(0), __transaction_state-11 -> ArrayBuffer(0), __transaction_state-40 -> ArrayBuffer(0), __transaction_state-19 -> ArrayBuffer(0), __transaction_state-27 -> ArrayBuffer(0), __transaction_state-41 -> ArrayBuffer(0), __transaction_state-1 -> ArrayBuffer(0), __transaction_state-34 -> ArrayBuffer(0), __transaction_state-35 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient)
[2023-03-01 20:06:00,521] INFO [KafkaApi-0] Auto creation of topic __transaction_state with 50 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
[2023-03-01 20:06:01,239] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions __transaction_state-42,__transaction_state-31,__transaction_state-45,__transaction_state-15,__transaction_state-12,__transaction_state-7,__transaction_state-46,__transaction_state-48,__transaction_state-49,__transaction_state-28,__transaction_state-2,__transaction_state-20,__transaction_state-24,__transaction_state-13,__transaction_state-0,__transaction_state-37,__transaction_state-3,__transaction_state-21,__transaction_state-29,__transaction_state-39,__transaction_state-38,__transaction_state-6,__transaction_state-14,__transaction_state-10,__transaction_state-44,__transaction_state-9,__transaction_state-22,__transaction_state-43,__transaction_state-4,__transaction_state-30,__transaction_state-33,__transaction_state-32,__transaction_state-25,__transaction_state-17,__transaction_state-23,__transaction_state-47,__transaction_state-18,__transaction_state-26,__transaction_state-36,__transaction_state-5,__transaction_state-8,__transaction_state-16,__transaction_state-11,__transaction_state-40,__transaction_state-19,__transaction_state-27,__transaction_state-41,__transaction_state-1,__transaction_state-34,__transaction_state-35 (kafka.server.ReplicaFetcherManager)
[2023-03-01 20:06:01,242] INFO [Log partition=__transaction_state-25, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,242] INFO [Log partition=__transaction_state-25, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,242] INFO Created log for partition __transaction_state-25 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,243] INFO [Partition __transaction_state-25 broker=0] No checkpointed highwatermark is found for partition __transaction_state-25 (kafka.cluster.Partition)
[2023-03-01 20:06:01,243] INFO Replica loaded for partition __transaction_state-25 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,243] INFO [Partition __transaction_state-25 broker=0] __transaction_state-25 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,304] INFO [Log partition=__transaction_state-44, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,304] INFO [Log partition=__transaction_state-44, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,304] INFO Created log for partition __transaction_state-44 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,305] INFO [Partition __transaction_state-44 broker=0] No checkpointed highwatermark is found for partition __transaction_state-44 (kafka.cluster.Partition)
[2023-03-01 20:06:01,305] INFO Replica loaded for partition __transaction_state-44 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,305] INFO [Partition __transaction_state-44 broker=0] __transaction_state-44 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,380] INFO [Log partition=__transaction_state-6, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,380] INFO [Log partition=__transaction_state-6, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,381] INFO Created log for partition __transaction_state-6 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,381] INFO [Partition __transaction_state-6 broker=0] No checkpointed highwatermark is found for partition __transaction_state-6 (kafka.cluster.Partition)
[2023-03-01 20:06:01,381] INFO Replica loaded for partition __transaction_state-6 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,381] INFO [Partition __transaction_state-6 broker=0] __transaction_state-6 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,463] INFO [Log partition=__transaction_state-41, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,463] INFO [Log partition=__transaction_state-41, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,464] INFO Created log for partition __transaction_state-41 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,464] INFO [Partition __transaction_state-41 broker=0] No checkpointed highwatermark is found for partition __transaction_state-41 (kafka.cluster.Partition)
[2023-03-01 20:06:01,464] INFO Replica loaded for partition __transaction_state-41 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,464] INFO [Partition __transaction_state-41 broker=0] __transaction_state-41 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,529] INFO [Log partition=__transaction_state-3, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,529] INFO [Log partition=__transaction_state-3, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,530] INFO Created log for partition __transaction_state-3 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,530] INFO [Partition __transaction_state-3 broker=0] No checkpointed highwatermark is found for partition __transaction_state-3 (kafka.cluster.Partition)
[2023-03-01 20:06:01,530] INFO Replica loaded for partition __transaction_state-3 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,530] INFO [Partition __transaction_state-3 broker=0] __transaction_state-3 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,604] INFO [Log partition=__transaction_state-22, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,604] INFO [Log partition=__transaction_state-22, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,604] INFO Created log for partition __transaction_state-22 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,605] INFO [Partition __transaction_state-22 broker=0] No checkpointed highwatermark is found for partition __transaction_state-22 (kafka.cluster.Partition)
[2023-03-01 20:06:01,605] INFO Replica loaded for partition __transaction_state-22 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,605] INFO [Partition __transaction_state-22 broker=0] __transaction_state-22 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,670] INFO [Log partition=__transaction_state-0, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,671] INFO [Log partition=__transaction_state-0, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,671] INFO Created log for partition __transaction_state-0 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,671] INFO [Partition __transaction_state-0 broker=0] No checkpointed highwatermark is found for partition __transaction_state-0 (kafka.cluster.Partition)
[2023-03-01 20:06:01,671] INFO Replica loaded for partition __transaction_state-0 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,671] INFO [Partition __transaction_state-0 broker=0] __transaction_state-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,737] INFO [Log partition=__transaction_state-38, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,737] INFO [Log partition=__transaction_state-38, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,737] INFO Created log for partition __transaction_state-38 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,738] INFO [Partition __transaction_state-38 broker=0] No checkpointed highwatermark is found for partition __transaction_state-38 (kafka.cluster.Partition)
[2023-03-01 20:06:01,738] INFO Replica loaded for partition __transaction_state-38 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,738] INFO [Partition __transaction_state-38 broker=0] __transaction_state-38 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,812] INFO [Log partition=__transaction_state-19, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,812] INFO [Log partition=__transaction_state-19, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,813] INFO Created log for partition __transaction_state-19 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,813] INFO [Partition __transaction_state-19 broker=0] No checkpointed highwatermark is found for partition __transaction_state-19 (kafka.cluster.Partition)
[2023-03-01 20:06:01,813] INFO Replica loaded for partition __transaction_state-19 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,813] INFO [Partition __transaction_state-19 broker=0] __transaction_state-19 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,887] INFO [Log partition=__transaction_state-16, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,887] INFO [Log partition=__transaction_state-16, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,887] INFO Created log for partition __transaction_state-16 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,888] INFO [Partition __transaction_state-16 broker=0] No checkpointed highwatermark is found for partition __transaction_state-16 (kafka.cluster.Partition)
[2023-03-01 20:06:01,888] INFO Replica loaded for partition __transaction_state-16 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,888] INFO [Partition __transaction_state-16 broker=0] __transaction_state-16 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:01,953] INFO [Log partition=__transaction_state-35, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:01,953] INFO [Log partition=__transaction_state-35, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:01,953] INFO Created log for partition __transaction_state-35 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:01,954] INFO [Partition __transaction_state-35 broker=0] No checkpointed highwatermark is found for partition __transaction_state-35 (kafka.cluster.Partition)
[2023-03-01 20:06:01,954] INFO Replica loaded for partition __transaction_state-35 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:01,954] INFO [Partition __transaction_state-35 broker=0] __transaction_state-35 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,027] INFO [Log partition=__transaction_state-13, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,028] INFO [Log partition=__transaction_state-13, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:06:02,028] INFO Created log for partition __transaction_state-13 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,028] INFO [Partition __transaction_state-13 broker=0] No checkpointed highwatermark is found for partition __transaction_state-13 (kafka.cluster.Partition)
[2023-03-01 20:06:02,028] INFO Replica loaded for partition __transaction_state-13 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,028] INFO [Partition __transaction_state-13 broker=0] __transaction_state-13 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,094] INFO [Log partition=__transaction_state-32, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,095] INFO [Log partition=__transaction_state-32, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:06:02,095] INFO Created log for partition __transaction_state-32 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,095] INFO [Partition __transaction_state-32 broker=0] No checkpointed highwatermark is found for partition __transaction_state-32 (kafka.cluster.Partition)
[2023-03-01 20:06:02,095] INFO Replica loaded for partition __transaction_state-32 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,095] INFO [Partition __transaction_state-32 broker=0] __transaction_state-32 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,161] INFO [Log partition=__transaction_state-48, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,161] INFO [Log partition=__transaction_state-48, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:02,161] INFO Created log for partition __transaction_state-48 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,162] INFO [Partition __transaction_state-48 broker=0] No checkpointed highwatermark is found for partition __transaction_state-48 (kafka.cluster.Partition)
[2023-03-01 20:06:02,162] INFO Replica loaded for partition __transaction_state-48 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,162] INFO [Partition __transaction_state-48 broker=0] __transaction_state-48 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,226] INFO [Log partition=__transaction_state-29, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,226] INFO [Log partition=__transaction_state-29, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:02,226] INFO Created log for partition __transaction_state-29 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,227] INFO [Partition __transaction_state-29 broker=0] No checkpointed highwatermark is found for partition __transaction_state-29 (kafka.cluster.Partition)
[2023-03-01 20:06:02,227] INFO Replica loaded for partition __transaction_state-29 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,227] INFO [Partition __transaction_state-29 broker=0] __transaction_state-29 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,313] INFO [Log partition=__transaction_state-10, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,313] INFO [Log partition=__transaction_state-10, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:02,314] INFO Created log for partition __transaction_state-10 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,314] INFO [Partition __transaction_state-10 broker=0] No checkpointed highwatermark is found for partition __transaction_state-10 (kafka.cluster.Partition)
[2023-03-01 20:06:02,314] INFO Replica loaded for partition __transaction_state-10 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,314] INFO [Partition __transaction_state-10 broker=0] __transaction_state-10 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,380] INFO [Log partition=__transaction_state-7, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,380] INFO [Log partition=__transaction_state-7, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:02,380] INFO Created log for partition __transaction_state-7 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,381] INFO [Partition __transaction_state-7 broker=0] No checkpointed highwatermark is found for partition __transaction_state-7 (kafka.cluster.Partition)
[2023-03-01 20:06:02,381] INFO Replica loaded for partition __transaction_state-7 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,381] INFO [Partition __transaction_state-7 broker=0] __transaction_state-7 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,445] INFO [Log partition=__transaction_state-26, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,446] INFO [Log partition=__transaction_state-26, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:06:02,446] INFO Created log for partition __transaction_state-26 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,446] INFO [Partition __transaction_state-26 broker=0] No checkpointed highwatermark is found for partition __transaction_state-26 (kafka.cluster.Partition)
[2023-03-01 20:06:02,446] INFO Replica loaded for partition __transaction_state-26 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,446] INFO [Partition __transaction_state-26 broker=0] __transaction_state-26 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,522] INFO [Log partition=__transaction_state-45, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,523] INFO [Log partition=__transaction_state-45, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 6 ms (kafka.log.Log)
[2023-03-01 20:06:02,527] INFO Created log for partition __transaction_state-45 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,528] INFO [Partition __transaction_state-45 broker=0] No checkpointed highwatermark is found for partition __transaction_state-45 (kafka.cluster.Partition)
[2023-03-01 20:06:02,528] INFO Replica loaded for partition __transaction_state-45 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,528] INFO [Partition __transaction_state-45 broker=0] __transaction_state-45 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,612] INFO [Log partition=__transaction_state-23, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,612] INFO [Log partition=__transaction_state-23, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:02,613] INFO Created log for partition __transaction_state-23 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,613] INFO [Partition __transaction_state-23 broker=0] No checkpointed highwatermark is found for partition __transaction_state-23 (kafka.cluster.Partition)
[2023-03-01 20:06:02,613] INFO Replica loaded for partition __transaction_state-23 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,613] INFO [Partition __transaction_state-23 broker=0] __transaction_state-23 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,695] INFO [Log partition=__transaction_state-42, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,696] INFO [Log partition=__transaction_state-42, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:06:02,696] INFO Created log for partition __transaction_state-42 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,696] INFO [Partition __transaction_state-42 broker=0] No checkpointed highwatermark is found for partition __transaction_state-42 (kafka.cluster.Partition)
[2023-03-01 20:06:02,696] INFO Replica loaded for partition __transaction_state-42 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,696] INFO [Partition __transaction_state-42 broker=0] __transaction_state-42 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,770] INFO [Log partition=__transaction_state-4, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,770] INFO [Log partition=__transaction_state-4, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:02,771] INFO Created log for partition __transaction_state-4 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,771] INFO [Partition __transaction_state-4 broker=0] No checkpointed highwatermark is found for partition __transaction_state-4 (kafka.cluster.Partition)
[2023-03-01 20:06:02,771] INFO Replica loaded for partition __transaction_state-4 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,771] INFO [Partition __transaction_state-4 broker=0] __transaction_state-4 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,828] INFO [Log partition=__transaction_state-39, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,829] INFO [Log partition=__transaction_state-39, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:06:02,829] INFO Created log for partition __transaction_state-39 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,829] INFO [Partition __transaction_state-39 broker=0] No checkpointed highwatermark is found for partition __transaction_state-39 (kafka.cluster.Partition)
[2023-03-01 20:06:02,829] INFO Replica loaded for partition __transaction_state-39 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,829] INFO [Partition __transaction_state-39 broker=0] __transaction_state-39 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,887] INFO [Log partition=__transaction_state-1, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,887] INFO [Log partition=__transaction_state-1, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:02,887] INFO Created log for partition __transaction_state-1 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,888] INFO [Partition __transaction_state-1 broker=0] No checkpointed highwatermark is found for partition __transaction_state-1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,888] INFO Replica loaded for partition __transaction_state-1 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,888] INFO [Partition __transaction_state-1 broker=0] __transaction_state-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:02,953] INFO [Log partition=__transaction_state-20, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:02,953] INFO [Log partition=__transaction_state-20, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:02,954] INFO Created log for partition __transaction_state-20 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:02,954] INFO [Partition __transaction_state-20 broker=0] No checkpointed highwatermark is found for partition __transaction_state-20 (kafka.cluster.Partition)
[2023-03-01 20:06:02,954] INFO Replica loaded for partition __transaction_state-20 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:02,954] INFO [Partition __transaction_state-20 broker=0] __transaction_state-20 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:03,019] INFO [Log partition=__transaction_state-17, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:03,019] INFO [Log partition=__transaction_state-17, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:03,020] INFO Created log for partition __transaction_state-17 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:03,020] INFO [Partition __transaction_state-17 broker=0] No checkpointed highwatermark is found for partition __transaction_state-17 (kafka.cluster.Partition)
[2023-03-01 20:06:03,020] INFO Replica loaded for partition __transaction_state-17 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:03,020] INFO [Partition __transaction_state-17 broker=0] __transaction_state-17 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:03,077] INFO [Log partition=__transaction_state-36, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:03,077] INFO [Log partition=__transaction_state-36, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:03,077] INFO Created log for partition __transaction_state-36 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:03,078] INFO [Partition __transaction_state-36 broker=0] No checkpointed highwatermark is found for partition __transaction_state-36 (kafka.cluster.Partition)
[2023-03-01 20:06:03,078] INFO Replica loaded for partition __transaction_state-36 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:03,078] INFO [Partition __transaction_state-36 broker=0] __transaction_state-36 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:03,144] INFO [Log partition=__transaction_state-33, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:03,145] INFO [Log partition=__transaction_state-33, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:06:03,145] INFO Created log for partition __transaction_state-33 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:03,145] INFO [Partition __transaction_state-33 broker=0] No checkpointed highwatermark is found for partition __transaction_state-33 (kafka.cluster.Partition)
[2023-03-01 20:06:03,145] INFO Replica loaded for partition __transaction_state-33 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:03,145] INFO [Partition __transaction_state-33 broker=0] __transaction_state-33 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:03,202] INFO [Log partition=__transaction_state-14, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:03,203] INFO [Log partition=__transaction_state-14, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 2 ms (kafka.log.Log)
[2023-03-01 20:06:03,203] INFO Created log for partition __transaction_state-14 in F:\tmp\kafka-logs with properties {compression.type -> uncompressed, message.format.version -> 1.1-IV0, file.delete.delay.ms -> 60000, max.message.bytes -> 1000012, min.compaction.lag.ms -> 0, message.timestamp.type -> CreateTime, min.insync.replicas -> 1, segment.jitter.ms -> 0, preallocate -> false, min.cleanable.dirty.ratio -> 0.5, index.interval.bytes -> 4096, unclean.leader.election.enable -> false, retention.bytes -> -1, delete.retention.ms -> 86400000, cleanup.policy -> compact, flush.ms -> 9223372036854775807, segment.ms -> 604800000, segment.bytes -> 104857600, retention.ms -> 604800000, message.timestamp.difference.max.ms -> 9223372036854775807, segment.index.bytes -> 10485760, flush.messages -> 9223372036854775807}. (kafka.log.LogManager)
[2023-03-01 20:06:03,203] INFO [Partition __transaction_state-14 broker=0] No checkpointed highwatermark is found for partition __transaction_state-14 (kafka.cluster.Partition)
[2023-03-01 20:06:03,203] INFO Replica loaded for partition __transaction_state-14 with initial high watermark 0 (kafka.cluster.Replica)
[2023-03-01 20:06:03,203] INFO [Partition __transaction_state-14 broker=0] __transaction_state-14 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition)
[2023-03-01 20:06:03,261] INFO [Log partition=__transaction_state-30, dir=F:\tmp\kafka-logs] Loading producer state from offset 0 with message format version 2 (kafka.log.Log)
[2023-03-01 20:06:03,261] INFO [Log partition=__transaction_state-30, dir=F:\tmp\kafka-logs] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 1 ms (kafka.log.Log)
[2023-03-01 20:06:03,261] INFO Created log for partition __transaction_state-30 in F:\tmp\kafka-logs with properties {co