HBase common commands and Kafka group reset

#Reset a consumer group's offsets to the earliest position
kafka-consumer-groups.sh --bootstrap-server 192.168.64.210:9092 --group cm --reset-offsets --all-topics --to-earliest --execute
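Tip: replacing --execute with --dry-run (also the default when neither flag is given) only prints the offsets each partition would be reset to, without actually changing anything.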

#Count rows in an hbase table
#From inside the hbase shell
count 'events'
# From outside the shell (MapReduce RowCounter job)
 $HBASE_HOME/bin/hbase org.apache.hadoop.hbase.mapreduce.RowCounter 'events'
Modify the consumer group id

cd /opt/soft/kafka200/config/
vim consumer.properties
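The line to change is the group.id property; for example, to match the cm group used in the reset command above:

group.id=cm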

#List topics
 kafka-topics.sh --zookeeper 192.168.64.210:2181 --list
#Start zookeeper
zkServer.sh start
#Start hadoop
start-all.sh

#Start kafka
kafka-server-start.sh /opt/soft/kafka200/config/server.properties
#Start hbase
start-hbase.sh
#Consume a kafka topic from the console (for monitoring)
kafka-console-consumer.sh --bootstrap-server 192.168.64.200:9092 --topic events
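Adding --from-beginning makes the console consumer replay the topic from the earliest offset instead of only printing new messages.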
#Count rows from the hbase shell
hbase shell
count 'events'

#Count rows with a MapReduce job (RowCounter)
hbase org.apache.hadoop.hbase.mapreduce.RowCounter 'userfriends'
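For small tables, the row count can also be obtained from Java with a client-side scan instead of the RowCounter MapReduce job. A minimal sketch, assuming the same zookeeper address and the userfriends table used elsewhere in this post (the class name ScanRowCount is just illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class ScanRowCount {
    public static void main(String[] args) throws Exception {
        Configuration cnf = HBaseConfiguration.create();
        cnf.set("hbase.zookeeper.quorum", "192.168.64.210");
        try (Connection conn = ConnectionFactory.createConnection(cnf);
             Table table = conn.getTable(TableName.valueOf("userfriends"))) {
            Scan scan = new Scan();
            // only fetch the first cell of each row, so counting stays cheap
            scan.setFilter(new FirstKeyOnlyFilter());
            long count = 0;
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result r : scanner) {
                    count++;
                }
            }
            System.out.println("rows: " + count);
        }
    }
}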
#Copy/paste lines in vim
#copy (yank) the current line
yy
#paste
p
#Exit hadoop safe mode
hadoop dfsadmin -safemode leave
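To just check the current state first, the same command also accepts -safemode get.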
#Delete an hbase table (disable it first)
disable 'eventAttendees'
drop 'eventAttendees'
#Create an hbase table with column family 'base'
create 'eventAttendees', 'base'
count 'eventAttendees'
#Configure the zookeeper maximum session timeout (value in milliseconds; 1800000 ms = 30 minutes)
vim zoo.cfg
maxSessionTimeout=1800000
#IDEA code formatting shortcut
Ctrl+Alt+L
Incredibly handy!!!
#Write to hbase directly from a plain Kafka consumer, without any framework
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

public class Kth {

    static int i = 0;
    static Connection conn;

    // Create a single HBase connection when the class is loaded
    static {
        try {
            Configuration cnf = HBaseConfiguration.create();
            cnf.set("hbase.zookeeper.quorum", "192.168.64.210:2181");
            conn = ConnectionFactory.createConnection(cnf);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.64.210:9092");
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "cm");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        consumer.subscribe(Arrays.asList("user_friends_raw"));
        while (true) {

            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(10));
            System.out.println("Fetched records: " + records.count() + " =========== batch " + (++i));
            List<Put> puts = new ArrayList<>();
            for (ConsumerRecord<String, String> record : records) {
                // Build one HBase Put per (userid, friendid) pair
                String[] lines = record.value().split(",", -1);
                if (lines.length > 1) {
                    String[] fids = lines[1].split(" ");
                    for (String fid : fids) {
                        Put put = new Put((lines[0] + "-" + fid).getBytes());
                        put.addColumn("base".getBytes(), "userid".getBytes(), lines[0].getBytes());
                        put.addColumn("base".getBytes(), "friendid".getBytes(), fid.getBytes());
                        puts.add(put);
                    }
                }
            }
            // Look up the table and write the whole batch
            try (Table htable = conn.getTable(TableName.valueOf("userfriends"))) {
                htable.put(puts);
            } catch (IOException e) {
                e.printStackTrace();
            }
            // Clear the batch list
            puts.clear();
            // Commit offsets only after the batch has been written to HBase
            consumer.commitSync();
        }
    }
}
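Note: the consumer above writes to the userfriends table with column family base, so that table has to exist first; it can be created the same way as eventAttendees above, e.g. create 'userfriends', 'base' in the hbase shell.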


#Access hadoop (HDFS NameNode web UI) from a browser
192.168.64.210:50070


Origin blog.csdn.net/just_learing/article/details/126320521