Integration code for Flume + Kafka + Storm + HBase

1. Start HBase (ZooKeeper and HDFS must already be running)
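
HBase is started with (assuming its bin directory is on the PATH):

start-hbase.sh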

Check the processes:
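
The standard check is jps; HMaster and HRegionServer should be listed alongside the ZooKeeper and HDFS daemons (the exact set varies per node):

jps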

Enter the HBase shell:
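
hbase shell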

Check the HBase status:
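
status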

List the tables:
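
list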

Query the data in new_music_table:

scan 'new_music_table'

2. Start Kafka:
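
Roughly as follows (the Kafka install path and partition count are assumptions; the topic name matches the Storm code below, and the old --zookeeper style of topic creation matches the old storm-kafka connector used here):

kafka-server-start.sh -daemon config/server.properties
kafka-topics.sh --create --zookeeper master:2181 --replication-factor 1 --partitions 2 --topic badou_storm_kafka_test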

3. Write the Storm code:

stormKafka.java:
package stormHbase;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import storm.kafka.BrokerHosts;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;

public class stormKafka {
    public static void main(String[] args) throws Exception {

        // Kafka topic to read, and the ZooKeeper path under which the spout stores its offsets
        String topic = "badou_storm_kafka_test";
        String zkRoot = "/badou_storm_kafka_test";
        String spoutId = "kafkaSpout";

        // Kafka broker metadata is discovered through ZooKeeper on master:2181
        BrokerHosts brokerHosts = new ZkHosts("master:2181");
        SpoutConfig kafkaConf = new SpoutConfig(brokerHosts, topic, zkRoot, spoutId);
        // Re-read the topic from the beginning instead of resuming from stored offsets
        kafkaConf.forceFromStart = true;
        // Emit each Kafka message as a single plain-string tuple
        kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());

        KafkaSpout kafkaSpout = new KafkaSpout(kafkaConf);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, 2);
        // Every message from the spout goes to PrinterBolt, which looks it up in HBase
        builder.setBolt("printer", new PrinterBolt()).shuffleGrouping("spout");

        Config config = new Config();
        config.setDebug(false);

        if (args != null && args.length > 0) {
            // A topology name was given as args[0]: submit to the real cluster
            config.setNumWorkers(3);

            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            // No arguments: run inside an in-process LocalCluster for testing
            config.setMaxTaskParallelism(3);

            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("kafka", config, builder.createTopology());

            // Thread.sleep(10000);

            // cluster.shutdown();
        }
    }
}
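
Note: this topology uses the pre-1.0 Storm API (backtype.storm) together with the old storm-kafka connector, so the packaged jar needs storm-core, storm-kafka, a matching Kafka client, and hbase-client on its classpath.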

PrinterBolt.java:
package stormHbase;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Tuple;

public class PrinterBolt extends BaseBasicBolt {

    public static final String TableName = "new_music_table";
    //public static final String ColumnFamily = "rec_list";
    public static Configuration conf = HBaseConfiguration.create();
    static {
        // Point the HBase client at the cluster once, rather than re-setting
        // these properties on every tuple inside execute()
        conf.set("hbase.master", "192.168.87.10:60000");
        conf.set("hbase.zookeeper.quorum", "192.168.87.10,192.168.87.11,192.168.87.12");
    }
    private static HTable table;

    public static void selectRowKey(String tablename, String rowKey) throws IOException {
        // Lazily open the table on first use; the original code left this line
        // commented out, so table.get() failed with a NullPointerException.
        if (table == null) {
            table = new HTable(conf, tablename);
        }
        Get g = new Get(rowKey.getBytes());
        Result rs = table.get(g);

        if (rs.isEmpty()) {
            System.out.println("==> no row for key " + rowKey);
        } else {
            System.out.println("==> " + new String(rs.getRow()));
        }

        /* Uncomment to dump every cell of the row:
        for (Cell kv : rs.rawCells()) {
            System.out.println("Row          : " + new String(CellUtil.cloneRow(kv)));
            System.out.println("Column Family: " + new String(CellUtil.cloneFamily(kv)));
            System.out.println("Column       : " + new String(CellUtil.cloneQualifier(kv)));
            System.out.println("Value        : " + new String(CellUtil.cloneValue(kv)));
        }*/
    }

    @Override
    public void execute(Tuple tuple, BasicOutputCollector collector) {
        // Each Kafka message is treated as a row key to look up in HBase
        System.out.println(tuple.getString(0));
        try {
            selectRowKey(TableName, tuple.getString(0));
        } catch (Exception e) {
            System.out.println(tuple);
            e.printStackTrace();
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer ofd) {
    }

}

4. Run the Storm command:
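
Something along these lines (the jar and topology names are placeholders; stormKafka submits to the cluster when a topology name is passed as args[0], and otherwise runs a LocalCluster):

storm jar stormHbase.jar stormHbase.stormKafka kafka2hbase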

5. Start Flume:
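
A minimal agent definition along these lines tails a file into Kafka (the agent name a1, the watched file path, and the broker list are assumptions; the sink properties follow the Flume 1.6 KafkaSink convention):

a1.sources = r1
a1.channels = c1
a1.sinks = k1
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /home/badou/flume_test.log
a1.sources.r1.channels = c1
a1.channels.c1.type = memory
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.topic = badou_storm_kafka_test
a1.sinks.k1.brokerList = master:9092
a1.sinks.k1.channel = c1

Then start the agent (the config file name is a placeholder):

flume-ng agent --conf conf --conf-file flume_kafka.conf --name a1 -Dflume.root.logger=INFO,console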

6. Test and results:
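
With every service up, append row keys to the tailed file, or publish them directly with the console producer:

kafka-console-producer.sh --broker-list master:9092 --topic badou_storm_kafka_test

Each line reaches the topology through Kafka, and PrinterBolt prints "==> <row key>" for keys that exist in new_music_table.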

Reposted from blog.csdn.net/qq_28286027/article/details/88681808