Storm processes data from Redis and saves the results to HBase (the data is produced by the companion article "Caching the contents of multiple HDFS files into Redis in fixed amounts").

// Main method

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;
import backtype.storm.utils.Utils;


public class MyMain {
    public static void main(String[] args) {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("mySpout", new MySpout());
        topologyBuilder.setBolt("myBolt1", new MyBolt1()).shuffleGrouping("mySpout");
        topologyBuilder.setBolt("myBolt2", new MyBolt2(), 3).fieldsGrouping("myBolt1", new Fields("one"));
        Config config = new Config();
        config.setDebug(false);
        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("my", config, topologyBuilder.createTopology());
        Utils.sleep(1000);
        // localCluster.shutdown(); // left commented out so the local cluster keeps running and the topology continues to process
    }
}
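The main method above runs the topology in a LocalCluster, which is convenient for local testing. To run on a real Storm cluster you would package the project as a jar and submit it with StormSubmitter instead; the sketch below is an assumption of how that might look (the topology name comes from args[0], and the worker count of 2 is an arbitrary choice), not code from the original post.

import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

public class MyClusterMain {
    public static void main(String[] args) throws Exception {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("mySpout", new MySpout());
        topologyBuilder.setBolt("myBolt1", new MyBolt1()).shuffleGrouping("mySpout");
        topologyBuilder.setBolt("myBolt2", new MyBolt2(), 3).fieldsGrouping("myBolt1", new Fields("one"));
        Config config = new Config();
        config.setNumWorkers(2); // hypothetical worker count for the cluster
        // submit with: storm jar myproject.jar MyClusterMain myTopologyName
        StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
    }
}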

// Spout

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.IRichSpout;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import com.utils.JedisUtil;
import redis.clients.jedis.Jedis;

import java.util.Map;
import java.util.UUID;

public class MySpout implements IRichSpout {
    private static final long serialVersionUID = 1L;
    private SpoutOutputCollector spoutOutputCollector;
    private Jedis jedis; // Jedis is not serializable, so create it in open() rather than at construction time


    @Override
    public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
        this.spoutOutputCollector = spoutOutputCollector;
        this.jedis = JedisUtil.newJedis();
    }

    @Override
    public void close() {
        jedis.close();
    }

    @Override
    public void activate() {

    }

    @Override
    public void deactivate() {

    }

    @Override
    public void nextTuple() {
        String redisLine = jedis.rpop("waste");
        // rpop returns null when the list is empty, so guard against a NullPointerException
        if (redisLine != null && !redisLine.isEmpty()) {
            spoutOutputCollector.emit(new Values(redisLine), UUID.randomUUID());
        }
    }

    @Override
    public void ack(Object o) {
        // no-op: the message was already removed from Redis by rpop (at-most-once delivery)
    }

    @Override
    public void fail(Object o) {
        // no-op: failed tuples are not replayed
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("redisLine"));
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }
}
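JedisUtil is a helper from the companion article and is not shown in this post. A minimal sketch of what newJedis() might look like, assuming Redis runs on localhost:6379 (both the host and the port are assumptions):

import redis.clients.jedis.Jedis;

public class JedisUtil {
    private static final String HOST = "localhost"; // assumed Redis host
    private static final int PORT = 6379;           // default Redis port

    public static Jedis newJedis() {
        return new Jedis(HOST, PORT);
    }
}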


// First bolt (uses deserialization and a JavaBean; the details are covered in the companion article "Caching the contents of multiple HDFS files into Redis in fixed amounts")

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.IRichBolt;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import com.bean.WasteBean;
import com.utils.SerializeUtil;

import java.util.Map;

public class MyBolt1 implements IRichBolt {
    private static final long serialVersionUID = 1L;
    private OutputCollector outputCollector;

    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.outputCollector = outputCollector;
    }

    @Override
    public void execute(Tuple tuple) {
        // each Redis line is a serialized WasteBean; deserialize it back into the bean
        WasteBean wasteBean = (WasteBean) SerializeUtil.unserialize(tuple.getStringByField("redisLine").getBytes());
        String sc_id_name = wasteBean.getSc_id() + "_" + wasteBean.getSc_name();
        String o_money = wasteBean.getO_money();
        outputCollector.emit(tuple, new Values(sc_id_name + "\t" + o_money)); // anchor to the input tuple
        outputCollector.ack(tuple);
    }

    @Override
    public void cleanup() {

    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("one"));
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }
}
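SerializeUtil and WasteBean also come from the companion article. The sketches below are reconstructions consistent with how MyBolt1 uses them: a Serializable JavaBean whose field names are inferred from the getters above, and a helper built on standard Java serialization. Note that round-tripping the serialized bytes through a String (as the Redis list here does) relies on a consistent character encoding between writer and reader.

import java.io.Serializable;

public class WasteBean implements Serializable {
    private static final long serialVersionUID = 1L;
    private String sc_id;   // collector id
    private String sc_name; // collector name
    private String o_money; // order amount

    public String getSc_id() { return sc_id; }
    public String getSc_name() { return sc_name; }
    public String getO_money() { return o_money; }
    // setters omitted for brevity
}

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

public class SerializeUtil {
    // serialize an object to bytes with standard Java serialization
    public static byte[] serialize(Object obj) {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
             ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(obj);
            oos.flush();
            return bos.toByteArray();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    // restore the object from its serialized bytes
    public static Object unserialize(byte[] bytes) {
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
            return ois.readObject();
        } catch (IOException | ClassNotFoundException e) {
            throw new RuntimeException(e);
        }
    }
}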


// Second bolt (aggregates amounts and periodically flushes them to HBase)

import backtype.storm.Config;
import backtype.storm.Constants;
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.IRichBolt;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Tuple;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;
import java.util.*;

public class MyBolt2 implements IRichBolt {
    private static final long serialVersionUID = 1L;
    private OutputCollector outputCollector;
    private Map<String, Integer> map; // running totals of money, keyed by "sc_id_sc_name"
    private static Configuration configuration = HBaseConfiguration.create();
    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.outputCollector = outputCollector;
        this.map = new HashMap<>();
    }

    @Override
    public void execute(Tuple tuple) {
        if (tuple.getSourceComponent().equals(Constants.SYSTEM_COMPONENT_ID)
                && tuple.getSourceStreamId().equals(Constants.SYSTEM_TICK_STREAM_ID)) {
            try {
                // flush the running totals to HBase every 6 seconds; the tick
                // interval is configured in getComponentConfiguration()
                toHbase(map);
            } catch (IOException e) {
                e.printStackTrace();
            }
        } else {
            String[] strings = tuple.getStringByField("one").split("\t");
            String sc_id_name = strings[0];
            int o_money = Integer.parseInt(strings[1]);
            // accumulate the running total for this collector
            if (map.get(sc_id_name) != null) {
                map.put(sc_id_name, map.get(sc_id_name) + o_money);
            } else {
                map.put(sc_id_name, o_money);
            }
        }
        outputCollector.ack(tuple);
    }

    @Override
    public void cleanup() {

    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {

    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        Config config = new Config();
        // have Storm send this bolt a system tick tuple every 6 seconds
        config.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 6);
        return config;
    }
    public static void toHbase(Map<String, Integer> map) throws IOException {
        HTable table = new HTable(configuration, "waste");
        Set<String> set = map.keySet();
        List<Put> putList = new ArrayList<>();
        for (String s : set) {
            String[] strings = s.split("_");
            String sc_id = strings[0];
            String sc_name = strings[1];
            Put put = new Put(Bytes.toBytes(sc_id)); // rowkey is the collector id
            put.add(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(sc_name));
            put.add(Bytes.toBytes("info"), Bytes.toBytes("money"), Bytes.toBytes(map.get(s).toString()));
            putList.add(put);
        }
        table.put(putList);
        table.close();
    }
}
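toHbase() assumes an HBase table named "waste" with a column family "info" already exists. If it does not, it could be created once before running the topology; a sketch using the same legacy HBase client API as MyBolt2 (the class name CreateWasteTable is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CreateWasteTable {
    public static void main(String[] args) throws Exception {
        Configuration configuration = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(configuration);
        if (!admin.tableExists("waste")) {
            HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf("waste"));
            descriptor.addFamily(new HColumnDescriptor("info")); // column family used by toHbase()
            admin.createTable(descriptor);
        }
        admin.close();
    }
}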




Reposted from blog.csdn.net/csdn_hzx/article/details/80851842