Storm计算模型——java(模拟实现经典案例)

一、storm计算模型

1、Topology – DAG有向无环图的实现

  • 对于Storm实时计算逻辑的封装,即,由一系列通过数据流相互关联的Spout、Bolt所组成的拓扑结构
    在这里插入图片描述
  • 生命周期:此拓扑只要启动就会一直在集群中运行,直到手动将其kill,否则不会终止(区别于MapReduce当中的Job,MR当中的Job在计算执行完成就会终止)

2、Tuple – 元组

  • Stream中最小数据组成单元

3、Stream – 数据流

  • 从Spout中源源不断传递数据给Bolt、以及上一个Bolt传递数据给下一个Bolt,所形成的这些数据通道即叫做Stream。
  • Stream声明时需给其指定一个Id(默认为Default)
    实际开发场景中,多使用单一数据流,此时不需要单独指定StreamId

4、Spout – 数据源

  • 拓扑中数据流的来源。一般会从指定外部的数据源读取元组(Tuple)发送到拓扑(Topology)中
  • 一个Spout可以发送多个数据流(Stream)
            可先通过OutputFieldsDeclarer中的declare方法声明定义的不同数据流,发送数据时通过SpoutOutputCollector中的emit方法指定数据流Id(streamId)参数将数据发送出去;
            Spout中最核心的方法是nextTuple,该方法会被Storm线程不断调用、主动从数据源拉取数据,再通过emit方法将数据生成元组(Tuple)发送给之后的Bolt计算。

5、Bolt – 数据流处理组件

  • 拓扑中数据处理均由Bolt完成。对于简单的任务或者数据流转换,单个Bolt可以简单实现;更加复杂场景往往需要多个Bolt分多个步骤完成
  • 一个Bolt可以发送多个数据流(Stream)
            可先通过OutputFieldsDeclarer中的declare方法声明定义的不同数据流,发送数据时通过OutputCollector中的emit方法指定数据流Id(streamId)参数将数据发送出去;
            Bolt中最核心的方法是execute方法,该方法负责接收到一个元组(Tuple)数据、真正实现核心的业务逻辑。

二、java代码实现wordcount案例

1、主程序MainClass类

package storm.wordcount;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

public class MainClass {
	
	/**
	 * Builds the wordcount topology (spout -> split -> count -> report),
	 * runs it on an in-process LocalCluster for five seconds, then shuts
	 * the cluster down. The report bolt prints the final tallies on cleanup.
	 */
	public static void main(String[] args) throws InterruptedException {
		
		// Wire the DAG together.
		TopologyBuilder topologyBuilder = new TopologyBuilder();
		
		// Data source: emits sentence tuples.
		topologyBuilder.setSpout("sentence_spout", new SentenceSpout());
		
		// Splits sentences into words. 10 executors sharing 20 tasks;
		// input tuples are distributed randomly (shuffle grouping).
		topologyBuilder.setBolt("split_bolt", new SplitBolt(), 10)
			.shuffleGrouping("sentence_spout").setNumTasks(20);
		
		// Fields grouping: tuples with the same "word" value are hashed to
		// the same task, so each word's running count lives in one place.
		topologyBuilder.setBolt("count_bolt", new CountBolt())
			.fieldsGrouping("split_bolt", new Fields("word"));
		
		// Global grouping funnels every count into the single reporting task.
		topologyBuilder.setBolt("report_bolt", new ReportBolt())
			.globalGrouping("count_bolt");
		
		// In-process cluster that simulates a real Storm deployment.
		LocalCluster localCluster = new LocalCluster();
		
		Config topologyConfig = new Config();
		// Debug mode: log every emitted tuple.
		topologyConfig.setDebug(true);
		
		// Submit, let it run briefly, then tear everything down.
		localCluster.submitTopology("wordcount", topologyConfig, topologyBuilder.createTopology());
		Thread.sleep(5000);
		localCluster.shutdown();
		
	}
	
}

2、SentenceSpout类(给拓扑设置水龙头)

package storm.wordcount;

import java.util.Map;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

/**
 * Spout that endlessly cycles through a fixed corpus of sentences,
 * emitting one sentence per {@link #nextTuple()} call on the default
 * stream under the single field "sentence".
 *
 * Tuples are emitted without a message id, so reliable processing
 * (ack/fail replay) is not enabled for this topology.
 */
public class SentenceSpout extends BaseRichSpout {
	
	private SpoutOutputCollector collector;
	
	// Static corpus cycled through forever to simulate an endless stream.
	private String[] sentences = {
			"The logic for a realtime application is packaged into a Storm topology",
			"The stream is the core abstraction in Storm",
			"A spout is a source of streams in a topology",
			"All processing in topologies is done in bolts",
			"for each bolt which streams it should receive as input",
			"every spout tuple will be fully processed by the topology",
			"Each spout or bolt executes as many tasks across the cluster",
			"Topologies execute across one or more worker processes"
	};
	
	// Index of the next sentence to emit; always kept in [0, sentences.length).
	private int index = 0;
	
	@Override
	public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
		// Called once when the cluster initializes this spout instance;
		// keep the collector so nextTuple() can emit through it.
		this.collector = collector;
	}

	@Override
	public void nextTuple() {
		// Called repeatedly by Storm's worker thread: emit one sentence per call.
		// (Other emit variants exist — emit(tuple, messageId) for reliable
		// processing, emit(streamId, ...) for named streams, emitDirect(...)
		// for direct grouping — none are needed here.)
		this.collector.emit(new Values(sentences[index]));
		
		// Wrap in place instead of a bare index++: an ever-growing counter
		// would eventually overflow to a negative value and make
		// sentences[index % length] throw ArrayIndexOutOfBoundsException.
		index = (index + 1) % sentences.length;
		
		try {
			// Throttle so tuples flow in at a steady, even rate.
			Thread.sleep(10);
		} catch (InterruptedException e) {
			// Restore the interrupt flag so the worker thread can observe it.
			Thread.currentThread().interrupt();
		}
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		// Declare the tuple schema on the default stream: a single field
		// named "sentence" that downstream bolts read by name.
		declarer.declare(new Fields("sentence"));
	}

}

3、随机分组SplitBolt类(以随机分组的形式接收sentence_spout发送过来的元组)

package storm.wordcount;

import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

/**
 * Bolt that splits each incoming sentence into words and emits one
 * ("word", 1) pair per word on the default stream.
 */
public class SplitBolt extends BaseRichBolt {
	
	private OutputCollector collector;
	
	@Override
	public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
		// Called once after the worker initializes this bolt;
		// keep the collector so execute() can emit through it.
		this.collector = collector;
	}

	@Override
	public void execute(Tuple input) {
		// Read the sentence from the upstream tuple by field name.
		String sentence = input.getStringByField("sentence");
		
		// Split on runs of whitespace rather than a single space: a literal
		// " " split would emit empty-string "words" for consecutive or
		// leading whitespace, which would then be counted downstream.
		String[] words = sentence.split("\\s+");
		
		for (String word : words) {
			if (word.isEmpty()) {
				continue; // leading whitespace yields one empty token; skip it
			}
			// Emit a two-field tuple: the word and an initial count of 1.
			this.collector.emit(new Values(word, 1));
		}
		
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		// Declare the two-field schema: the word and its count (always 1 here).
		declarer.declare(new Fields("word", "count"));
	}

}

4、CountBolt类(相同word值的元组分发给同一个bolt来处理)

package storm.wordcount;

import java.util.HashMap;
import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

/**
 * Bolt that keeps a running total per word. Fields grouping upstream
 * guarantees all tuples for a given word reach the same task, so the
 * plain HashMap needs no synchronization.
 *
 * Emits ("word", sum) with the updated running total on every input tuple.
 */
public class CountBolt extends BaseRichBolt {
	
	private OutputCollector collector;
	// Running total per word, local to this task.
	private Map<String, Long> counts;
	
	@Override
	public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
		this.collector = collector;
		counts = new HashMap<>();
	}

	@Override
	public void execute(Tuple input) {
		
		String word = input.getStringByField("word");
		Integer count = input.getIntegerByField("count");
		
		// merge() replaces the get / null-check / put dance: insert the
		// count if the word is new, otherwise add it to the existing total,
		// and return the updated value in one call.
		Long sum = counts.merge(word, count.longValue(), Long::sum);
		
		this.collector.emit(new Values(word, sum));
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		// Downstream reads the word and its running total by these names.
		declarer.declare(new Fields("word", "sum"));
	}
}

5、ReportBolt类(获取输出结果)

package storm.wordcount;

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

/**
 * Terminal bolt that records the latest running total per word and
 * prints the final tallies when the (local) cluster shuts down.
 */
public class ReportBolt extends BaseRichBolt {
	
	// Final tally: word -> most recent running total received from CountBolt.
	private Map<String, Long> counts;
	
	@Override
	public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
		// Terminal bolt: nothing is emitted downstream, so the collector is unused.
		this.counts = new HashMap<>();
	}

	@Override
	public void execute(Tuple input) {
		// Each tuple carries the up-to-date total for one word;
		// overwrite whatever value was stored before.
		counts.put(input.getStringByField("word"), input.getLongByField("sum"));
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		// Terminal bolt — declares no output stream.
	}
	
	@Override
	public void cleanup() {
		// Invoked on LocalCluster shutdown (not guaranteed on a real
		// cluster): dump the final word counts.
		System.err.println("============================================");
		counts.forEach((word, total) -> System.err.println(word + "  ---  " + total));
		System.err.println("============================================");
	}
}

6、jar包

猜你喜欢

转载自blog.csdn.net/weixin_42312342/article/details/89386023