Storm usage example

 

Storm program development requires several components:

1. Topology — the main entry point of the program. The configuration file should be loaded in the main function and put into the conf object so that the bolts can read it later. The `./conf` path resolves to the configuration file inside the jar package.

2. Spout — the data source of the Storm program.

3. Bolt — the processing nodes; a program may have n bolt nodes.

 

1. Topology

 

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.topology.TopologyBuilder;

/**
 * Topology entry point. Wires the spout and bolt together, loads the
 * external Kafka/Redis configuration into the Storm {@code Config}, and
 * submits the topology either to a real cluster (when a topology name is
 * passed as the first CLI argument) or to an in-process LocalCluster for
 * development.
 */
public class MyTopology1 {

    public static void main(String[] args) {
        try {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("spout", new RandomWordSpout(), 1).setNumTasks(8);
            builder.setBolt("longBolt", new longBolt(), 1).shuffleGrouping("spout");

            // Initialize the configuration file and copy the settings we need
            // into the Storm config so downstream bolts can read them.
            ConfigFactory.init("./conf/config-kafka.xml");
            Config config = new Config();
            config.put("KafkaConfig", KafkaConfig.getProducerPro());
            config.put("redis.url", ConfigFactory.getString("redis.url"));
            config.setDebug(true);

            // BUG FIX: the original submitted to a LocalCluster unconditionally
            // here, BEFORE the mode check below — in cluster mode the topology
            // was therefore submitted twice (locally and remotely). The mode
            // check alone decides where the topology runs.
            if (args != null && args.length > 0) {
                // Cluster mode: topology name comes from the first CLI argument.
                config.setNumWorkers(2);
                StormSubmitter.submitTopology(args[0], config, builder.createTopology());
            } else {
                // Local mode for development/testing.
                config.setMaxTaskParallelism(3);
                LocalCluster cluster = new LocalCluster();
                cluster.submitTopology("MyTopology1", config, builder.createTopology());
                Thread.sleep(1000);
                // cluster.shutdown();  // left commented so the local topology keeps running
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

2. Spout data sources

  

import java.util.Map;
import java.util.Random;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

/**
 * Spout that simulates a data source: every call to {@link #nextTuple()}
 * emits one randomly chosen product name from a fixed array. In a real
 * deployment the data would come from e.g. a Kafka message queue.
 */
public class RandomWordSpout extends BaseRichSpout {

	private static final long serialVersionUID = 1L;

	// Collector used to emit tuples downstream; assigned in open().
	private SpoutOutputCollector collector;

	// PERF FIX: reuse one Random instead of allocating a new instance on
	// every nextTuple() invocation as the original did.
	private final Random random = new Random();

	// Simulated data set of product names.
	String[] words = {"iphone","xiaomi","mate","sony","sumsung","moto","meizu"};

	/**
	 * Core spout logic: Storm calls this in a loop; each call emits one
	 * tuple containing a random product name.
	 */
	@Override
	public void nextTuple() {

		// Pick a random product name from the words array.
		String goodsName = words[random.nextInt(words.length)];

		// Wrap the product name in a tuple and send it to the next component.
		collector.emit(new Values(goodsName));

		// Throttle emission: sleep 10 seconds between tuples.
		// (The original comment claimed 500 ms but the code sleeps 10000 ms;
		// the sleep value is kept unchanged to preserve behavior.)
		Utils.sleep(10000);
	}

	/**
	 * Initialization hook, called once when the spout component is
	 * instantiated; stores the output collector for use in nextTuple().
	 */
	@Override
	public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {

		this.collector = collector;
	}

	/**
	 * Declares the field name of the data in the tuples this spout emits.
	 * NOTE(review): "orignname" looks like a typo for "originname", but it
	 * is a wire-level contract consumed by downstream bolts, so it must
	 * not be renamed here.
	 */
	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {

		declarer.declare(new Fields("orignname"));
	}

}

 3. Bolt business processing code

 

import java.util.Map;

import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;

/**
 * Skeleton bolt: a processing node in the topology. The business logic in
 * {@link #execute} is intentionally empty in this example; it declares a
 * single output field named "word".
 * NOTE(review): the class name "longBolt2" violates Java naming convention
 * (should be UpperCamelCase, e.g. LongBolt2), but renaming it would break
 * the topology wiring, so it is kept as-is.
 */
public class longBolt2 extends BaseBasicBolt {

	private static final long serialVersionUID = -4561045897494106937L;

	/** One-time initialization hook; no setup is needed in this example. */
	@Override
	public void prepare(Map stormConf, TopologyContext context) {
	}

	/** Per-tuple processing; left empty in this skeleton example. */
	@Override
	public void execute(Tuple tuple, BasicOutputCollector collector) {
	}

	/** Declares the single output field emitted by this bolt. */
	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		declarer.declare(new Fields("word"));
	}

}

 

Start a storm program: 

 storm jar storm-kafka-0.0.2_wzt.jar   com.jusfoun.testStrom.MyTopology1 MyTopology1

Kill the previous program:

storm kill MyTopology1

 

 

 

Guess you like

Origin http://10.200.1.11:23101/article/api/json?id=327002025&siteId=291194637