Setting up a Storm 1.1.0 development environment with Eclipse Luna on Win7 and running a Chinese WordCount

Setting up a Storm development environment is fairly simple: no Eclipse plugin is needed; just create a Java project and add the lib jars to its build path. Running locally doesn't require starting Storm first, either; Run As -> Java Application is all it takes. The steps in detail:

First, you need Eclipse installed on your machine; if you don't have it, download a copy. For Java development a JDK is also required, and there are plenty of guides to installing and configuring one, so I won't repeat that here. The software versions I used:

1. JDK: jdk-8u77-windows-x64.exe;

2. Storm: apache-storm-1.1.1.tar.gz;

3. Word-segmentation library: IKAnalyzer2012_u6.jar

Step 1: create the project. In Eclipse, choose File -> New -> Java Project, name it WordCount, and click Finish.

Step 2: right-click the project and choose Build Path -> Configure Build Path. On the Libraries tab, click Add External JARs and add the jars from the extracted Storm lib directory together with the segmenter jar.

Step 3: under the project's src directory, create myext.dic and mystopword.dic with the following contents (save both as UTF-8, which is what IK Analyzer expects):

myext.dic

高育良
祁同伟
陈海
陈岩石
侯亮平
高小琴
沙瑞金
李达康
蔡成功

mystopword.dic

你
我
他
是
的
了
啊
说
也
和
在
就

Step 4: copy IKAnalyzer.cfg.xml into the project's src directory and point its two entries at the dictionary files created above. The file is a standard Java properties XML; apart from the two entry values, the content below is the stock wrapper that ships with IK Analyzer:

    <?xml version="1.0" encoding="UTF-8"?>
    <!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
    <properties>
        <comment>IK Analyzer extension configuration</comment>
        <!-- your own extension dictionary -->
        <entry key="ext_dict">myext.dic</entry>
        <!-- your own extension stopword dictionary -->
        <entry key="ext_stopwords">mystopword.dic</entry>
    </properties>
Step 5: create a new class named WordSplit (the file must be WordSplit.java to match the public class declared below), with the following content:

package com.xh.storm.wordsplit;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.StringReader;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class WordSplit {

    /**
     * Spout that reads the input file and emits it line by line.
     *
     * @author Administrator
     */
    public static class WordReaderSpout extends BaseRichSpout {

        SpoutOutputCollector _collector;
        InputStreamReader isr;
        boolean isEnd = false;

        @Override
        public void open(Map conf, TopologyContext context,
                SpoutOutputCollector collector) {
            String inputFile = "D://testdata//peoples.txt";
            try {
                isr = new InputStreamReader(new FileInputStream(inputFile));
            } catch (FileNotFoundException e) {
                e.printStackTrace();
            }
            _collector = collector;
        }

        @Override
        public void nextTuple() {
            // the file only needs to be read once
            if (isEnd) {
                System.out.println("*******Spout is over, no need to emit.*********");
                return;
            }

            // read the local file, emitting one tuple per line
            String line = null;
            try (BufferedReader br = new BufferedReader(isr)) {
                while ((line = br.readLine()) != null) {
                    System.out.printf("line : %s%n", line);
                    _collector.emit(new Values(line));
                }
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                isEnd = true; // the whole file has been read
            }
        }

        @Override
        public void ack(Object id) {
        }

        @Override
        public void fail(Object id) {
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }

    /**
     * Bolt that splits the lines emitted by the spout into words.
     *
     * @author Administrator
     */
    public static class SplitWordBolt extends BaseRichBolt {

        private OutputCollector outputCollector;

        @Override
        public void prepare(Map stormConf, TopologyContext context,
                OutputCollector collector) {
            this.outputCollector = collector;
        }

        @Override
        public void execute(Tuple tuple) {
            String sentence = tuple.getString(0); // one line at a time
            // smart segmentation mode
            IKSegmenter ikSeg = new IKSegmenter(new StringReader(sentence), true);
            try {
                for (Lexeme lexeme = ikSeg.next(); lexeme != null; lexeme = ikSeg.next()) {
                    outputCollector.emit(new Values(lexeme.getLexemeText()));
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }

    /**
     * Bolt that counts the words received from above; the counts for the key
     * character names are written to result.txt at shutdown.
     *
     * @author Administrator
     */
    public static class WordCountBolt extends BaseBasicBolt {

        Map<String, Integer> counts = new HashMap<String, Integer>();
        String out;
        Set<String> keyName = new HashSet<>();

        @Override
        public void prepare(Map stormConf, TopologyContext context) {
            out = "D://testdata//result.txt";

            // if result.txt already exists, delete it so it can be recreated
            File outFile = new File(out);
            if (outFile.exists()) {
                outFile.delete();
            }

            // read the name dictionary into a set; at cleanup time the counts
            // of the names in this set are written to result.txt
            try (BufferedReader br = new BufferedReader(new InputStreamReader(
                    WordSplit.class.getClassLoader().getResourceAsStream("myext.dic")))) {
                String peopleName = null;
                while ((peopleName = br.readLine()) != null) {
                    keyName.add(peopleName);
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        @Override
        public void execute(Tuple tuple, BasicOutputCollector collector) {
            String word = tuple.getString(0); // one word at a time
            Integer count = counts.get(word);
            if (count == null)
                count = 0;
            count++;
            counts.put(word, count);
            collector.emit(new Values(word, count));
        }

        @Override
        public void cleanup() {
            // at shutdown, write the counts of the key character names to result.txt
            try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
                    new FileOutputStream(out)))) {
                for (Map.Entry<String, Integer> keyWord : counts.entrySet()) {
                    if (keyName.contains(keyWord.getKey())) {
                        bw.write(keyWord.getKey() + " : " + keyWord.getValue() + "\r\n");
                        bw.flush();
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word", "count"));
        }
    }

    /**
     * Bolt that writes the running word counts to a local tmp file.
     *
     * @author Administrator
     */
    public static class SaveOutput extends BaseRichBolt {

        String temp;

        @Override
        public void prepare(Map stormConf, TopologyContext context,
                OutputCollector collector) {
            temp = "D:/out/tmp" + System.currentTimeMillis();

            // if the tmp file already exists, delete it so it can be recreated
            File tempFile = new File(temp);
            if (tempFile.exists()) {
                tempFile.delete();
            }
        }

        @Override
        public void execute(Tuple input) {
            // the running count for this word, as emitted by the count bolt
            String name = input.getStringByField("word");
            Integer counts = input.getIntegerByField("count");

            // append the intermediate count to the tmp file
            try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
                    new FileOutputStream(temp, true)))) {
                bw.write(name + " : " + counts + "\r\n");
                bw.flush();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // terminal bolt: no output fields to declare
        }
    }

    public static void main(String[] args) throws Exception {

        TopologyBuilder builder = new TopologyBuilder(); // build the topology

        builder.setSpout("spout", new WordReaderSpout(), 1); // the data source

        // split the spout's lines into words; shuffleGrouping distributes
        // tuples randomly across the split tasks
        builder.setBolt("split", new SplitWordBolt(), 10).shuffleGrouping("spout");

        // count the split words; fieldsGrouping on "word" routes the same word
        // to the same count task, keeping each word's count in one place
        builder.setBolt("count", new WordCountBolt(), 10).fieldsGrouping(
                "split", new Fields("word"));

        // save the running results; allGrouping replicates every tuple to all save tasks
        builder.setBolt("save", new SaveOutput(), 10).allGrouping("count");

        Config conf = new Config();
        conf.setDebug(true);

        // cap every component at one task, overriding the parallelism hints above
        conf.setMaxTaskParallelism(1);

        // with an argument, submit to the cluster; otherwise run locally
        if (args != null && args.length > 0) {
            conf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("word-split", conf, builder.createTopology());
            Thread.sleep(300000); // shut down automatically after 5 minutes
            cluster.shutdown();
        }
    }
}

Then Run As -> Java Application; after a short wait the results are ready, and result.txt looks like this:

侯亮平 : 1355
李达康 : 780
陈海 : 298
祁同伟 : 553
沙瑞金 : 353
陈岩石 : 173
高小琴 : 329
高育良 : 626
蔡成功 : 525

The data file is available in my resources: http://download.csdn.net/download/yongge1981/10169399

Reference: https://www.cnblogs.com/wuxun1997/p/6884378.html

Reposted from blog.csdn.net/yongge1981/article/details/78872774