This post presents a Spark version of WordCount in Java. It implements three things:
1. Count the number of occurrences of each word.
2. Sort the output by word frequency, from highest to lowest.
3. Strip the parentheses from the output file (the default Tuple2 formatting).
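For instance, given a hypothetical words.txt containing the two lines

    spark,hadoop,spark
    flink,spark,hadoop

the program below would write

    spark,3
    hadoop,2
    flink,1

instead of unsorted lines such as (hadoop,2).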
The full code is as follows:
package com.cxd.core;

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.SparkSession;

import scala.Tuple2;

public class WordCountLocal {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("WordCountLocal").setMaster("local");
        SparkSession spark = SparkSession.builder().config(conf).getOrCreate();

        // Core WordCount logic: read the file, split each line on commas,
        // map every word to (word, 1) and sum the counts per word.
        JavaRDD<String> input = spark.read().textFile("words.txt").javaRDD();
        JavaRDD<String> words = input.flatMap(line -> Arrays.asList(line.split(",")).iterator());
        JavaPairRDD<String, Integer> pair = words.mapToPair(word -> new Tuple2<>(word, 1));
        JavaPairRDD<String, Integer> res = pair.reduceByKey((v1, v2) -> v1 + v2);
        // res.saveAsTextFile("wc.out"); // Saving here would give unsorted counts wrapped in parentheses, e.g. (spark,3).

        // Sort by frequency in descending order: swap (word, count) to (count, word),
        // sort by key with ascending = false, then swap back.
        JavaPairRDD<Integer, String> resRDD = res.mapToPair(line -> new Tuple2<>(line._2, line._1));
        JavaPairRDD<Integer, String> tmp = resRDD.sortByKey(false);
        JavaPairRDD<String, Integer> out = tmp.mapToPair(line -> new Tuple2<>(line._2, line._1));

        // Remove the parentheses by formatting each pair as "word,count".
        JavaRDD<String> res1 = out.map(line -> {
            String word = line._1;
            int value = line._2;
            return word + "," + value;
        });
        res1.saveAsTextFile("wc.res");

        // Print the raw (unsorted) counts to the console for a quick check.
        res.foreach(r -> System.out.println(r));

        spark.stop();
    }
}
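Note that saveAsTextFile writes a directory (wc.res) containing part-* files rather than a single file. For comparison, the same three steps can also be expressed with the Dataset API, which avoids the key-swapping dance and writes plain "word,count" lines directly as CSV. The following is a minimal sketch under the same assumptions (a comma-separated words.txt); the class name WordCountSql and the output directory wc_sql.res are chosen here only for illustration:

package com.cxd.core;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.explode;
import static org.apache.spark.sql.functions.split;

public class WordCountSql {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("WordCountSql")
                .master("local")
                .getOrCreate();

        // Read each line into the default "value" column, split it on commas,
        // and explode the resulting array into one row per word.
        Dataset<Row> counts = spark.read().textFile("words.txt")
                .select(explode(split(col("value"), ",")).as("word"))
                .groupBy("word")
                .count()                           // adds a "count" column
                .orderBy(col("count").desc());     // highest frequency first

        // CSV output is already "word,count" with no parentheses.
        counts.coalesce(1).write().csv("wc_sql.res");

        spark.stop();
    }
}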