IDEA + Maven: Developing Your First Hadoop Program (WordCount)
1. Create a new Maven project and select the JDK version.
2. Set the GroupId and ArtifactId.
3. Set the project name.
4. In Settings, open Java Compiler and change the target bytecode version to match your JDK (you can also pin the level in the pom, as sketched below).
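A minimal pom sketch for step 4; 1.8 is an assumption, so adjust both values to whatever JDK you selected:

<properties>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
</properties>

This keeps the compiler level stable even when the project is built outside IDEA.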
5. Copy hadoop-2.7.2.tar.gz from the cluster to the D: drive, unpack it, and add hadoop.dll, winutils.exe, and winutils.pdb to its bin directory.
6. Set the HADOOP_HOME environment variable and append %HADOOP_HOME%\bin to Path (see the example below).
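For example, from an administrator command prompt; D:\hadoop-2.7.2 is an assumed unpack location, so substitute your own path from step 5:

setx /M HADOOP_HOME "D:\hadoop-2.7.2"

Then add %HADOOP_HOME%\bin to the Path variable in the system environment-variable dialog and reopen IDEA so it picks up the change.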
7. Add the dependencies to pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.kevin.dt</groupId>
    <artifactId>DTWorker</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <!-- Hadoop version; kept in one place so all Hadoop artifacts stay in sync -->
        <hadoop.version>2.7.2</hadoop.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-dependency-plugin</artifactId>
                <configuration>
                    <excludeTransitive>false</excludeTransitive>
                    <stripVersion>true</stripVersion>
                    <outputDirectory>./lib</outputDirectory>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
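With the maven-dependency-plugin configured as above, you can gather every dependency jar (transitive ones included, version numbers stripped) into a local lib directory, which is convenient when shipping the job to a cluster:

mvn dependency:copy-dependencies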
8. Add a log4j.properties file under src/main/resources so the run prints detailed progress information and exceptions:
log4j.rootLogger=INFO, stdout, logfile
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
9. The WordCount program:
package com.kevin.hadoop;

import java.io.IOException;
import java.net.URI;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * @author kevin
 * @version 1.0
 * @description A simple WordCount example: counts word occurrences.
 * @createDate 2018/12/17
 */
public class WordCount {

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // Split the line into whitespace-delimited tokens and emit (word, 1) for each.
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            // Sum all counts for this word and emit (word, total).
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();      // load the Hadoop configuration

        // Delete the output directory if it already exists, so reruns don't fail.
        FileSystem fs = FileSystem.get(URI.create("hdfs://192.168.171.100:9000"), conf);
        Path outputPath = new Path("hdfs://192.168.171.100:9000/test/output_01");
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
        }

        Job job = Job.getInstance(conf, "word count"); // create the job from the configuration
        job.setJarByClass(WordCount.class);            // main class
        job.setMapperClass(TokenizerMapper.class);     // mapper
        job.setCombinerClass(IntSumReducer.class);     // combiner (safe here because summation is associative)
        job.setReducerClass(IntSumReducer.class);      // reducer
        job.setOutputKeyClass(Text.class);             // output key type
        job.setOutputValueClass(IntWritable.class);    // output value type
        FileInputFormat.addInputPath(job, new Path("hdfs://192.168.171.100:9000/test/input_01/")); // input path
        FileOutputFormat.setOutputPath(job, outputPath);          // output path
        System.exit(job.waitForCompletion(true) ? 0 : 1);         // wait for completion, then exit
    }
}
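To run the job, you can launch main() straight from IDEA (this is what the winutils setup from steps 5 and 6 enables on Windows), or build the jar and submit it on the cluster. A sketch of the cluster route, with the jar name following from the artifactId and version in the pom:

mvn package
hadoop jar target/DTWorker-1.0-SNAPSHOT.jar com.kevin.hadoop.WordCount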
10. The input consists of two files (the commands for uploading them to HDFS follow the listings):
file01:
Hello World Bye World
Hello Hadoop Bye Hadoop
Bye Hadoop Hello Hadoop
yes me
good yes
file02:
Hello World Bye World
Hello Hadoop Bye Hadoop
Bye Hadoop Hello Hadoop
yes me
catch ese
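A sketch of how the files can be uploaded into the input directory the job reads, assuming file01 and file02 sit in the current local directory:

hadoop fs -mkdir -p /test/input_01
hadoop fs -put file01 file02 /test/input_01/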
11. The result:
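Given the inputs above, the output is deterministic: part-r-00000 in /test/output_01 should contain the merged counts below, with keys sorted in Text byte order (so capitalized words come first):

Bye	6
Hadoop	8
Hello	6
World	4
catch	1
ese	1
good	1
me	2
yes	3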