MapReduce WordCount Demo

Source: https://blog.csdn.net/wanbf123/article/details/82013419

pom.xml:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>hadoop</groupId>
    <artifactId>demo</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>jar</packaging>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.0</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.0</version>
        </dependency>

        <dependency>
            <groupId>jdk.tools</groupId>
            <artifactId>jdk.tools</artifactId>
            <version>1.6</version>
            <scope>system</scope>
            <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.0</version>
        </dependency>

        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
WCMain.java (job driver):

package hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCMain {

    private static String iPath = "hdfs://localhost:9000/wordcount/input/test.txt";
    private static String oPath = "hdfs://localhost:9000/wordcount/output/";

    /**
     * 1. Define and implement the business logic through the Job object.
     * 2. Submit the configured job to the cluster to run.
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job wcjob = Job.getInstance(conf);

        wcjob.setJarByClass(WCMain.class);
        wcjob.setMapperClass(WCMapper.class);
        wcjob.setReducerClass(WCReducer.class);

        // Set the output key and value data types of the Mapper class
        wcjob.setMapOutputKeyClass(Text.class);
        wcjob.setMapOutputValueClass(IntWritable.class);

        // Set the output key and value data types of the Reducer class
        wcjob.setOutputKeyClass(Text.class);
        wcjob.setOutputValueClass(IntWritable.class);

        // Specify where the input data is located
        FileSystem fs = FileSystem.get(conf);
        Path IPath = new Path(iPath);
        if (fs.exists(IPath)) {
            FileInputFormat.addInputPath(wcjob, IPath);
        } else {
            System.err.println("Input path does not exist: " + iPath);
            System.exit(1);
        }

        // Specify where the results should be saved (any existing output directory is deleted first)
        Path OPath = new Path(oPath);
        fs.delete(OPath, true);
        FileOutputFormat.setOutputPath(wcjob, OPath);

        // Submit the job to the YARN cluster and wait for completion
        boolean res = wcjob.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
WCReducer.java:

package hadoop;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * @ClassName: WCReducer
 * @Description: Reducer for WordCount: sums up the counts emitted for each word.
 * @author kngines
 * @date 2018-03-17
 */

public class WCReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    // Lifecycle: the framework calls reduce() once for each key and its group of values
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {

        int count = 0;  // counter for the current word
        for (IntWritable value : values) { // iterate over all values and accumulate them into count
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
WCMapper.java:

package hadoop;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * @ClassName: WCMapper
 * @Description: Mapper for WordCount: splits each input line into words and emits <word, 1>.
 * @author kngines
 * @date 2018-03-17
 */

public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Lifecycle of map(): the framework calls it once for every line of input
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        String line = value.toString();  // convert the input line to a String
        String[] words = line.split(" ");  // split the line into words

        for (String word : words) {  // for each word, emit <word, 1>
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
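To make the map/reduce data flow concrete, here is a tiny hypothetical example (the input line is invented for illustration):

Input line:                 hello world hello
Mapper output:              <hello,1> <world,1> <hello,1>
Reducer input (grouped):    <hello,[1,1]> <world,[1]>
Reducer output:             <hello,2> <world,1>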
Problem Summary

Problem A:
FileAlreadyExistsException
FileAlreadyExistsException: Output directory hdfs://localhost:9000/wordcount/output already exists
Solution
Handle it in the code (Java):
// Specify where the results should be saved (any existing output directory is deleted first)
Path OPath = new Path(oPath);
fs.delete(OPath, true);
Or delete the HDFS output directory manually, as shown below.
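For example, with the output path used in this demo, the directory can be removed from the command line:

hdfs dfs -rm -r hdfs://localhost:9000/wordcount/output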

Problem B:
SafeModeException
Description & root cause
This problem can leave a Hadoop job stuck at: INFO mapreduce.Job: Running job.
It is caused by insufficient free disk space. In this experiment, the virtual machine's root partition had less than 10% free space; after removing some newly installed packages and rerunning the job, the problem was resolved.
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.SafeModeException): Cannot delete /benchmarks/TestDFSIO. Name node is in safe mode.
Resources are low on NN. Please add or free up more resources then turn off safe mode manually. NOTE:  If you turn off safe mode before adding resources, the NN will immediately return to safe mode. Use "hdfs dfsadmin -safemode leave" to turn safe mode off.
Solutions
Leave safe mode:
hdfs dfsadmin -safemode leave
Delete unneeded files on Linux (the approach taken here; simple and effective), or enlarge the virtual machine's disk partition.
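The NameNode's safe-mode state can also be checked before and after with the standard dfsadmin subcommand:

hdfs dfsadmin -safemode get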


Other tips
Kill a currently running Hadoop job
hadoop job -list  # list all currently running jobs

hadoop job -kill job_xx_xx  # kill a job by its job id
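On a YARN cluster, the same can be done at the application level with the standard yarn CLI subcommands:

yarn application -list  # list running applications
yarn application -kill application_xx_xx  # kill an application by its id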
Find large files on a Linux system
find . -type f -size +100M  # find files larger than 100 MB
df -hl  # check disk usage


Running the WordCount example that ships with Hadoop:

hadoop jar hadoop-mapreduce-examples-2.7.7.jar wordcount /user/root/input/ /user/root/out1/
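To run the demo from this post instead, a minimal sketch (assuming the default jar name produced by the pom above, demo-0.0.1-SNAPSHOT.jar, and the hadoop.WCMain driver, which hard-codes its input and output paths):

mvn clean package
hadoop jar target/demo-0.0.1-SNAPSHOT.jar hadoop.WCMain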
