Spark (4): A First Spark Program (Java)

1. Problem Description

Three files store each student's Chinese, Math, and English scores respectively. The program should output each student's scores together with their average.

The data format is as follows:
Chinese.txt

张三    78
李四    89
王五    96
赵六    67

Math.txt

张三    88
李四    99
王五    66
赵六    77

English.txt

张三    80
李四    82
王五    84
赵六    86
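
From this data, the program should report, for example: 张三 averages (78 + 88 + 80) / 3 = 82.0, 李四 (89 + 99 + 82) / 3 = 90.0, 王五 (96 + 66 + 84) / 3 = 82.0, and 赵六 (67 + 77 + 86) / 3 ≈ 76.67.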

File directory (screenshot omitted)

2. Spark Programming (Java)

pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.cl</groupId>
    <artifactId>mapreduce</artifactId>
    <version>1.0-SNAPSHOT</version>

    <url>http://maven.apache.org</url>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <spark.version>2.1.0</spark.version>
        <hadoop.version>2.8.3</hadoop.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
        </dependency>
        <!-- Hadoop distributed file system (HDFS) libraries -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <!-- Hadoop common libraries -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
            <version>${hadoop.version}</version>
        </dependency>

        <!-- spark -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>${spark.version}</version>
            <scope>compile</scope>
        </dependency>

        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>
    </dependencies>
</project>
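
A note on the Spark dependencies above: the _2.11 suffix in spark-core_2.11 and spark-sql_2.11 is the Scala version the libraries were built against. It must match the Scala build of your Spark runtime (Scala 2.11 is the default for Spark 2.1.0); mixing Scala versions typically fails at runtime with binary-incompatibility errors.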

Implementation class

package com.cl.spark.avg;

import com.google.common.collect.Lists;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.util.*;

public class StudentAvgDouble {

    public static void main(String[] args) {
        // Input path hard-coded for local testing; remove this line to pass the path as a command-line argument
        args = new String[]{"input/avg/score/*.txt"};
        if (args.length < 1) {
            System.err.println("Usage: StudentAvgDouble <file>");
            System.exit(1);
        }
        // Create the Spark session; run in local mode
        SparkSession spark = SparkSession.builder().appName("StudentAvgDouble").master("local").getOrCreate();
        // Read the input files into an RDD of lines
        JavaRDD<String> lines = spark.read().textFile(args[0]).javaRDD();
        // Parse each line into a <name, score> pair
        JavaPairRDD<String, Double> counts = lines.flatMapToPair(new PairFlatMapFunction<String, String, Double>() {
            @Override
            public Iterator<Tuple2<String, Double>> call(String s) {
                ArrayList<Tuple2<String, Double>> tpLists = new ArrayList<>();
                // textFile already delivers one record per line, so a single
                // whitespace tokenizer per record is enough; skip malformed lines
                StringTokenizer tokenizerLine = new StringTokenizer(s);
                if (tokenizerLine.countTokens() >= 2) {
                    String strName = tokenizerLine.nextToken();
                    String strScore = tokenizerLine.nextToken();
                    tpLists.add(new Tuple2<>(strName, Double.parseDouble(strScore)));
                }
                return tpLists.iterator();
            }
        });
        // groupByKey gathers each student's scores; collectAsMap converts the
        // Spark RDD into an ordinary Java Map on the driver
        Map<String, Iterable<Double>> resultMap = counts.groupByKey().collectAsMap();
        for (String key : resultMap.keySet()) {
            DoubleSummaryStatistics score = Lists.newArrayList(resultMap.get(key)).stream().mapToDouble((x) -> x).summaryStatistics();
            System.out.println("(" + key + ", " + resultMap.get(key) + " avg:" + score.getAverage() + ")");
        }
        spark.stop();
    }
}
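
A note on scalability: groupByKey() shuffles each student's full score list and collectAsMap() then pulls everything onto the driver, which is fine for this toy dataset but not for large ones. Below is a minimal alternative sketch (not part of the original article) that reuses the counts pair RDD from the listing above and computes the averages on the executors with aggregateByKey, so only a (sum, count) accumulator per student is shuffled:

// Sketch: per-student averages without collecting full score lists.
// Assumes the `counts` JavaPairRDD<String, Double> built in the listing above.
Tuple2<Double, Integer> zero = new Tuple2<>(0.0, 0);  // (sum, count) accumulator
JavaPairRDD<String, Double> averages = counts
        .aggregateByKey(zero,
                (acc, score) -> new Tuple2<>(acc._1() + score, acc._2() + 1),  // fold one score into the accumulator
                (a, b) -> new Tuple2<>(a._1() + b._1(), a._2() + b._2()))      // merge per-partition accumulators
        .mapValues(acc -> acc._1() / acc._2());       // sum / count
for (Tuple2<String, Double> t : averages.collect()) {
    System.out.println(t._1() + " avg:" + t._2());
}

Because the shuffle now carries only one (sum, count) pair per student per partition, this variant stays cheap regardless of how many score records each student has.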

Console output (screenshot omitted): one line per student, showing the collected scores and the computed average.

Reposted from blog.csdn.net/clypm/article/details/79608202