Spark Series -- Spring Boot Integration (Testing the API)

Related links

Springboot 2.X integrates Spark 2.X to implement WordCount (CSDN blog by 李虹柏)
Spring Boot integration with Spark (CSDN blog by weixin_33924770)
springboot integration with Spark, ETL demo (CSDN blog by MJ)

Simple API test

Introduction

This post exercises the Spark API through simple REST endpoints. Source code: https://gitee.com/shapeless/demo_bigdata/tree/testAPI/

Dependencies and configuration

application.yml

custom:
  spark:
    appName: appName_demo
    sparkHome: .
    # master can be set to:
    #  1. a Spark, Mesos, or YARN cluster URL, e.g. spark://127.0.0.1:7077
    #  2. local mode, e.g. local or local[4]
    master: local[4]
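
The controllers below autowire a JavaSparkContext, but the configuration class that builds it from these properties is not reproduced in this post (it lives in the Gitee repository). A minimal sketch is shown here; the class name SparkConfig and the @Value bindings are assumptions, and the actual class in the repository may differ.

package com.example.config;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Hypothetical configuration class: binds the custom.spark.* properties above
// and exposes the JavaSparkContext that the controllers @Autowired later on.
@Configuration
public class SparkConfig {

    @Value("${custom.spark.appName}")
    private String appName;

    @Value("${custom.spark.sparkHome}")
    private String sparkHome;

    @Value("${custom.spark.master}")
    private String master;

    @Bean
    public SparkConf sparkConf() {
        return new SparkConf()
                .setAppName(appName)
                .setSparkHome(sparkHome)
                .setMaster(master);
    }

    // close() releases the SparkContext when the Spring context shuts down
    @Bean(destroyMethod = "close")
    public JavaSparkContext javaSparkContext(SparkConf sparkConf) {
        return new JavaSparkContext(sparkConf);
    }
}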

pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.3.7.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.example</groupId>
    <artifactId>demo</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>demo</name>
    <description>Demo project for Spring Boot</description>
    <properties>
        <java.version>1.8</java.version>
        <scala.version>2.12</scala.version>
        <spark.version>3.0.1</spark.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>

        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_${scala.version}</artifactId>
            <version>${spark.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_${scala.version}</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_${scala.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
                <configuration>
                    <excludes>
                        <exclude>
                            <groupId>org.projectlombok</groupId>
                            <artifactId>lombok</artifactId>
                        </exclude>
                    </excludes>
                </configuration>
            </plugin>
        </plugins>
    </build>

</project>

Code

Basic API

Create the file src/main/java/com/example/file/TestFile.txt with the following content:

java python javascript java javascript java

Endpoint

package com.example.controller;

import com.example.WordCount;
import com.google.common.base.Joiner;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import scala.Tuple2;

import java.util.*;

@RestController
@RequestMapping("rdd")
public class RDDController {
    @Autowired
    private transient JavaSparkContext javaSparkContext;

    private final String filePath = "src/main/java/com/example/file/TestFile.txt";

    @RequestMapping("wordCount1")
    public Object wordCount1() {
        // Read the local file into a JavaRDD
        JavaRDD<String> file = javaSparkContext
                .textFile(filePath);

        // Split each line on spaces, producing a new JavaRDD of words
        JavaRDD<String> words = file.flatMap(
                line -> Arrays.asList(line.split(" ")).iterator());

        // Map each word to a WordCount with an initial count of 1
        JavaRDD<WordCount> wordcount = words.map(
                word -> new WordCount(word, 1));

        // Convert to key-value pairs of (word, count)
        JavaPairRDD<String, Integer> pair = wordcount.mapToPair(
                wordCount -> new Tuple2<>(wordCount.getWord(), wordCount.getCount())
        );

        // Aggregate the counts by key
        JavaPairRDD<String, Integer> wordcounts = pair.reduceByKey(
                (count1, count2) -> count1 + count2);

        // Convert the result back into WordCount objects
        JavaRDD<WordCount> map = wordcounts.map(
                (tuple2) -> new WordCount(tuple2._1, tuple2._2));
        // Collect the result into a list and return it
        return map.collect();
    }

    @RequestMapping("wordCount2")
    public Object wordCount2() {
        JavaRDD<String> lines = javaSparkContext.textFile(filePath).cache();

        System.out.println();
        System.out.println("-------------------------------------------------------");
        System.out.println(lines.count());

        JavaRDD<String> words = lines.flatMap(str -> Arrays.asList(str.split(" ")).iterator());

        JavaPairRDD<String, Integer> ones = words.mapToPair(str -> new Tuple2<String, Integer>(str, 1));

        JavaPairRDD<String, Integer> counts = ones.reduceByKey((Integer i1, Integer i2) -> (i1 + i2));

        JavaPairRDD<Integer, String> temp = counts.mapToPair(tuple -> new Tuple2<Integer, String>(tuple._2, tuple._1));

        JavaPairRDD<String, Integer> sorted = temp.sortByKey(false)
                .mapToPair(tuple -> new Tuple2<String, Integer>(tuple._2, tuple._1));

        System.out.println(sorted.count());

        List<Tuple2<String, Integer>> output = sorted.collect();
        // List<Tuple2<String, Integer>> output = sorted.take(10);
        // List<Tuple2<String, Integer>> output = sorted.top(10);

        Map<String, Object> result = new HashMap<String, Object>();
        for (Tuple2<String, Integer> tuple : output) {
            result.put(tuple._1(), tuple._2());
        }

        return result;
    }

    @PostMapping("testAPI")
    public Object testAPI() {
        List<Integer> data = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5, 6));

        JavaRDD<Integer> rdd01 = javaSparkContext.parallelize(data);
        rdd01 = rdd01.map(num -> {
            return num * num;
        });
        //data map:1,4,9,16,25,36
        System.out.println("data map: " + Joiner.on(",").skipNulls().join(rdd01.collect()).toString());

        rdd01 = rdd01.filter(x -> x < 6);
        //data filter: 1,4
        System.out.println("data filter: " + Joiner.on(",").skipNulls().join(rdd01.collect()).toString());

        rdd01 = rdd01.flatMap(x -> {
            Integer[] test = {x, x + 1, x + 2};
            return Arrays.asList(test).iterator();
        });
        //flatMap: 1,2,3,4,5,6
        System.out.println("flatMap : " + Joiner.on(",").skipNulls().join(rdd01.collect()).toString());

        JavaRDD<Integer> unionRdd = javaSparkContext.parallelize(data);

        rdd01 = rdd01.union(unionRdd);
        //union: 1,2,3,4,5,6,1,2,3,4,5,6
        System.out.println("union : " + Joiner.on(",").skipNulls().join(rdd01.collect()).toString());

        List<Integer> result = new ArrayList<>();
        result.add(rdd01.reduce((Integer v1, Integer v2) -> {
            return v1 + v2;
        }));
        //reduce: 42
        System.out.println("reduce : " + Joiner.on(",").skipNulls().join(result).toString());
        result.forEach(System.out::print);

        JavaPairRDD<Integer, Iterable<Integer>> groupRdd = rdd01.groupBy(x -> {
            System.out.println("======grouby========: " + x);
            if (x > 10) return 0;
            else return 1;
        });

        List<Tuple2<Integer, Iterable<Integer>>> resul = groupRdd.collect();
        //group by==>  key: 1 value: 1,2,3,4,5,6,1,2,3,4,5,6
        resul.forEach(x -> {
            System.out.println("group by==> key: " + x._1 + " value: "
                    + Joiner.on(",").skipNulls().join(x._2).toString());
        });

        return null;
    }
}
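
The controller above imports com.example.WordCount, which the post does not show. Judging from its usage (a (word, count) constructor plus getWord() and getCount()), a minimal sketch could look like the following; the Lombok annotations and field types are assumptions, and the real class in the repository may differ.

package com.example;

import java.io.Serializable;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

// Hypothetical value object inferred from its usage in RDDController.
// Serializable because instances travel through Spark RDDs.
@Data
@NoArgsConstructor
@AllArgsConstructor
public class WordCount implements Serializable {
    private String word;
    private Integer count;
}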

Spark Streaming

package com.example.controller;

import com.example.spark.CustomReceiver;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("streaming")
public class StreamingController {
    @Autowired
    private transient JavaSparkContext javaSparkContext;

    @PostMapping("customReceiver")
    public String customReceiver() throws InterruptedException{
        // The second argument is the batch interval (the stream is split into batches of this duration)
        JavaStreamingContext jsc = new JavaStreamingContext(javaSparkContext, Durations.seconds(5));
        JavaReceiverInputDStream<String> lines = jsc.receiverStream(new CustomReceiver(StorageLevel.MEMORY_AND_DISK_2()));
        JavaDStream<Long> count =  lines.count();
        count = count.map(x -> {
            System.out.println("这批一共这些条数据:" + x);
            return x;
        });
        System.out.println("count.print()");
        count.print();
        System.out.println("jsc.start()");
        // The computation actually starts here
        jsc.start();
        System.out.println("jsc.awaitTermination()");
        // Wait for the computation to terminate
        jsc.awaitTermination();
        System.out.println("jsc.stop()");
        jsc.stop();
        return "success";
    }
}
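
The com.example.spark.CustomReceiver used above is also not reproduced in the post. Custom Spark Streaming receivers extend org.apache.spark.streaming.receiver.Receiver; the sketch below is a hypothetical stand-in that emits one line of text per second, so the actual receiver in the repository may behave differently.

package com.example.spark;

import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.receiver.Receiver;

// Hypothetical receiver inferred from StreamingController; for illustration only.
public class CustomReceiver extends Receiver<String> {

    public CustomReceiver(StorageLevel storageLevel) {
        super(storageLevel);
    }

    @Override
    public void onStart() {
        // onStart() must return quickly, so the producing loop runs on its own thread
        new Thread(this::receive, "custom-receiver").start();
    }

    @Override
    public void onStop() {
        // Nothing to clean up: the loop below checks isStopped() and exits on its own
    }

    private void receive() {
        try {
            while (!isStopped()) {
                store("hello spark streaming");  // push one record into Spark
                Thread.sleep(1000);
            }
        } catch (InterruptedException e) {
            restart("Receiver thread interrupted", e);
        }
    }
}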

Testing

Start the service

The console output shows the Spark UI address: http://localhost:4040/

Visit http://localhost:4040/ to open the Spark UI.

Test the basic API

Count the number of times each word appears.

Visit: http://localhost:8080/rdd/wordCount1

Front-end result:

[
    {
        "word": "python",
        "count": 1
    },
    {
        "word": "java",
        "count": 3
    },
    {
        "word": "javascript",
        "count": 2
    }
]

Spark result:

Count the number of times each word appears.

Visit: http://localhost:8080/rdd/wordCount2

Front-end result:

{
    "python": 1,
    "java": 3,
    "javascript": 2
}

Spark result:

Pure API test

Visit: http://localhost:8080/rdd/testAPI

Test Spark Streaming

Visit: http://localhost:8080/streaming/customReceiver

Front-end result: the request hangs and never returns, because jsc.awaitTermination() blocks the request thread.

Spark result:

At the point marked by the red arrow, I paused the application in IDEA and then resumed it.

Reposted from blog.csdn.net/feiying0canglang/article/details/114041087