Integrating Kafka with Spark Streaming in Java: WordCount, with updateStateByKey for real-time state updates

Add the dependencies:

    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-streaming_2.11</artifactId>
        <version>${spark.version}</version>
    </dependency>

    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
        <version>2.2.0</version>
    </dependency>
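
The ${spark.version} placeholder assumes a property defined in the POM, matching the version of the Kafka integration artifact, e.g.:

    <properties>
        <spark.version>2.2.0</spark.version>
    </properties>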

1. KafkaReceiverSpark: receiver-based approach, with offsets and other metadata stored in ZooKeeper

import com.google.common.collect.Lists;
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.regex.Pattern;

/**
 * Java implementation of Spark Streaming + Kafka integration, counting the occurrences of each word
 */
public class KafkaReceiverSpark {
    public static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) throws InterruptedException {

        //master and app name are supplied via spark-submit (see the test section below)
        SparkConf conf = new SparkConf();
        JavaStreamingContext ssc = new JavaStreamingContext(conf, Durations.seconds(5));

        Map<String, Integer> map = new HashMap<String, Integer>();
        //topic and the number of partitions (consumer threads) to use
        map.put("kafka_streaming_topic", 1);
        //ZooKeeper address and port
        String zk = "hadoop000:2181";
        //group id
        String groupId = "test";

        //the input DStream received from Kafka: each record is a (key, message) pair
        JavaPairReceiverInputDStream<String, String> lines = KafkaUtils.createStream(ssc, zk, groupId, map);
        //split each line into words on spaces
        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<Tuple2<String, String>, String>() {
            @Override
            public Iterator<String> call(Tuple2<String, String> stringStringTuple2) throws Exception {
                return Lists.newArrayList(SPACE.split(stringStringTuple2._2)).iterator();
            }
        });
        //map each word to a (word, 1) pair
        JavaPairDStream<String, Integer> mapToPair = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<String, Integer>(s, 1);
            }
        });
        //sum the counts of each word
        JavaPairDStream<String, Integer> wordCounts = mapToPair.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer integer, Integer integer2) throws Exception {
                return integer + integer2;
            }
        });

        wordCounts.print();

        ssc.start();
        ssc.awaitTermination();
    }

}

2. KafkaDirectSpark: direct approach, reading from Kafka without going through ZooKeeper

import com.google.common.collect.Lists;
import kafka.serializer.StringDecoder;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

import java.util.*;
import java.util.regex.Pattern;

public class KafkaDirectSpark {
    public static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) throws InterruptedException {

        SparkConf sparkConf = new SparkConf();
        JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(5));

        //set of topics to consume
        Set<String> topicSet = new HashSet<String>();
        topicSet.add("kafka_streaming_topic");
        //Kafka parameters (no ZooKeeper; the brokers are contacted directly)
        HashMap<String, String> kafkaParam = new HashMap<String, String>();
        kafkaParam.put("metadata.broker.list", "hadoop000:9092");

        JavaPairInputDStream<String, String> lines =
                KafkaUtils.createDirectStream(ssc,
                        String.class,
                        String.class,
                        StringDecoder.class,
                        StringDecoder.class,
                        kafkaParam,
                        topicSet);

        //split each line into words on spaces
        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<Tuple2<String, String>, String>() {
            @Override
            public Iterator<String> call(Tuple2<String, String> stringStringTuple2) throws Exception {
                return Lists.newArrayList(SPACE.split(stringStringTuple2._2)).iterator();
            }
        });
        //map each word to a (word, 1) pair
        JavaPairDStream<String, Integer> mapToPair = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<String, Integer>(s, 1);
            }
        });
        //sum the counts of each word
        JavaPairDStream<String, Integer> wordCounts = mapToPair.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer integer, Integer integer2) throws Exception {
                return integer + integer2;
            }
        });

        wordCounts.print();

        ssc.start();
        ssc.awaitTermination();
    }
}
3. updateStateByKey for real-time state updates: a running summary from the start of the job to the present

import com.google.common.collect.Lists;
import kafka.serializer.StringDecoder;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.*;
import org.apache.spark.streaming.kafka.KafkaUtils;
import scala.Tuple2;

import java.util.*;
import java.util.regex.Pattern;

/**
 * Java implementation of Spark Streaming with Kafka as the data source, counting words and updating the state in real time
 */
public class KafkaUpdateByKey {
    public static final Pattern SPACE = Pattern.compile(" ");


    public static void main(String[] args) throws InterruptedException {

        SparkConf sparkConf = new SparkConf();
        JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(5));
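        //updateStateByKey requires a checkpoint directory to persist the running state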
        ssc.checkpoint("/home/hadoop/data/checkpoint");

        //set of topics to consume
        Set<String> topicSet = new HashSet<String>();
        topicSet.add("kafka_streaming_topic");
        //Kafka parameters (no ZooKeeper; the brokers are contacted directly)
        HashMap<String, String> kafkaParam = new HashMap<String, String>();
        kafkaParam.put("metadata.broker.list", "hadoop000:9092");

        JavaPairInputDStream<String, String> lines =
                KafkaUtils.createDirectStream(ssc,
                        String.class,
                        String.class,
                        StringDecoder.class,
                        StringDecoder.class,
                        kafkaParam,
                        topicSet);

        //split each line into words on spaces
        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<Tuple2<String, String>, String>() {
            @Override
            public Iterator<String> call(Tuple2<String, String> stringStringTuple2) throws Exception {
                return Lists.newArrayList(SPACE.split(stringStringTuple2._2)).iterator();
            }
        });
        //map each word to a (word, 1) pair
        JavaPairDStream<String, Integer> mapToPair = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<String, Integer>(s, 1);
            }
        });
        //sum the counts of each word in the current batch, then fold them into
        //the running total that updateStateByKey keeps per key
        JavaPairDStream<String, Integer> wordCounts = mapToPair.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer integer, Integer integer2) throws Exception {
                return integer + integer2;
            }
        }).updateStateByKey(new Function2<List<Integer>, Optional<Integer>, Optional<Integer>>() {
            @Override
            public Optional<Integer> call(List<Integer> integers, Optional<Integer> integerOptional) throws Exception {
                //start from the previous state for this key, if any
                Integer updateValue = 0;
                if (integerOptional.isPresent()) {
                    updateValue = integerOptional.get();
                }
                //add the counts from the current batch
                for (Integer integer : integers) {
                    updateValue += integer;
                }
                return Optional.of(updateValue);
            }
        });

        wordCounts.print();

        ssc.start();
        ssc.awaitTermination();
    }
}

4. Test

Package the project into a jar and run it with spark-submit. The --packages option downloads the specified dependency from a remote repository; if it has already been downloaded, the jar can be passed with --jars instead (a sketch of that variant follows the command below).

bin/spark-submit --name KafkaUpdateByKey --master local[2] --class KafkaUpdateByKey --packages org.apache.spark:spark-streaming-kafka-0-8_2.11:2.2.0 /home/hadoop/app/myjars/kafka-spark-updateState1.0-SNAPSHOT.jar 
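
A possible --jars submission is sketched below; the local path to the integration jar is hypothetical and should be adjusted, and note that --jars does not resolve transitive dependencies, so the Kafka client jars may need to be listed as well. To feed the job some input, a console producer can be pointed at the broker and topic hard-coded in the examples (assuming the standard Kafka distribution scripts are available):

bin/spark-submit --name KafkaUpdateByKey --master local[2] --class KafkaUpdateByKey --jars /home/hadoop/app/myjars/spark-streaming-kafka-0-8_2.11-2.2.0.jar /home/hadoop/app/myjars/kafka-spark-updateState1.0-SNAPSHOT.jar

bin/kafka-console-producer.sh --broker-list hadoop000:9092 --topic kafka_streaming_topic

Words typed into the producer show up in the job's console output every 5-second batch; with KafkaUpdateByKey the printed counts are cumulative from the start of the job.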

Reproduced from blog.csdn.net/qq_37755661/article/details/79666907