Spark Streaming + SparkSQL real-time analysis with RabbitMQ + MongoDB + Hive

The snippet below builds a Spark job that consumes a RabbitMQ queue as a DStream, with a HiveContext available for SQL analysis and MongoDB configured as both input and output store.

// Imports needed by the snippet (the RabbitMQUtils package name varies across
// versions of the Stratio spark-rabbitmq library, so check your dependency)
import java.util.HashMap;
import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.hive.HiveContext;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.rabbitmq.RabbitMQUtils;
import org.bson.Document;

import com.mongodb.spark.MongoSpark;
import com.mongodb.spark.rdd.api.java.JavaMongoRDD;
import com.rabbitmq.client.QueueingConsumer;

SparkConf sparkConf = new SparkConf()
        // Remember: only one SparkContext may be active per JVM, or Spark reports an error
        .setAppName("SparkConsumerRabbit")
        .setMaster("local[2]")
        .set("hive.metastore.uris", thrift)   // thrift: metastore URI, defined elsewhere
        .set("spark.sql.warehouse.dir", hdfs) // hdfs: warehouse path, defined elsewhere
        .set("spark.mongodb.input.uri", "mongodb://" + rule.getMUName(jsonStr) + ":" + rule.getMpwd(jsonStr)
                + "@" + rule.getMIp(jsonStr) + ":" + rule.getMport(jsonStr)
                + "/" + rule.getMDBName(jsonStr) + "." + rule.getMtable(jsonStr))
        .set("spark.mongodb.output.uri", "mongodb://root:[email protected]:27010/pachong.test");
JavaSparkContext sc = new JavaSparkContext(sparkConf);
// Streaming mode: the Durations argument is the batch interval, in seconds
JavaStreamingContext jsc = new JavaStreamingContext(sc, Durations.seconds(5));
// HiveSQL approach
HiveContext hiveContext = new HiveContext(sc);
hiveContext.sql("show databases").show();
hiveContext.sql("use" + " " + db);
// MongoDB approach
JavaMongoRDD<Document> rdd = MongoSpark.load(sc);
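The loaded JavaMongoRDD can be filtered before it reaches Spark by pushing an aggregation pipeline down to MongoDB. A minimal sketch, where the status field is an assumption about the collection's schema:

// Hypothetical filter: push a $match stage down to MongoDB before counting
JavaMongoRDD<Document> filtered = rdd.withPipeline(
        java.util.Collections.singletonList(Document.parse("{ $match: { status: 1 } }")));
System.out.println(filtered.count());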
Map<String, String> params = new HashMap<>();
// RabbitMQ connection parameters; the map is passed to the receiver below
params.put("hosts", "192.168.7.96");
params.put("port", "5672");
params.put("userName", "admin");
params.put("password", "admin");
params.put("queueName", "cj_ack");
params.put("durable", "false");
// Each RabbitMQ delivery is mapped to a String by decoding its message body
Function<QueueingConsumer.Delivery, String> handler = message -> new String(message.getBody());
JavaReceiverInputDStream<String> messages = RabbitMQUtils.createJavaStream(jsc, String.class, params, handler);
messages.print();
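Note that the snippet stops at declaring the DStream; nothing runs until the streaming context is started. A minimal sketch of finishing the job, assuming each RabbitMQ message carries a single JSON document (the Document.parse mapping and the Mongo sink are assumptions, not part of the original post):

// Write each micro-batch to the configured spark.mongodb.output.uri;
// the one-JSON-document-per-message format is an assumption
messages.foreachRDD(batch -> {
    if (!batch.isEmpty()) {
        MongoSpark.save(batch.map(Document::parse));
    }
});
jsc.start();             // start the receiver and the batch scheduler
jsc.awaitTermination();  // block until the streaming job is stopped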

Origin www.cnblogs.com/Mr--zhao/p/11344372.html