Fetching Kafka data in Java and uploading it to HDFS

Copyright notice: This is an original post by the author, licensed under CC 4.0 BY-SA. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/lucasmaluping/article/details/102566624

1. Fetching data from Kafka in Java

The poll method

KafkaConsumer.poll

package com.zpark.onekafka;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.log4j.Logger;


import java.io.BufferedWriter;
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;


public class ConsumerDemo {


    public static void main(String[] args) {
        Logger logger = Logger.getLogger("logRollingFile");
        // call the method that receives messages
        receiveMsg();
    }

    /**
     * Fetch data from the Kafka topic (animal)
     */
    private static void receiveMsg() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hdp-3:9092");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("group.id","aaaa");
        properties.put("enable.auto.commit", true);
        // create the consumer with the configuration above
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
        consumer.subscribe(Collections.singleton("animal"));
        URI uri = null;
        Configuration conf = null;
        String user = "root";
        try {
            uri = new URI("hdfs://hdp-1:9000");
            conf = new Configuration();
            // dfs.replication: number of replicas in the distributed file system
            conf.set("dfs.replication", "2");
            // dfs.blocksize: block size of the distributed file system
            conf.set("dfs.blocksize", "64m");

        } catch (URISyntaxException e) {
            e.printStackTrace();
        }
        try {
            // write to a local file; the HDFS output stream below is left commented out
            FileOutputStream fos = new FileOutputStream("D:/in.log");
            OutputStreamWriter osw = new OutputStreamWriter(fos);


//            FileSystem fs = FileSystem.get(uri, conf, user);
//            FSDataOutputStream fdos = fs.create(new Path("/cf.txt"));
            while(true) {

                /**
                 * Pull records from Kafka
                 */
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for(ConsumerRecord<String, String> record: records) {
                    String msg = "key:" + record.key()+ ",value:" + record.value() + ",offset:" + record.offset()+",topic:" + record.topic()+"\r\n";
                    System.out.printf("key=%s,value=%s,offset=%s,topic=%s%n",record.key() , record.value() , record.offset(), record.topic());
//                    BufferedWriter bw = new BufferedWriter(osw);
//                    bw.write(msg);
//                    bw.flush();

                }
            }
        }catch (Exception e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }
}
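
The HDFS write path in ConsumerDemo is left commented out. Below is a minimal sketch that wires the consumer output straight into HDFS; the class name ConsumerToHdfsSketch and the target path /animal.log are placeholders, and the NameNode address is the same hdfs://hdp-1:9000 used above.

package com.zpark.onekafka;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.io.BufferedWriter;
import java.io.OutputStreamWriter;
import java.net.URI;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class ConsumerToHdfsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "hdp-3:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("group.id", "aaaa");
        props.put("enable.auto.commit", true);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Collections.singleton("animal"));

        Configuration conf = new Configuration();
        conf.set("dfs.replication", "2");
        // /animal.log is a placeholder target path on HDFS
        FileSystem fs = FileSystem.get(new URI("hdfs://hdp-1:9000"), conf, "root");
        FSDataOutputStream out = fs.create(new Path("/animal.log"));
        BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out));

        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    bw.write("key:" + record.key() + ",value:" + record.value()
                            + ",offset:" + record.offset() + ",topic:" + record.topic() + "\r\n");
                }
                // flush after each batch so the data becomes visible on HDFS sooner
                bw.flush();
            }
        } finally {
            bw.close();
            consumer.close();
        }
    }
}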







2. Sending messages to Kafka from Java

The send method

KafkaProducer.send

package com.zpark.onekafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.Future;

public class ProducerDemo {
    public static void main(String[] args) {
        send();
    }

    private static void send() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers","hdp-2:9092");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // create the producer, build a record for the "animal" topic, and send it
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);
        ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>("animal", "producerdata...");
        Future<RecordMetadata> send = producer.send(producerRecord);
        // close() flushes any buffered records before shutting the producer down
        producer.close();
    }
}
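
Note that send() is asynchronous: it only hands the record to the producer's buffer and returns a Future. If delivery needs to be confirmed, you can block on the Future or register a callback. A small sketch (the class name ProducerAckDemo is just for illustration, the rest is the standard client API):

package com.zpark.onekafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;

public class ProducerAckDemo {
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hdp-2:9092");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);
        ProducerRecord<String, String> record = new ProducerRecord<String, String>("animal", "producerdata...");

        // Variant 1: block on the Future until the broker acknowledges the record.
        RecordMetadata metadata = producer.send(record).get();
        System.out.println("acked: partition=" + metadata.partition() + ", offset=" + metadata.offset());

        // Variant 2: register a callback and let the send stay asynchronous.
        producer.send(record, (md, ex) -> {
            if (ex != null) {
                ex.printStackTrace();
            } else {
                System.out.println("acked at offset " + md.offset());
            }
        });

        producer.close(); // close() also flushes any buffered records
    }
}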

3. Uploading data to HDFS from Java code in IDEA

fs

fs = FileSystem.get()

package com.zpark.onekafka;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.net.URI;
import java.net.URISyntaxException;

public class HdfsTest {
    public static void main(String[] args) {
        URI uri = null;
        Configuration conf = null;
        String user = "root";
        FileSystem fs = null;
        try {
            uri = new URI("hdfs://hdp-1:9000");
            conf = new Configuration();
            // dfs.replication: number of replicas in the distributed file system
            conf.set("dfs.replication", "2");
            // dfs.blocksize: block size of the distributed file system
            conf.set("dfs.blocksize", "64m");
            fs = FileSystem.get(uri, conf, user);

            /**
             * Write a file to HDFS
             */
            FSDataOutputStream out = fs.create(new Path("/bc.txt"));
            OutputStreamWriter outWriter = new OutputStreamWriter(out);
            BufferedWriter bw = new BufferedWriter(outWriter);
            bw.write("hello");
            bw.close();
            out.close();
            fs.close();
        } catch (URISyntaxException e) {
            e.printStackTrace();
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
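
To check the result, the file can be read back through the same FileSystem API. A short sketch, assuming the /bc.txt written above (the class name HdfsReadTest is just for illustration):

package com.zpark.onekafka;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;

public class HdfsReadTest {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://hdp-1:9000"), conf, "root");

        // open /bc.txt (written by HdfsTest) and print it line by line
        try (BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(new Path("/bc.txt"))))) {
            String line;
            while ((line = br.readLine()) != null) {
                System.out.println(line);
            }
        }
        fs.close();
    }
}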

4. pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.zpark</groupId>
    <artifactId>kafkatest</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.12</artifactId>
            <!-- an explicit version is required here; 2.2.0 matches kafka-clients below -->
            <version>2.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.8.1</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.8.1</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.8.1</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/log4j/log4j -->
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>


    </dependencies>



</project>

5. Integrating the consumer with HDFS

Task:

In the ConsumerDemo class:

1. Save the data collected by Flume to a temporary file, e.g. temp201910151440.log

2. Every half hour, upload the temporary file to HDFS (a sketch follows below)
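
A minimal sketch of step 2, assuming the consumer keeps appending to a local temp file such as D:/temp201910151440.log and that /kafka/ is the HDFS target directory (both paths are placeholders): a ScheduledExecutorService copies the temp file to HDFS every 30 minutes.

package com.zpark.onekafka;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class TempFileUploader {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // every 30 minutes, copy the local temp file to HDFS
        scheduler.scheduleAtFixedRate(() -> {
            try {
                Configuration conf = new Configuration();
                FileSystem fs = FileSystem.get(new URI("hdfs://hdp-1:9000"), conf, "root");
                // local temp file and HDFS target are placeholders; generate time-stamped names in practice
                fs.copyFromLocalFile(new Path("D:/temp201910151440.log"),
                        new Path("/kafka/temp201910151440.log"));
                fs.close();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }, 30, 30, TimeUnit.MINUTES);
    }
}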
