Flink入门示例——wordCount(二)

说明(批处理)

  • 读取HDFS数据
  • 将处理结果写入HDFS

代码示例

package com.test

import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem.WriteMode

/**
  * Batch WordCount: reads text data from HDFS and writes the aggregated
  * word counts back to HDFS as CSV.
  */
object WordCountBatch {

  /**
    * Entry point. Expects exactly two arguments, e.g.
    * hdfs://artemis-02:9000/tmp/lvxw/tmp/logs hdfs://artemis-02:9000/tmp/lvxw/tmp/out
    *
    * @param args args(0) = input directory, args(1) = output directory
    */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      println(s"${this.getClass.getSimpleName} requires exactly two params: inputDir outputDir")
      System.exit(1)
    }

    // On Windows, access HDFS remotely under the "hadoop" user identity.
    System.setProperty("HADOOP_USER_NAME", "hadoop")
    val Array(inputDir, outputDir) = args

    val env = ExecutionEnvironment.getExecutionEnvironment

    val text = env.readTextFile(inputDir)

    val result = text
      .flatMap(_.split("\\s+")) // split on runs of whitespace, not single chars,
      .filter(_.nonEmpty)       // and drop empty tokens (e.g. from leading whitespace)
      .map((_, 1))
      .groupBy(0)
      .sum(1)

    // Write with parallelism 2, overwriting any previous output directory.
    result.setParallelism(2).writeAsCsv(outputDir, "\n", ",", WriteMode.OVERWRITE)
    env.execute(this.getClass.getSimpleName)
  }
}

猜你喜欢

转载自blog.csdn.net/believe2017slwx/article/details/79976211