将 MySQL 中的数据导入到 HDFS(Spark 程序实现)

版权声明:https://blog.csdn.net/qq_16760101/article/details/84852743

程序为:

import org.apache.spark.sql.SparkSession

object import_data {
  /**
   * Reads the `temp` table from MySQL over JDBC and writes it to HDFS as a
   * header-prefixed, comma-delimited CSV file.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("MysqlDemo")
      .master("local")
      .getOrCreate()
    try {
      // NOTE: the original code set "delimiter" and a misspelled "heard"
      // ("header") option on this JDBC read; both are CSV-source options and
      // are silently ignored by the JDBC source, so they are dropped here.
      val jdbcDF = spark.read.format("jdbc")
        .option("url", "jdbc:mysql://ip:xxxx/test?useUnicode=true&characterEncoding=utf-8")
        .option("dbtable", "temp")
        .option("user", "root")
        .option("password", "123")
        .load()
      // DataFrameWriter.save returns Unit, so the original `val res = ...`
      // binding was meaningless and has been removed.
      jdbcDF.write.format("csv")
        .option("delimiter", ",")
        .option("header", true) // fixed typo: was "heard", which Spark ignores
        .save("hdfs://ip:xxxx/a.txt")
    } finally {
      spark.stop() // release the local Spark context even if the job fails
    }
  }
}

猜你喜欢

转载自blog.csdn.net/qq_16760101/article/details/84852743
今日推荐