// 1.
/**
 * Reads the `user` table from MySQL, filters it with Spark SQL,
 * and appends the matching rows back into the `test.user2` table.
 */
object sparkJdbc {
  // Single source of truth for the connection string (was duplicated in the
  // read and write paths). serverTimezone=UTC avoids MySQL 8.x timezone errors.
  private val JdbcUrl =
    "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=utf-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=UTC"

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("sparkJdbc")
      .getOrCreate()
    try {
      // Load the source table over JDBC.
      // NOTE(review): credentials are hard-coded — fine for a demo, move to
      // configuration for anything real.
      val df: DataFrame = spark.read.format("jdbc")
        .option("url", JdbcUrl)
        .option("driver", "com.mysql.cj.jdbc.Driver")
        .option("dbtable", "user")
        .option("user", "root")
        .option("password", "1234")
        .load()
      df.show()
      // createOrReplaceTempView is idempotent; createTempView throws an
      // AnalysisException if the view name already exists in the session.
      df.createOrReplaceTempView("aaa")
      val sqldf: DataFrame = spark.sql("select * from aaa where id > 5")
      val prop = new Properties()
      prop.put("user", "root")
      prop.put("password", "1234")
      prop.put("driver", "com.mysql.cj.jdbc.Driver")
      // Append (never overwrite) the filtered rows into test.user2.
      sqldf.write.mode("append").jdbc(JdbcUrl, "test.user2", prop)
      println("插入数据库成功")
    } finally {
      // Always release the SparkSession and its local[*] cluster resources.
      spark.stop()
    }
  }
}
// 2.
/**
 * One parsed record of the movie-rating log file
 * ("userId,name,date,score,addr" per line), destined for MySQL.
 *
 * @param userId numeric user id (first CSV column)
 * @param Mname  movie name (name kept as-is for caller compatibility;
 *               idiomatic Scala would use lowerCamelCase `mName`)
 * @param date   date column, kept as the raw string from the log
 * @param score  rating value
 * @param addr   address/region column
 */
final case class movie(userId: Int, Mname: String, date: String, score: Double, addr: String)
/**
 * Reads a comma-separated log file from local disk, parses each line into a
 * [[movie]] record, and appends the rows to the MySQL table `test.movies`.
 */
object jdbc2 {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .appName("jdbc2")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._
    try {
      val lines: Dataset[String] = spark.read.textFile("file:///C:\\Users\\lenovo\\Desktop\\app-2019-12-12.log")
      // Parse each line straight into a movie record (the intermediate
      // Dataset[Array[String]] was unnecessary). `x(3).toDouble` replaces the
      // discouraged postfix form `x(3) toDouble`.
      val df: Dataset[movie] = lines.map { line =>
        val x = line.split(",")
        // NOTE(review): assumes every line has >= 5 comma-separated fields and
        // that columns 0 and 3 parse as Int/Double — a malformed line fails the job.
        movie(x(0).toInt, x(1), x(2), x(3).toDouble, x(4))
      }
      val prop = new Properties()
      prop.put("user", "root")
      prop.put("password", "1234")
      prop.put("driver", "com.mysql.cj.jdbc.Driver")
      // Append the parsed rows into test.movies over JDBC.
      df.write.mode("append").jdbc("jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=utf-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=UTC", "test.movies", prop)
      println("写入数据库成功")
    } finally {
      // Always release the SparkSession, even if parsing or the write fails.
      spark.stop()
    }
  }
}