Spark 通过 JDBC 方式访问 Hive

这里需要导入 hive-jdbc 依赖。
需要注意的是，依赖的版本一定要与集群中 Hive 的版本匹配，切勿使用过高的版本。

<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-jdbc</artifactId>
    <version>1.2.1</version>
</dependency>
import java.sql.{DriverManager}
import com.typesafe.config.ConfigFactory

object JDBCHive {

  /**
   * Executes a parameterized query over Hive JDBC and returns the integer in
   * column 1 of the first result row (intended for `SELECT COUNT(*)`-style queries).
   *
   * Connection settings are read from the Typesafe config keys
   * `hivedriverClassName` and `hiveurl` (see application.conf below).
   *
   * @param sql    SQL text with `?` placeholders
   * @param params 1-based placeholder index -> string value to bind
   * @return the value of column 1 of the first row, or 0 if the result set is empty
   */
  def hiveJDBC_RowCount(sql: String, params: Map[Int, String]): Int = {
    val load = ConfigFactory.load()
    // Loading the driver class is enough to register it with DriverManager;
    // the deprecated newInstance() call was unnecessary.
    Class.forName(load.getString("hivedriverClassName"))
    val conn = DriverManager.getConnection(load.getString("hiveurl"))
    try {
      val statement = conn.prepareStatement(sql)
      try {
        // Bind each placeholder directly from the (index, value) pairs;
        // the original's getOrElse sentinel ("value not exist") could never
        // trigger, since it iterated the map's own key set.
        for ((index, value) <- params) statement.setString(index, value)
        val resultSet = statement.executeQuery()
        try {
          if (resultSet.next()) resultSet.getInt(1) else 0
        } finally resultSet.close()
      } finally statement.close()
      // fix: the original leaked the connection, statement, and result set —
      // none of them were ever closed.
    } finally conn.close()
  }
}

application.conf文件

hivedriverClassName="org.apache.hive.jdbc.HiveDriver"
hiveurl="jdbc:hive2://192.168.157.132:10000/default"

猜你喜欢

转载自blog.csdn.net/weixin_38653290/article/details/85877239
今日推荐