参考网址:https://cwiki.apache.org/confluence/display/Hive/HiveClient#HiveClient-JDBC
Hive_JDBC.java
其实根据官网的wiki即可实现,连代码都不用自己敲,但是有几个地方需要注意。
package com.hihi.hive;

import java.sql.SQLException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.DriverManager;

/**
 * Minimal HiveServer2 JDBC example: (re)creates a table, loads a local file
 * into it on the server side, and runs a few queries.
 */
public class Hive_JDBC {
    // HiveServer2 requires this driver class; the legacy HiveServer used
    // org.apache.hadoop.hive.jdbc.HiveDriver instead.
    private static String driverName = "org.apache.hive.jdbc.HiveDriver";

    public static void main(String[] args) throws SQLException {
        try {
            Class.forName(driverName);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
            System.exit(1);
        }
        // HiveServer2 must already be running on hadoop001:10000.
        // Note the URL scheme for HiveServer2 is jdbc:hive2, not jdbc:hive.
        // try-with-resources guarantees Connection/Statement are closed even on error.
        try (Connection con = DriverManager.getConnection(
                     "jdbc:hive2://hadoop001:10000/default", "root", "");
             Statement stmt = con.createStatement()) {

            String tableName = "jdbc_test";

            // "if exists" keeps the very first run from failing when the table
            // is not there yet. DDL must go through execute(), not executeQuery().
            String sql = "drop table if exists " + tableName;
            System.out.println(sql);
            stmt.execute(sql);

            sql = "create table " + tableName
                    + " (key int, value string) ROW FORMAT DELIMITED FIELDS TERMINATED BY \"\\t\"";
            System.out.println(sql);
            stmt.execute(sql);

            // show tables
            sql = "show tables '" + tableName + "'";
            System.out.println("Running: " + sql);
            try (ResultSet res = stmt.executeQuery(sql)) {
                if (res.next()) {
                    System.out.println(res.getString(1));
                }
            }

            // describe table
            sql = "describe " + tableName;
            System.out.println("Running: " + sql);
            try (ResultSet res = stmt.executeQuery(sql)) {
                while (res.next()) {
                    System.out.println(res.getString(1) + "\t" + res.getString(2));
                }
            }

            // load data into table
            // NOTE: filepath has to be local to the hive server
            // NOTE: /tmp/a.txt is a tab-separated file with two fields per line
            String filepath = "/tmp/a.txt";
            sql = "load data local inpath '" + filepath + "' into table " + tableName;
            System.out.println("Running: " + sql);
            // Statement.execute() returns false for any statement that produces
            // no ResultSet, so its return value does NOT signal failure here;
            // a real failure surfaces as a SQLException instead.
            stmt.execute(sql);

            // select * query
            sql = "select * from " + tableName;
            System.out.println("Running: " + sql);
            try (ResultSet res = stmt.executeQuery(sql)) {
                while (res.next()) {
                    System.out.println(String.valueOf(res.getInt(1)) + "\t" + res.getString(2));
                }
            }

            // aggregate query (this one launches a job on the cluster)
            sql = "select count(1) from " + tableName;
            System.out.println("Running: " + sql);
            try (ResultSet res = stmt.executeQuery(sql)) {
                while (res.next()) {
                    System.out.println(res.getString(1));
                }
            }
        }
    }
}
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>study-hadoop</groupId>
    <artifactId>hive</artifactId>
    <version>1.0</version>

    <properties>
        <!-- Fixed typo: was "projcet.build.sourceEncoding", which Maven
             silently ignores, so the UTF-8 setting never took effect. -->
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <hadoop.version>2.6.0-cdh5.7.0</hadoop.version>
        <hive.version>1.1.0-cdh5.7.0</hive.version>
    </properties>

    <!-- CDH artifacts are not on Maven Central; the Cloudera repo is required. -->
    <repositories>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
    </repositories>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-jdbc</artifactId>
            <version>${hive.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-exec</artifactId>
            <version>${hive.version}</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.45</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.10</version>
        </dependency>
    </dependencies>
</project>
<!-- 来自@若泽大数据 -->