InfluxDB usage tutorial: Java version InfluxDB tool class

Foreword:

In the last few lectures, after introducing the use of Influx on Linux and Windows, this section begins to introduce the use of Influx in Java. First, we provide a tool class encapsulated by the InfluxDB Java API, which is convenient for everyone to use directly.

1. InfluxDB tool class

The tool class is presented first, and then the usage method is introduced (updated on November 08, 2021).

package com.common.utils.influxdb;

import org.influxdb.InfluxDB;
import org.influxdb.InfluxDB.ConsistencyLevel;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.*;
import org.influxdb.dto.Point.Builder;

import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * InfluxDB数据库连接操作类
 *
 * @author xuchao
 */
/**
 * Connection/operation wrapper around the influxdb-java client.
 *
 * <p>Holds a single {@link InfluxDB} instance created eagerly in the
 * constructor and exposes convenience methods for database management,
 * queries, and single/batch writes.</p>
 *
 * <p>NOTE(review): this class is not thread-safe with respect to
 * re-initialization, but the underlying {@link InfluxDB} client is safe for
 * concurrent writes/queries once built.</p>
 *
 * @author xuchao
 */
public class InfluxDBConnection {

	// user name
	private String username;
	// password
	private String password;
	// connection URL, must include scheme and port, e.g. "http://host:8086"
	private String openurl;
	// target database name
	private String dbName;
	// retention policy name used for writes
	private String retentionPolicy;

	private InfluxDB influxDB;

	/**
	 * Creates the wrapper and eagerly opens the underlying client connection.
	 *
	 * @param username        user name
	 * @param password        password
	 * @param url             HTTP endpoint of the InfluxDB server (scheme + host + port)
	 * @param dbName          target database name
	 * @param retentionPolicy retention policy for writes; falls back to
	 *                        "autogen" (InfluxDB's default policy) when null or empty
	 */
	public InfluxDBConnection(String username, String password, String url, String dbName, String retentionPolicy) {
		this.username = username;
		this.password = password;
		this.openurl = url;
		this.dbName = dbName;
		this.retentionPolicy = retentionPolicy == null || retentionPolicy.isEmpty() ? "autogen" : retentionPolicy;
		influxDbBuild();
	}

	/**
	 * Creates a database.
	 *
	 * @param dbName name of the database to create
	 */
	@SuppressWarnings("deprecation")
	public void createDB(String dbName) {
		influxDB.createDatabase(dbName);
	}

	/**
	 * Drops a database.
	 *
	 * @param dbName name of the database to drop
	 */
	@SuppressWarnings("deprecation")
	public void deleteDB(String dbName) {
		influxDB.deleteDatabase(dbName);
	}

	/**
	 * Tests whether the server is reachable.
	 *
	 * @return true when the server answered the ping; false on any failure
	 */
	public boolean ping() {
		boolean isConnected = false;
		Pong pong;
		try {
			pong = influxDB.ping();
			if (pong != null) {
				isConnected = true;
			}
		} catch (Exception e) {
			// Best-effort health check: report the failure but return false
			// instead of propagating, so callers can poll safely.
			e.printStackTrace();
		}
		return isConnected;
	}

	/**
	 * Builds (once) and returns the underlying client.
	 *
	 * <p>Database auto-creation is intentionally disabled below: the target
	 * server may sit behind a proxy that rejects CREATE DATABASE.</p>
	 *
	 * @return the shared {@link InfluxDB} client instance
	 */
	public InfluxDB influxDbBuild() {
		if (influxDB == null) {
			influxDB = InfluxDBFactory.connect(openurl, username, password);
		}
		try {
			// if (!influxDB.databaseExists(database)) {
			// influxDB.createDatabase(database);
			// }
		} catch (Exception e) {
			// The server may be behind a dynamic proxy that does not support
			// creating databases, so creation failures are ignored here.
			// e.printStackTrace();
		} finally {
			influxDB.setRetentionPolicy(retentionPolicy);
		}
		influxDB.setLogLevel(InfluxDB.LogLevel.NONE);
		return influxDB;
	}

	/**
	 * Creates a custom retention policy on the given database.
	 *
	 * @param dataBaseName database to attach the policy to
	 * @param policyName   policy name
	 * @param days         retention duration in days
	 * @param replication  number of replicas
	 * @param isDefault    when TRUE, the policy becomes the database default;
	 *                     null is treated as FALSE (no unboxing NPE)
	 */
	public void createRetentionPolicy(String dataBaseName, String policyName, int days, int replication,
			Boolean isDefault) {
		String sql = String.format("CREATE RETENTION POLICY \"%s\" ON \"%s\" DURATION %sd REPLICATION %s ", policyName,
				dataBaseName, days, replication);
		// Boolean.TRUE.equals() guards against a null Boolean argument, which
		// would otherwise throw NullPointerException on auto-unboxing.
		if (Boolean.TRUE.equals(isDefault)) {
			sql = sql + " DEFAULT";
		}
		query(sql);
	}

	/**
	 * Creates the stock retention policy used by this tutorial:
	 * name "hour", duration 30 days, 1 replica, set as default.
	 */
	public void createDefaultRetentionPolicy() {
		String command = String
				.format("CREATE RETENTION POLICY \"%s\" ON \"%s\" DURATION %s REPLICATION %s DEFAULT", "hour", dbName,
						"30d", 1);
		this.query(command);
	}

	/**
	 * Runs a query (or multiple semicolon-separated statements) against the
	 * configured database.
	 *
	 * @param command InfluxQL statement(s)
	 * @return the raw query result; one entry in getResults() per statement
	 */
	public QueryResult query(String command) {
		return influxDB.query(new Query(command, dbName));
	}

	/**
	 * Writes a single point.
	 *
	 * @param measurement measurement (table) name
	 * @param tags        tag set (always stored as strings, indexed)
	 * @param fields      field set; the field type is fixed by the first value written
	 * @param time        point timestamp; pass 0 to let the server assign "now"
	 * @param timeUnit    unit of {@code time}; ignored when {@code time} is 0
	 */
	public void insert(String measurement, Map<String, String> tags, Map<String, Object> fields, long time,
			TimeUnit timeUnit) {
		Builder builder = Point.measurement(measurement);
		builder.tag(tags);
		builder.fields(fields);
		// time == 0 means "no explicit timestamp": the server assigns one.
		if (0 != time) {
			builder.time(time, timeUnit);
		}
		influxDB.write(dbName, retentionPolicy, builder.build());
	}

	/**
	 * Writes a batch of points.
	 *
	 * @param batchPoints pre-assembled batch (database/policy/precision are
	 *                    carried by the batch itself)
	 * @param timeUnit    currently unused; kept for signature compatibility.
	 *                    Precision should be set via BatchPoints.precision().
	 */
	public void batchInsert(BatchPoints batchPoints, TimeUnit timeUnit) {
		influxDB.write(batchPoints);
		// influxDB.enableGzip();
		// influxDB.enableBatch(2000,100,timeUnit);
		// influxDB.disableGzip();
		// influxDB.disableBatch();
	}

	/**
	 * Writes pre-serialized line-protocol records in one request.
	 *
	 * @param database        target database
	 * @param retentionPolicy retention policy
	 * @param consistency     write consistency level
	 * @param timeUnit        precision of the timestamps in the records
	 * @param records         line-protocol strings (BatchPoints.lineProtocol() yields one)
	 */
	public void batchInsert(final String database, final String retentionPolicy, final ConsistencyLevel consistency,
			TimeUnit timeUnit, final List<String> records) {
		influxDB.write(database, retentionPolicy, consistency, timeUnit, records);
	}

	/**
	 * Executes a delete statement.
	 *
	 * @param command delete statement (e.g. DELETE FROM ...)
	 * @return the server-reported error message, or null on success
	 */
	public String deleteMeasurementData(String command) {
		QueryResult result = influxDB.query(new Query(command, dbName));
		return result.getError();
	}

	/**
	 * Closes the underlying client connection.
	 */
	public void close() {
		influxDB.close();
	}

	/**
	 * Builds a {@link Point} without writing it.
	 *
	 * @param measurement measurement (table) name
	 * @param time        point timestamp
	 * @param timeUnit    unit of {@code time}
	 * @param tags        tag set
	 * @param fields      field set
	 * @return the assembled point
	 */
	public Point pointBuilder(String measurement, long time, TimeUnit timeUnit, Map<String, String> tags,
			Map<String, Object> fields) {
		Point point = Point.measurement(measurement).time(time, timeUnit).tag(tags).fields(fields).build();
		return point;
	}

}



Dependent Jar packages:

<dependency>
		<groupId>org.influxdb</groupId>
		<artifactId>influxdb-java</artifactId>
		<version>2.10</version>
</dependency>

or:

  <dependency>
            <groupId>org.influxdb</groupId>
            <artifactId>influxdb-java</artifactId>
            <version>2.21</version>
  </dependency>

Both versions above have been tested and available.

2. Use tools to query data

InfluxDB supports querying multiple SQL statements in one request; the statements can be separated by semicolons. The following only demonstrates how to parse the result set returned by the query when there is only one SQL statement.

public static void main(String[] args) {
	// The URL must include scheme and port (InfluxDBFactory.connect rejects a bare IP).
	InfluxDBConnection influxDBConnection = new InfluxDBConnection("admin", "admin", "http://1.1.1.1:8086", "db-test", "hour");
	QueryResult results = influxDBConnection
			.query("SELECT * FROM measurement where name = '大脑补丁'  order by time desc limit 1000");
	// getResults() holds one entry per SQL statement; we issued a single
	// statement, so only the first result set is relevant.
	Result oneResult = results.getResults().get(0);
	if (oneResult.getSeries() != null) {
		List<List<Object>> valueList = oneResult.getSeries().stream().map(Series::getValues)
				.collect(Collectors.toList()).get(0);
		if (valueList != null && valueList.size() > 0) {
			for (List<Object> value : valueList) {
				// value of column 1 in this row (null-safe)
				String field1 = value.get(0) == null ? null : value.get(0).toString();
				// value of column 2 in this row (null-safe)
				String field2 = value.get(1) == null ? null : value.get(1).toString();
				// TODO apply your own business logic to the extracted fields
			}
		}
	}
}

When fetching data, pay attention to null checks. In this example, first guard with oneResult.getSeries() != null, then call oneResult.getSeries().get(0).getValues() to obtain the result set of the first SQL statement, and finally traverse the valueList to retrieve the target field values from each record.

The result set encapsulated by InfluxDB is a bit deep, mainly because it supports multiple SQL one-time queries, which can improve the query speed, which is different from the use of relational databases.

3. Use the InfluxDB tool class to insert a single piece of data

InfluxDB的字段类型,由第一条插入的值的类型决定;tags的类型只能是String型,可以作为索引,提高检索速度。
	public static void main(String[] args) {
		InfluxDBConnection influxDBConnection = new InfluxDBConnection("admin", "admin", "1.1.1.1", "db-test", "hour");
		Map<String, String> tags = new HashMap<String, String>();
		tags.put("tag1", "标签值");
		Map<String, Object> fields = new HashMap<String, Object>();
		fields.put("field1", "String类型");
		// 数值型,InfluxDB的字段类型,由第一天插入的值得类型决定
		fields.put("field2", 3.141592657);
		// 时间使用毫秒为单位
		influxDBConnection.insert("表名", tags, fields, System.currentTimeMillis(), TimeUnit.MILLISECONDS);
	}

4. Using the InfluxDB tool class to write data in batches in two ways

Note: With either method, all points written through one BatchPoints must target the same database and carry the same tags. If the tags differ, the points need to be placed in different BatchPoints objects; otherwise the written data can become disordered.

Method 1: After assembling the data through BatchPoints, insert it into the database cyclically.

public static void main(String[] args) {
	// The URL must include scheme and port (InfluxDBFactory.connect rejects a bare IP).
	InfluxDBConnection influxDBConnection = new InfluxDBConnection("admin", "admin", "http://1.1.1.1:8086", "db-test", "hour");
	Map<String, String> tags = new HashMap<String, String>();
	tags.put("tag1", "标签值");
	Map<String, Object> fields1 = new HashMap<String, Object>();
	fields1.put("field1", "abc");
	// Numeric field: an InfluxDB field's type is fixed by the first value inserted.
	fields1.put("field2", 123456);
	Map<String, Object> fields2 = new HashMap<String, Object>();
	fields2.put("field1", "String类型");
	fields2.put("field2", 3.141592657);
	// One record each. pointBuilder takes a TimeUnit for the timestamp;
	// the original snippet omitted it, which does not compile against the tool class.
	Point point1 = influxDBConnection.pointBuilder("表名", System.currentTimeMillis(), TimeUnit.MILLISECONDS, tags, fields1);
	Point point2 = influxDBConnection.pointBuilder("表名", System.currentTimeMillis(), TimeUnit.MILLISECONDS, tags, fields2);
	// Add each record to its own BatchPoints (different tags => different batches).
	BatchPoints batchPoints1 = BatchPoints.database("db-test").tag("tag1", "标签值1").retentionPolicy("hour")
			.consistency(ConsistencyLevel.ALL).build();
	BatchPoints batchPoints2 = BatchPoints.database("db-test").tag("tag2", "标签值2").retentionPolicy("hour")
			.consistency(ConsistencyLevel.ALL).build();
	batchPoints1.point(point1);
	batchPoints2.point(point2);
	// Write both batches to the database.
	influxDBConnection.batchInsert(batchPoints1, TimeUnit.MILLISECONDS);
	influxDBConnection.batchInsert(batchPoints2, TimeUnit.MILLISECONDS);
}

Method 2: Assemble the data through BatchPoints, serialize it, and insert it into the database at one time.

public static void main(String[] args) {
	// The URL must include scheme and port (InfluxDBFactory.connect rejects a bare IP).
	InfluxDBConnection influxDBConnection = new InfluxDBConnection("admin", "admin", "http://1.1.1.1:8086", "db-test", "hour");
	Map<String, String> tags1 = new HashMap<String, String>();
	tags1.put("tag1", "标签值");
	Map<String, String> tags2 = new HashMap<String, String>();
	tags2.put("tag2", "标签值");
	Map<String, Object> fields1 = new HashMap<String, Object>();
	fields1.put("field1", "abc");
	// Numeric field: an InfluxDB field's type is fixed by the first value inserted.
	fields1.put("field2", 123456);
	Map<String, Object> fields2 = new HashMap<String, Object>();
	fields2.put("field1", "String类型");
	fields2.put("field2", 3.141592657);
	// One record each. (Note: do not use System.currentTimeMillis() in production --
	// at high volume duplicate timestamps overwrite each other and data is lost;
	// use the data's own timestamp. This is for demonstration only.)
	Point point1 = influxDBConnection.pointBuilder("表名", System.currentTimeMillis(), TimeUnit.MILLISECONDS, tags1, fields1);
	Point point2 = influxDBConnection.pointBuilder("表名", System.currentTimeMillis(), TimeUnit.MILLISECONDS, tags2, fields2);
	BatchPoints batchPoints1 = BatchPoints.database("db-test").tag("tag1", "标签值1")
			.retentionPolicy("hour").consistency(ConsistencyLevel.ALL).precision(TimeUnit.MILLISECONDS).build();
	// Add the first record to its batch.
	batchPoints1.point(point1);
	BatchPoints batchPoints2 = BatchPoints.database("db-test").tag("tag2", "标签值2")
			.retentionPolicy("hour").consistency(ConsistencyLevel.ALL).precision(TimeUnit.MILLISECONDS).build();
	// Add the second record to its batch.
	batchPoints2.point(point2);
	// Serialize the batches to line protocol and write them in one request
	// for maximum write throughput.
	List<String> records = new ArrayList<String>();
	records.add(batchPoints1.lineProtocol());
	records.add(batchPoints2.lineProtocol());
	// Bulk-insert both records into the database.
	influxDBConnection.batchInsert("db-test", "hour", ConsistencyLevel.ALL, TimeUnit.MILLISECONDS, records);
}

The second method is recommended. Data belonging to one database can be written in batches at one time, and the writing speed is the fastest.

Summarize:

The reading and writing of Influx in Java, as well as the two ways of writing data in batches, have been introduced to you today. I hope this tool class and case can help you.

Guess you like

Origin http://43.154.161.224:23101/article/api/json?id=324152424&siteId=291194637