Using the InfluxDB time-series database from Java

This article introduces how to use the InfluxDB time-series database from Java. It may be a useful reference for anyone interested in the topic.

Without further ado, here is the code:

1. Add the relevant dependency to pom.xml, as follows:

<!-- InfluxDB client dependency -->
<dependency>
<groupId>org.influxdb</groupId>
<artifactId>influxdb-java</artifactId>
<version>2.8</version>
</dependency>

2. InfluxDB wrapper/utility class:

package com.mt.core.util;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import org.influxdb.InfluxDB;
import org.influxdb.InfluxDB.ConsistencyLevel;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;
import org.influxdb.dto.Point.Builder;
import org.influxdb.dto.Pong;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;

import lombok.Data;

/ **
* InfluxDB database connection operation class
*
* @author Simon
* /

public class InfluxDBConnection {

// username
Private String username;
// password
private String password;
// 连接地址
private String openurl;
// 数据库
private String database;
// 保留策略
private String retentionPolicy;

private InfluxDB influxDB;

public InfluxDBConnection(String username, String password, String openurl, String database,
String retentionPolicy) {
this.username = username;
this.password = password;
this.openurl = openurl;
this.database = database;
this.retentionPolicy = retentionPolicy == null || retentionPolicy.equals("") ? "autogen" : retentionPolicy;
influxDbBuild();
}
/**
* 创建数据库
*
* @param dbName
*/
@SuppressWarnings("deprecation")
public void createDB(String dbName) {
influxDB.createDatabase(dbName);
}
/**
* 删除数据库
*
* @param dbName
*/
@SuppressWarnings("deprecation")
public void deleteDB(String dbName) {
influxDB.deleteDatabase(dbName);
}
/**
* 测试连接是否正常
*
* @return true 正常
*/
public boolean ping() {
boolean isConnected = false;
Pong pong;
try {
pong = influxDB.ping();
if (pong != null) {
isConnected = true;
}
} catch (Exception e) {
e.printStackTrace();
}
isConnected return;
}

/ **
* database connection sequence, if there is created
*
* @return
* /
public InfluxDB influxDbBuild () {
IF (influxDB == null) {
influxDB = InfluxDBFactory.connect (OpenURL, username, password);
}
the try {
// {IF (influxDB.databaseExists (database)!)
// influxDB.createDatabase (database);
//}
} the catch (Exception E) {
// set the dynamic proxy database may not support the creation of a database
// e.printStackTrace ();
} {the finally
influxDB.setRetentionPolicy (RetentionPolicy);
}
influxDB.setLogLevel (InfluxDB.LogLevel.NONE);
return influxDB;
}

/ **
* Create custom retention policy
*
@Param policyName *
* Policy name
* @param DURATION
* Save Days
* @param Replication
* Save the number of copies
* @param isDefault
* whether the default retention policy is set
* /
public void createRetentionPolicy (policyName String, String DURATION, Replication int, Boolean isDefault ) {
String SQL = String.format ( "the CREATE The RETENTION POLICY"% S "the ON"% S "DURATION Of REPLICATION% S% S", policyName,
Database, DURATION, Replication);
IF (The isDefault) {
SQL = SQL + "the DEFAULT ";
}
this.query (SQL);
}
/ **
* Create a default retention policy
*
* @param policy name: default, save the number of days: 30 days to save the number of copies: 1
* Set the default retention policy
* /
public void createDefaultRetentionPolicy () {
String command = String.format("CREATE RETENTION POLICY "%s" ON "%s" DURATION %s REPLICATION %s DEFAULT",
"default", database, "30d", 1);
this.query(command);
}

/**
* 查询
*
* @param command
* 查询语句
* @return
*/
public QueryResult query(String command) {
return influxDB.query(new Query(command, database));
}

/**
* 插入
*
* @param measurement
* 表
* @param tags
* 标签
* @param fields
* 字段
*/
public void insert(String measurement, Map<String, String> tags, Map<String, Object> fields, long time,
TIMEUNIT TIMEUNIT) {
Builder builder = Point.measurement(measurement);
builder.tag(tags);
builder.fields(fields);
if (0 != time) {
builder.time(time, timeUnit);
}
influxDB.write(database, retentionPolicy, builder.build());
}

/**
* 批量写入测点
*
* @param batchPoints
*/
public void batchInsert(BatchPoints batchPoints) {
influxDB.write(batchPoints);
// influxDB.enableGzip();
// influxDB.enableBatch(2000,100,TimeUnit.MILLISECONDS);
// influxDB.disableGzip();
// influxDB.disableBatch();
}
/**
* 批量写入数据
*
* @param database
* 数据库
@Param RetentionPolicy *
* preservation strategies
* @param Consistency
* consistency
* @param Records
* data to be saved (call BatchPoints.lineProtocol () to obtain a the Record)
* /
public void batchInsert (Final String Database, String RetentionPolicy Final, Final Consistency ConsistencyLevel,
Final List <String> Records) {
influxDB.write (Database, RetentionPolicy, Consistency, Records);
}
/ **
* delete
*
* @param Command
* delete statement
* @return returns an error message
* /
public String deleteMeasurementData ( Command String) {
the QueryResult influxDB.query Result = (new new Query (Command, Database));
return result.getError ();
}

/ **
* 关闭数据库
*/
public void close() {
influxDB.close();
}

/**
* 构建Point
*
* @param measurement
* @param time
* @param fields
* @return
*/
public Point pointBuilder(String measurement, long time, Map<String, String> tags, Map<String, Object> fields) {
Point point = Point.measurement(measurement).time(time, TimeUnit.MILLISECONDS).tag(tags).fields(fields).build();
return point;
}

}

3. Querying data

InfluxDB supports querying with multiple statements at once; separate the statements with semicolons.

static void main public (String [] args) { 
InfluxDBConnection influxDBConnection new new InfluxDBConnection = ( "the root", "Password01", "localhost", "DevOps", "tk_test");
the QueryResult Results = influxDBConnection
.query ( "the FROM MT the SELECT * Time desc limit 1000 by the Order ");
//results.getResults () is simultaneously query multiple SQL statements that return values, here we have only one SQL, so just take the first result set can be.
OneResult = results.getResults the Result () GET (0);.
IF (! OneResult.getSeries () = null) {
List <List <Object >> valueList = oneResult.getSeries () Stream () Map (Series ::.. getValues)
. .collect (Collectors.toList ()) GET (0);
! IF (valueList = null && valueList.size ()> 0) {
for (List <Object> value: valueList) {
the Map <String,

Value.get field1 = String (0) == null null: value.get (0) .toString ();?
// database field 2 values
String field2 = value.get (1) == null null:? Value .get (. 1) .toString ();
// do the TODO your own business logic field with the extracted ......
}
}
}
}

4. Inserting data

In InfluxDB, a field's type is determined by the first value inserted into it. Tags can only be of type String; they are indexed, which improves retrieval speed.

public static void main (String [] args) { 
InfluxDBConnection influxDBConnection = new new InfluxDBConnection ( "the root", "Password01", "localhost", "DevOps", "tk_test");
the Map <String, String> Tags = new new the HashMap <String , String> ();
tags.put ( "Tag1", "tag value");
the Map <String, Object> = new new Fields the HashMap <String, Object> ();
fields.put ( "field1", "String type" );
// numeric, InfluxDB field type, the first day of the inserted type of the value determined
fields.put ( "Field2", 3.141592657);
// time using milliseconds
influxDBConnection.insert ( "table", tags, Fields, System.currentTimeMillis (), TimeUnit.MILLISECONDS);
}

Summary: InfluxDB has strong concurrent-write capability. I have not run benchmarks myself, but based on discussions with other users, tens of thousands of write requests per second are easily achievable on an ordinary mainstream configuration. Because of how InfluxDB works, supporting that level of concurrent writing requires sufficient memory capacity and speed. One more point worth understanding: InfluxDB maintains many time series (series) internally, and the combination of database name, retention policy, measurement name (similar to a MySQL table), and tag set together identifies a series. That means if you copy one user's data and store it twice — same database, same measurement — but the first copy has a 29-day retention policy and the second a 30-day retention policy, they will be maintained as two separate series. The number of series is subject to a limit.

InfluxDB's main use case is as a monitoring database.

java database using influxDB timing

 

Guess you like

Origin www.cnblogs.com/mengtaoadmin/p/11184043.html
Recommended