telegraf用于收集服务器的负载信息,存储到influxdb时序数据库中,然后使用python读取出来。
# --- Install InfluxDB 1.7 from the official RPM and manage retention policies ---
# Install influxdb
wget https://dl.influxdata.com/influxdb/releases/influxdb-1.7.6.x86_64.rpm
sudo yum localinstall influxdb-1.7.6.x86_64.rpm -y
service influxdb start
service influxdb status
# Show the database's existing retention policies (run inside the influx shell)
show retention policies on telegraf
# Create a 2-hour retention policy and make it the default
create retention policy "2_hours" on "telegraf" duration 2h replication 1 default
# Drop the policy
drop retention policy "2_hours" on "telegraf"
# Drop a database
drop database my_telegraf
# --- Install Telegraf 1.11 from the official RPM ---
wget https://dl.influxdata.com/telegraf/releases/telegraf-1.11.5-1.x86_64.rpm
sudo yum localinstall telegraf-1.11.5-1.x86_64.rpm
systemctl enable telegraf.service ## start the systemd service automatically at boot
systemctl start telegraf
telegraf -config /etc/telegraf/telegraf.conf # or start it manually in the foreground
# Edit the config file /etc/telegraf/telegraf.conf
[[outputs.influxdb]]
urls = ["http://localhost:8086"] # required
database = "telegraf" # required
retention_policy = ""
precision = "s"
timeout = "5s"
username = "telegraf"
password = "password"
[agent]
## Default data collection interval for all inputs
interval = "1s" # default was 10s; 1s collects far more samples, so the timestamps are much denser
# NOTE: the VM was originally created with 4 cores / 4 threads, so telegraf kept
# emitting extra per-core series (cpu15) that polluted the data; recreating the
# VM with 1 core / 1 thread removed the interference.
nano /etc/telegraf/telegraf.conf
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "1s" ##### was 10; collect a sample every 1 second
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
urls = ["http://127.0.0.1:8086"] ##### HTTP endpoint of the local InfluxDB
## The target database for metrics; will be created as needed.
## For UDP url endpoint database needs to be configured on server side.
database = "telegraf" #### metrics are written to the "telegraf" database in InfluxDB
# nano shell.sh
#!/bin/sh
# Load generator: hit the target service 5000 times so telegraf has
# CPU/network activity to record.
# (Fixed: the original shebang was written "# !/bin/sh" — the space turns it
# into an ordinary comment and the script would run under the caller's shell.)
for i in $(seq 5000)
do
    curl 172.16.0.118:30002
    echo "数字是 $i"
done
# loop-access the service
from influxdb import InfluxDBClient
import datetime
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import ujson
# Query the aggregate ('cpu-total') idle-CPU series that telegraf wrote into
# InfluxDB, via the influxdb-python client.
# NOTE(review): connects as user 'root' with an empty password — confirm this
# matches the server's auth settings.
# Earlier experiments with other client calls, kept for reference:
# result = InfluxDBClient('172.16.0.117',8086,'root','','telegraf').get_list_measurements()
# result = InfluxDBClient('172.16.0.117',8086,'root','','telegraf').get_list_database()
# result = InfluxDBClient('172.16.0.117',8086,'root','','telegraf').get_list_users()
# result = InfluxDBClient('172.16.0.117',8086,'root','','telegraf').get_list_retention_policies('telegraf')
# result = InfluxDBClient('172.16.0.117',8086,'root','','telegraf').ping()
# result = InfluxDBClient('172.16.0.117',8086,'root','','telegraf').get_list_continuous_queries()
# result = InfluxDBClient('172.16.0.117',8086,'root','','telegraf').get_list_series('telegraf','cpu',)
# result = InfluxDBClient('172.16.0.117',8086,'root','','telegraf').get_list_series('telegraf','cpu',)
# result = InfluxDBClient('111.229.175.232',8086,'root','','telegraf').query('select sample("usage_idle",12) from "cpu"')
result = InfluxDBClient('172.16.0.119',8086,'root','','telegraf').query("select usage_idle from cpu WHERE cpu = 'cpu-total' limit 10000")
# query() returns a ResultSet; list(...) yields one list of points per series,
# and [0] takes the points of the single matching series — presumably a list of
# dicts with 'time' and 'usage_idle' keys; verify against the client version.
result = list(result)
result = result[0]
print(len(result))
print(result)
# Reshape the first 4200 queried samples into a 7 x 600 float64 matrix.
# NOTE(review): assumes the query above returned at least 4200 points
# (limit was 10000) — an IndexError here means the series is shorter.
y = [np.float64(result[k].get('usage_idle')) for k in range(4200)]
# Slice the flat series into 7 consecutive windows of 600 samples each.
# (The original pre-allocated x as [[0] * 600] * 7, which aliases ONE inner
# list seven times; it only worked because every row was then replaced
# wholesale. Building the rows directly avoids that trap.)
x = [y[i * 600 : (i + 1) * 600] for i in range(7)]
print(type(x)) # <class 'list'>
x = np.array(x).astype('float64')  # important: force a real float64 ndarray
print(type(x)) # <class 'numpy.ndarray'>
# torch.save(x, open('y.pt', 'wb'))  # one-time snapshot; reloaded below
data = torch.load('y.pt')
print(type(data))
print('data.shape',data.shape)
###########################################################################
# Plot the 7th 600-sample window and report any dip of idle CPU below 50%.
# NOTE(review): expects `data` (loaded above) to have at least 7 rows of
# 600 samples each — confirm against the saved snapshot.
# (The original built b as [[0] * 600] * 7, overwrote b[0] with data[6],
# then took b[0] back out — which is exactly b = data[6].)
b = data[6]
print('len(b)',len(b))
for i in range(600):
    if b[i] < 50:
        print(i)  # index of a sample where idle CPU dropped below 50%
a = list(range(600))
plt.plot(a,b,'C3',lw=0.1)
plt.savefig('o.pdf')
plt.close()
###########################################################################
###########################################################################
# print(len(data))
# data = torch.Tensor(data)
# print(data)
# xx = data[:1, :-1]
# print(xx)
# xx = xx[0]
# print(xx)
# print(xx.shape)
# print(range(6))
# print(type(y))
# print(result[k].get('usage_idle'))
# print(time.time())
# x = [{1632355200000000000: 99.5983935742884},
# {1632355210000000000: 98.49699398803367},
# {1632355220000000000: 99.29789368104613},
# {1632355230000000000: 99.59798994973985},
# {1632355240000000000: 99.398797595141},
# {1632355250000000000: 98.99799599199888},
# {1632355260000000000: 98.39357429722543},
# {1632355270000000000: 97.89368104306695},
# {1632355280000000000: 99.59839357436114},
# {1632355290000000000: 99.3975903613965}]
# x = [[1632355200000000000, 99.5983935742884],
# [1632355210000000000, 98.49699398803367],
# [1632355220000000000, 99.29789368104613],
# [1632355230000000000, 99.59798994973985],
# [1632355240000000000, 99.398797595141],
# [1632355250000000000, 98.99799599199888],
# [1632355260000000000, 98.39357429722543],
# [1632355270000000000, 97.89368104306695],
# [1632355280000000000, 99.59839357436114],
# [1632355290000000000, 99.3975903613965]]
# for j in range(10):
# x[j][0] = time.localtime( x[j][0] / 1000000000)
# torch.save(x, open('x.pt', 'wb'))
# data = torch.load('x.pt')
# print(data)
# print(type(data[0]))
# for i in range(10):
# # data[i][0] = time.localtime(data[i][0])
# # print(data[i][0])
# print(data[i][1])
# ts_infos = time.localtime(1632355290000000000)
# ts_infos = time.localtime(1632355290000000000 / 1000000000)
# print(ts_infos)