import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.autograd import Variable

# Configure matplotlib so CJK labels and minus signs render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Load the exchange-rate spreadsheet.
# NOTE: the `encoding=` argument was removed from pd.read_excel (pandas >= 1.0).
filename = 'RMB_EU.xltx'
df = pd.read_excel(filename)

# Keep only the timestamp and the daily-high price.
# NOTE(review): the original mixed 'high' / 'height' / '高' for the same
# column; unified to 'high' here — confirm against the actual spreadsheet.
data = df.loc[:, ['time', 'high']]

# Min-max normalisation of the 'high' column into [0, 1].
data['high'] = (data['high'] - data['high'].min()) / (data['high'].max() - data['high'].min())


def create_dataset(dataset, len_x=7, len_y=7):
    """Slice a 1-D series into sliding windows.

    Parameters
    ----------
    dataset : sequence of floats (indexable, with len()).
    len_x : length of each input window.
    len_y : length of each target window.

    Returns
    -------
    (X, Y, Z) as numpy arrays:
      X - input windows of length ``len_x``;
      Y - target windows of length ``len_y`` immediately following each X;
      Z - the trailing input windows that have no complete target
          (used later for out-of-sample forecasting).
    """
    data_X, data_Y, data_Z = [], [], []
    for i in range(len(dataset) - (len_x + len_y) + 1):
        data_X.append(dataset[i:i + len_x])
        data_Y.append(dataset[i + len_x:i + len_x + len_y])
    # Trailing windows whose targets would run past the end of the series.
    # (The original line here was garbled by machine translation; this is
    # the reconstruction consistent with the slices above.)
    for i in range(len(dataset) - (len_x + len_y) + 1, len(dataset) - len_x + 1):
        data_Z.append(dataset[i:i + len_x])
    return np.array(data_X), np.array(data_Y), np.array(data_Z)


# Build the windowed dataset: 30-step inputs predicting the next 30 steps.
X, Y, Z = create_dataset(data['high'], 30, 30)

# 70/30 chronological train/test split.
train_size = int(len(X) * 0.7)
test_size = len(X) - train_size
train_X, test_X = X[:train_size], X[train_size:]
train_Y, test_Y = Y[:train_size], Y[train_size:]

# Reshape to (batch, seq_len=1, features=30), the layout fed to the LSTM.
train_X = train_X.reshape(-1, 1, 30)
train_Y = train_Y.reshape(-1, 1, 30)
test_X = test_X.reshape(-1, 1, 30)
pred_Z = Z.reshape(-1, 1, 30)

# Convert to tensors (test_Y stays a numpy array — only used for plotting).
train_X = torch.from_numpy(train_X)
train_Y = torch.from_numpy(train_Y)
test_X = torch.from_numpy(test_X)
pred_Z = torch.from_numpy(pred_Z)

# Move to GPU only when one is present (the original called .cuda()
# unconditionally, which crashes on CPU-only machines).
if torch.cuda.is_available():
    train_X = train_X.cuda()
    train_Y = train_Y.cuda()
    test_X = test_X.cuda()
    pred_Z = pred_Z.cuda()

# 2-layer LSTM, 30 input features -> 30 hidden units.  Cast to double so the
# weights match the float64 tensors produced by torch.from_numpy above.
rnn = nn.LSTM(30, 30, 2)
if torch.cuda.is_available():
    rnn = rnn.cuda()
rnn = rnn.double()
# Training setup: mean-squared error minimised with Adam.
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=1e-2)

# Train for 50 epochs, full-batch.
for e in range(50):
    var_X = Variable(train_X)
    var_Y = Variable(train_Y)
    # Forward pass.
    out, (h, c) = rnn(var_X)
    loss = criterion(out, var_Y)
    # Backward pass.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # loss.data[0] was removed in PyTorch 0.4; loss.item() is the replacement.
    print('Epoch: {}, Loss: {:.5f}'.format(e + 1, loss.item()))

# Switch to evaluation mode and predict on the held-out test windows.
model = rnn.eval()
var_data = Variable(test_X)
pred_test, (th, tc) = model(var_data)
pred_test = pred_test.reshape(-1, 30)
pred_test = pred_test.cpu().detach().numpy()

# Out-of-sample forecast from the trailing windows Z.  Computed BEFORE the
# plotting below — the original plotted pred_1 before defining it.
pred, (_, _) = model(pred_Z)
pred = pred.cpu().detach().numpy()
pred = pred.reshape(-1, 30)
pred_1 = np.mean(pred, axis=1)  # average the 30-step forecast per window

# Plot actual vs predicted values.
# NOTE(review): the original mixed '时间' and 'Time' for the timestamp
# column; unified to 'time' to match the extraction step — confirm against
# the spreadsheet's real column name.
plt.figure(figsize=(12, 8))
plt.plot(data['time'][train_size:-59], pred_test[:, 6], 'r', label='train')
plt.plot(data['time'][train_size:-59], test_Y[:, 6], 'b', label='real')
plt.plot(data['time'][-30:], pred_1, 'g', label='prediction')
plt.legend(loc='best')