Building an LSTM Neural Network with Keras (Regression)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers import Dense, LSTM
from sklearn.metrics import mean_squared_error

import warnings
warnings.filterwarnings('ignore')

# Read the data (air.csv has no header row; use the first column as the series)
data = pd.read_csv('air.csv', header=None, encoding='utf8')
arr = data.values[:, 0]

# Build the dataset with a sliding window
timespan = 6   # length of each input window
size = 3       # number of samples held out for the test set
dim = 1        # one feature per time step
x, y = [], []
for i in range(len(arr) - timespan):
    x.append(arr[i:i + timespan])   # window of `timespan` consecutive values
    y.append(arr[i + timespan])     # the value immediately after the window
x = np.array(x)
y = np.array(y)

# Scale inputs and targets to [0, 1]
scale_x = preprocessing.MinMaxScaler(feature_range=(0, 1))
scale_y = preprocessing.MinMaxScaler(feature_range=(0, 1))
X = scale_x.fit_transform(x)                  # note: each window position is scaled independently
Y = scale_y.fit_transform(y.reshape(-1, 1))
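
To make the windowing step concrete, here is a minimal sketch of the same sliding-window construction applied to a made-up toy series (the toy array is purely illustrative and is not part of air.csv):

# Illustrative only: sliding window on a toy series, not the air.csv data
toy = np.arange(10)                    # [0, 1, 2, ..., 9]
tx, ty = [], []
for i in range(len(toy) - timespan):
    tx.append(toy[i:i + timespan])     # e.g. tx[0] -> [0 1 2 3 4 5]
    ty.append(toy[i + timespan])       # e.g. ty[0] -> 6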

# Hold out the last `size` samples for testing; reshape inputs to (samples, timespan, dim) for the LSTM
train_size = len(X) - size
lx = X.reshape(X.shape[0], timespan, dim)
x_train, x_test = lx[0:train_size], lx[train_size:]
y_train, y_test = Y[0:train_size], Y[train_size:]
print('Train: x = {} y = {}'.format(x_train.shape, y_train.shape))
print('Test: x = {} y = {}'.format(x_test.shape, y_test.shape))

# Build the network (adam optimizer)
hiddennum = 12        # units in each LSTM layer
batch_size = 20
model = Sequential()
model.add(LSTM(hiddennum, return_sequences=True, input_shape=(timespan, dim)))
model.add(LSTM(hiddennum))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mse')
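
Optionally, model.summary() prints the layer stack and parameter counts, which is a quick sanity check before training:

model.summary()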

model.fit(x_train,y_train,batch_size=batch_size,epochs=100,verbose=0)

# Evaluate on the original scale (RMSE, since np.sqrt is applied to the MSE)
predict_train = scale_y.inverse_transform(model.predict(x_train, batch_size=batch_size))
actual_train = scale_y.inverse_transform(y_train)
train_rmse = np.sqrt(mean_squared_error(actual_train, predict_train))
predict_test = scale_y.inverse_transform(model.predict(x_test, batch_size=batch_size))
actual_test = scale_y.inverse_transform(y_test)
test_rmse = np.sqrt(mean_squared_error(actual_test, predict_test))
print('Number of layers = ', len(model.layers), ' optimizer = adam')
print('Train RMSE = ', round(train_rmse, 4))
print('Test RMSE = ', round(test_rmse, 4))

# Build the same network (rmsprop optimizer)
model = Sequential()
model.add(LSTM(hiddennum,return_sequences=True,input_shape=(timespan,dim)))
model.add(LSTM(hiddennum))
model.add(Dense(1,activation='linear'))
model.compile(optimizer='rmsprop',loss='mse')

model.fit(x_train,y_train,batch_size=batch_size,epochs=100,verbose=0)

# Evaluate on the original scale (RMSE)
predict_train = scale_y.inverse_transform(model.predict(x_train, batch_size=batch_size))
actual_train = scale_y.inverse_transform(y_train)
train_rmse = np.sqrt(mean_squared_error(actual_train, predict_train))
predict_test = scale_y.inverse_transform(model.predict(x_test, batch_size=batch_size))
actual_test = scale_y.inverse_transform(y_test)
test_rmse = np.sqrt(mean_squared_error(actual_test, predict_test))
print('Number of layers = ', len(model.layers), ' optimizer = rmsprop')
print('Train RMSE = ', round(train_rmse, 4))
print('Test RMSE = ', round(test_rmse, 4))

# Build the same network (sgd optimizer)
model = Sequential()
model.add(LSTM(hiddennum,return_sequences=True,input_shape=(timespan,dim)))
model.add(LSTM(hiddennum))
model.add(Dense(1,activation='linear'))
model.compile(optimizer='sgd',loss='mse')

model.fit(x_train,y_train,batch_size=batch_size,epochs=100,verbose=0)

# Evaluate on the original scale (RMSE)
predict_train = scale_y.inverse_transform(model.predict(x_train, batch_size=batch_size))
actual_train = scale_y.inverse_transform(y_train)
train_rmse = np.sqrt(mean_squared_error(actual_train, predict_train))
predict_test = scale_y.inverse_transform(model.predict(x_test, batch_size=batch_size))
actual_test = scale_y.inverse_transform(y_test)
test_rmse = np.sqrt(mean_squared_error(actual_test, predict_test))
print('Number of layers = ', len(model.layers), ' optimizer = sgd')
print('Train RMSE = ', round(train_rmse, 4))
print('Test RMSE = ', round(test_rmse, 4))
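
matplotlib is imported at the top but never used above; as a minimal sketch (assuming the predict_test and actual_test arrays from the last run are still in scope), the test-set fit can be visualized like this:

# Plot actual vs. predicted values for the test set (sketch, uses the last trained model)
plt.figure()
plt.plot(actual_test, label='actual')
plt.plot(predict_test, label='predicted')
plt.legend()
plt.title('LSTM test-set predictions')
plt.show()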


Reposted from blog.csdn.net/qq_42394743/article/details/82954631