7月之前数据工作:Keras版本时间序列预测

SimpleRNN-single step:预测单个值


# SimpleRNN, single-step forecasting: predict one scalar per input sequence.
# Expects train_x of shape (samples, timesteps, features) and train_y of
# matching length to be defined earlier in the script.
import os

from keras.models import Sequential
from keras.layers import Embedding, SimpleRNN, Dense

model = Sequential()
# Input shape (timesteps, features) is taken straight from the training data.
model.add(SimpleRNN(32, input_shape=(train_x.shape[1], train_x.shape[2])))
model.add(Dense(1))  # one scalar output per sequence
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(train_x, train_y, epochs=100, batch_size=1, verbose=2)

mkpath = "DATA"  # directory where the trained model is saved
# os.makedirs replaces the undefined mkdir(); exist_ok avoids a crash on re-runs.
os.makedirs(mkpath, exist_ok=True)
model.save(os.path.join(mkpath, "Test" + ".h5"))

LSTM-single step:预测单个值

# LSTM, single-step forecasting: predict one scalar per input sequence.
# Same layout as the SimpleRNN version above, with an LSTM recurrent layer.
model = Sequential()
# Input shape (timesteps, features) is taken straight from the training data.
model.add(LSTM(50, input_shape=(train_x.shape[1], train_x.shape[2])))
model.add(Dense(1))  # one scalar output per sequence
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(train_x, train_y, epochs=100, batch_size=1, verbose=2)

mkpath = "DATA"  # directory where the trained model is saved
# os.makedirs replaces the undefined mkdir(); exist_ok avoids a crash on re-runs.
os.makedirs(mkpath, exist_ok=True)
model.save(os.path.join(mkpath, "Test" + ".h5"))

LSTM-multi step:预测序列

example:用(t-4),...,(t-1)去预测(t),(t+1),(t+2) 

这部分代码调了很久,注意观察每层代码的Output shape

# LSTM, multi-step forecasting: predict a whole output sequence,
# e.g. use (t-4)..(t-1) to predict (t), (t+1), (t+2).
# Watch each layer's output shape (model.summary()) when changing this stack.
from keras.layers.core import Reshape
from keras.layers.core import Flatten
from keras.layers import TimeDistributed

n_steps_out = train_y.shape[1]  # generalizes the previously hard-coded 3

model = Sequential()
# return_sequences=True keeps per-timestep outputs: (timesteps, 128).
model.add(LSTM(128, input_shape=(train_x.shape[1], train_x.shape[2]), return_sequences=True))
model.add(Flatten())
# The network output must match train_y's shape so the MSE loss lines up.
model.add(Dense(n_steps_out))
model.add(Reshape((n_steps_out, 1)))
model.add(TimeDistributed(Dense(1)))
model.summary()
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(train_x, train_y, epochs=100, batch_size=1, verbose=2)

mkpath = "DATA"  # directory where the trained model is saved
# os.makedirs replaces the undefined mkdir(); exist_ok avoids a crash on re-runs.
os.makedirs(mkpath, exist_ok=True)
model.save(os.path.join(mkpath, "Test" + ".h5"))

训练过程

预测部分

# Inspect the training data and run predictions with the trained model.
print(train_x)
print("===================")
print(train_x[1])  # second training sample (index 1)
print("===================")
# Reshape one sample to (1, seq_length, n_features) — a batch of size 1.
# Fix: the feature dimension was hard-coded to 1; use train_x.shape[2] so this
# also works for multivariate inputs.
predict_x = train_x[1].reshape(1, train_x.shape[1], train_x.shape[2])
print("===================")
y_train_pred_nn = model.predict(predict_x)  # prediction for the single sample
print(y_train_pred_nn)
print("===================")
y_train_pred_all = model.predict(train_x)  # predictions for the whole training set
print(y_train_pred_all)
[[[0.]
  [1.]
  [2.]
  [3.]]

 [[1.]
  [2.]
  [3.]
  [4.]]

 [[2.]
  [3.]
  [4.]
  [5.]]

 [[3.]
  [4.]
  [5.]
  [6.]]]
===================
[[1.]
 [2.]
 [3.]
 [4.]]
===================
===================
[[[5.210974]
  [6.229568]
  [7.229979]]]
===================
[[[3.9487271]
  [4.867873 ]
  [5.6753364]]

 [[5.210974 ]
  [6.2295685]
  [7.2299795]]

 [[6.1354184]
  [7.164644 ]
  [8.229614 ]]

 [[6.7333   ]
  [7.742232 ]
  [8.802172 ]]]

SimpleRNN-multi step:预测序列

与上面LSTM-multi step的情况一样,只修改了一行代码:

# Swap the LSTM layer for a SimpleRNN (512 units); the rest of the multi-step
# model above stays unchanged.
model.add(SimpleRNN(512,input_shape=(train_x.shape[1], train_x.shape[2]),return_sequences=True))

 

有趣现象:(目前是数据比较理想的情况),调大Lstm层Units,可以减小Loss,增大训练集的预测精度

完整代码已开源放进Gitee:

猜你喜欢

转载自blog.csdn.net/weixin_43332715/article/details/124073581