Simple time-series prediction with a seq2seq encoder-decoder model

As a newcomer to deep learning, I hit a small problem while implementing seq2seq with an encoder-decoder: during training the decoder is fed the existing target sequence as its input, but at prediction time the decoder's input has to come from its own output at the previous step. After reading many other people's articles I finally got it working, so I'm writing it down here.
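The whole trick is where the decoder's input at step t comes from. Below is a minimal NumPy sketch of the two regimes (the names, shapes, and the toy decoder_step here are purely illustrative and are not part of the real code that follows):

import numpy as np

# Teacher forcing (training): decoder inputs are the targets shifted right by one
# step, with a zero "GO" frame prepended, so step t is fed the true value of step t-1.
targets = np.random.randn(15, 4, 1)            # (seq_length, batch_size, output_dim)
go_frame = np.zeros_like(targets[:1])
decoder_inputs_train = np.concatenate([go_frame, targets], axis=0)

# Free running (prediction): the true future is unknown, so step t is fed whatever
# the decoder produced at step t-1 (decoder_step stands in for one RNN step + projection).
def decoder_step(prev_output):
    return prev_output * 0.9

prev_output = np.zeros((4, 1))                 # start from the zero GO frame
predictions = []
for _ in range(16):                            # seq_length + 1 steps
    prev_output = decoder_step(prev_output)
    predictions.append(prev_output)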

import random
import math

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def do_generate_x_y(batch_size, seqlen):
    # Each sample is a random sine wave plus a random cosine wave; the first
    # seqlen points are the input and the next seqlen points are the target.
    batch_x = []
    batch_y = []
    for _ in range(batch_size):
        offset_rand = random.random() * 2 * math.pi
        freq_rand = random.random() / 1.5 * 15 + 0.5
        amp_rand = random.random() + 0.1

        sin_data = amp_rand * np.sin(np.linspace(
            seqlen / 15.0 * freq_rand * 0.0 * math.pi + offset_rand,
            seqlen / 15.0 * freq_rand * 3.0 * math.pi + offset_rand, seqlen * 2)) + 1

        offset_rand = random.random() * 2 * math.pi
        freq_rand = random.random() / 1.5 * 15 + 0.5
        amp_rand = random.random() * 1.2

        sig_data = amp_rand * np.cos(np.linspace(
            seqlen / 15.0 * freq_rand * 0.0 * math.pi + offset_rand,
            seqlen / 15.0 * freq_rand * 3.0 * math.pi + offset_rand, seqlen * 2)) + 1 + sin_data

        batch_x.append(np.array([sig_data[:seqlen]]).T)
        batch_y.append(np.array([sig_data[seqlen:]]).T)

    # shape: (batch_size, seq_length, output_dim)
    batch_x = np.array(batch_x).transpose((1, 0, 2))
    batch_y = np.array(batch_y).transpose((1, 0, 2))
    # shape: (seq_length, batch_size, output_dim)

    return batch_x, batch_y

def generate_data(batch_size):
    seq_length = 15
    return do_generate_x_y(batch_size, seq_length)

——————— Data generation (above) ———————
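A quick sanity check of the generator's output shapes (just an illustration, not part of the original post), since everything downstream assumes time-major tensors:

batch_x, batch_y = generate_data(batch_size=15)
print(batch_x.shape)   # (15, 15, 1) -> (seq_length, batch_size, input_dim): the observed past
print(batch_y.shape)   # (15, 15, 1) -> (seq_length, batch_size, output_dim): the future to predict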

seq_length = 15
batch_size = 15
encoder_hidden_units = 12
decoder_hidden_units = encoder_hidden_units
input_dim = output_dim = 1
learning_rate = 0.01

# Decoder sequences are one step longer than the encoder's: a zero GO frame is
# prepended to the decoder inputs and a zero frame is appended to the targets.
encoder_inputs = tf.placeholder(shape=(seq_length, batch_size, input_dim), dtype=tf.float32, name='encoder_inputs')
decoder_targets = tf.placeholder(shape=(seq_length + 1, batch_size, output_dim), dtype=tf.float32, name='decoder_targets')
decoder_inputs = tf.placeholder(shape=(seq_length + 1, batch_size, output_dim), dtype=tf.float32, name='decoder_inputs')

encoder_cell = tf.contrib.rnn.LSTMCell(encoder_hidden_units)

# time_major=True: inputs are laid out as (seq_length, batch_size, input_dim)
encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(
    encoder_cell, encoder_inputs,
    dtype=tf.float32, time_major=True,
)

——————— Encoder (above) ———————
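One detail worth noting before the decoder: with LSTMCell's default state_is_tuple=True, encoder_final_state is an LSTMStateTuple (c, h), each of shape (batch_size, encoder_hidden_units). Handing it to the decoder as initial_state below only works because decoder_hidden_units equals encoder_hidden_units. A quick check (illustrative only):

# both halves of the tuple carry the encoder's summary of the input sequence
print(encoder_final_state.c.shape)   # (15, 12)
print(encoder_final_state.h.shape)   # (15, 12)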

decoder_cell = tf.contrib.rnn.LSTMCell(decoder_hidden_units)

# Teacher-forced decoder: it is fed the shifted target sequence and starts
# from the encoder's final state.
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(
    decoder_cell, decoder_inputs,
    initial_state=encoder_final_state,
    dtype=tf.float32, time_major=True, scope="plain_decoder",
)

# Linear projection from the decoder's hidden state to the 1-D output.
W = tf.Variable(tf.random_uniform([decoder_hidden_units, output_dim], -1, 1), dtype=tf.float32)
b = tf.Variable(tf.zeros([output_dim]), dtype=tf.float32)

decoder_max_steps, decoder_batch_size, decoder_dim = tf.unstack(tf.shape(decoder_outputs))
decoder_outputs_flat = tf.reshape(decoder_outputs, (-1, decoder_dim))
decoder_logits_flat = tf.add(tf.matmul(decoder_outputs_flat, W), b)
decoder_prediction = tf.reshape(decoder_logits_flat, (decoder_max_steps, decoder_batch_size, output_dim))

output_loss = tf.reduce_mean(tf.pow(decoder_prediction - decoder_targets, 2))

reg_loss = 0
lambda_l2_reg = 0.003

for tf_var in tf.trainable_variables():  # L2-regularize everything except the final fully-connected layer
    if not ("fully_connected" in tf_var.name):
        # note: W and b above are created without a name, so they do not match
        # "fully_connected" and are in fact regularized as well
        reg_loss += tf.reduce_mean(tf.nn.l2_loss(tf_var))

loss = reg_loss*lambda_l2_reg + output_loss

train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

——————— Decoder and training op (above) ———————

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

def next_feed():
    batch_x, batch_y = generate_data(batch_size)
    # targets: the future sequence followed by one zero frame (length seq_length + 1)
    decoder_targets_ = np.concatenate((batch_y, np.zeros_like(batch_y[0].reshape(1, batch_size, 1))))
    # inputs: a zero GO frame followed by the future sequence (teacher forcing)
    decoder_inputs_ = np.concatenate((np.zeros_like(batch_y[0].reshape(1, batch_size, 1)), batch_y))

    return {
        encoder_inputs: batch_x,
        decoder_inputs: decoder_inputs_,
        decoder_targets: decoder_targets_,
    }

loss_track = []
max_batches = 5000
batches_in_epoch = 1000

try:
    for batch in range(max_batches):
        fd = next_feed()
        _, l = sess.run([train_op, loss], fd)
        loss_track.append(l)

        if batch == 0 or batch % batches_in_epoch == 0:
            print('batch {}'.format(batch))
            print('  minibatch loss: {}'.format(sess.run(loss, fd)))
            predict_ = sess.run(decoder_prediction, fd)
            # transpose to (batch_size, seq_length, output_dim) so each sample prints as one sequence
            for i, (inp, pred) in enumerate(zip(fd[encoder_inputs].transpose(1, 0, 2),
                                                predict_.transpose(1, 0, 2))):
                print('  sample {}:'.format(i + 1))
                print('    input     > {}'.format(inp))
                print('    predicted > {}'.format(pred))
                if i >= 2:
                    break
            print()

except KeyboardInterrupt:
    print('training interrupted')

plt.plot(loss_track)
plt.show()
print('loss {:.4f} after {} examples (batch_size={})'.format(loss_track[-1], len(loss_track) * batch_size, batch_size))

——————— Training (above) ———————

# For this regression task both special frames are just all-zero vectors:
# EOS/GO starts the decoder, PAD is fed once all steps are finished.
PAD = tf.zeros([batch_size, output_dim], dtype=tf.float32, name='PAD')
EOS = tf.zeros([batch_size, output_dim], dtype=tf.float32, name='EOS')

decoder_lengths = seq_length + 1   # decode one step more than the input length

def loop_fn_initial():
    # first call of raw_rnn: feed the zero GO frame and start from the encoder's final state
    initial_elements_finished = (0 >= decoder_lengths)
    initial_input = EOS
    initial_cell_state = encoder_final_state
    initial_cell_output = None
    initial_loop_state = None
    return (initial_elements_finished,
            initial_input,
            initial_cell_state,
            initial_cell_output,
            initial_loop_state)

def loop_fn_transition(time, previous_output, previous_state, previous_loop_state):

    def get_next_input():
        # project the previous cell output with the same W, b used during training
        # and feed the result back in as the next decoder input
        output_logits = tf.add(tf.matmul(previous_output, W), b)
        next_input = output_logits
        return next_input

    elements_finished = (time >= decoder_lengths)

    finished = tf.reduce_all(elements_finished)
    input = tf.cond(finished, lambda: PAD, get_next_input)
    state = previous_state
    output = previous_output
    loop_state = None

    return (elements_finished,
            input,
            state,
            output,
            loop_state)

def loop_fn(time, previous_output, previous_state, previous_loop_state):
    if previous_state is None:    # time == 0
        assert previous_output is None and previous_state is None
        return loop_fn_initial()
    else:
        return loop_fn_transition(time, previous_output, previous_state, previous_loop_state)

with tf.variable_scope('decode', reuse=None):
    # inference decoder: raw_rnn drives decoder_cell with loop_fn, and the
    # outputs are projected with the same W, b as in the training graph
    decoder_outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(decoder_cell, loop_fn)
    decoder_outputs = decoder_outputs_ta.stack()
    decoder_max_steps, decoder_batch_size, decoder_dim = tf.unstack(tf.shape(decoder_outputs))
    decoder_outputs_flat = tf.reshape(decoder_outputs, (-1, decoder_dim))
    decoder_logits_flat = tf.add(tf.matmul(decoder_outputs_flat, W), b)
    decoder_prediction = tf.reshape(decoder_logits_flat, (decoder_max_steps, decoder_batch_size, output_dim))

def next_feed():
    # at prediction time only the encoder inputs and the targets (kept for plotting) are
    # fed; the decoder inputs are produced step by step inside loop_fn
    batch_x, batch_y = generate_data(batch_size)
    decoder_targets_ = np.concatenate((batch_y, np.zeros_like(batch_y[0].reshape(1, batch_size, 1))))

    return {
        encoder_inputs: batch_x,
        decoder_targets: decoder_targets_,
    }

fd = next_feed()

predict = sess.run(decoder_prediction, fd)
encoder_input = fd[encoder_inputs]
target = fd[decoder_targets]

——————— Prediction (above) ———————
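Besides eyeballing the plots, it can help to put a number on the free-running predictions. A small addition (not in the original post) that compares them with the targets already fetched above:

# mean squared error of the free-running decoder, dropping the trailing zero padding frame
mse = np.mean((predict[:seq_length] - target[:seq_length]) ** 2)
print('free-running MSE: {:.4f}'.format(mse))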

Plotting

predict = np.array(predict).transpose((1,0,2))
encoder_input = np.array(encoder_input).transpose((1,0,2))
target = np.array(target).transpose((1,0,2))

for i in range(predict.shape[0]):
    # past input as blue circles, free-running prediction in yellow, true future as blue crosses
    plt.plot(range(encoder_input.shape[1]), encoder_input[i], "o--b")
    plt.plot(range(encoder_input.shape[1], encoder_input.shape[1] + predict.shape[1]), predict[i][:, -1], "o--y")
    plt.plot(range(encoder_input.shape[1], encoder_input.shape[1] + target.shape[1]), target[i], "x--b")
    plt.title("Predictions vs. future")
    plt.show()


Reposted from blog.csdn.net/qq_33752012/article/details/81478548