tf.unstack
For details, see this blog post of mine.
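In short, tf.unstack splits a tensor into a Python list of tensors along one axis. A minimal sketch (the shapes here are purely illustrative):

import tensorflow as tf

# A batch of 2 sequences, each with 3 timesteps of 4 features.
x = tf.placeholder(tf.float32, [2, 3, 4])

# Unstack along axis 1 (the time axis): the result is a Python list
# of 3 tensors, each of shape [2, 4] -- one tensor per timestep.
steps = tf.unstack(x, num=3, axis=1)
print(len(steps))       # 3
print(steps[0].shape)   # (2, 4)

This is exactly the transformation the RNN code below relies on before calling static_rnn.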
tf.nn.rnn_cell.LSTMCell 和 tf.contrib.rnn.LSTMCell
For details, see this blog post of mine.
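The short version, as far as I can tell: in the TF 1.x releases I have checked, the two names resolve to the same class, so cells built from either are interchangeable. A quick illustrative check:

import tensorflow as tf
from tensorflow.contrib import rnn

# In the TF 1.x versions I have checked, both names point to the same class.
cell_a = tf.nn.rnn_cell.LSTMCell(num_units=128)
cell_b = rnn.LSTMCell(num_units=128)
print(tf.nn.rnn_cell.LSTMCell is rnn.LSTMCell)   # True in those releases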
tf.nn.static_rnn 和 tf.contrib.rnn.static_rnn
For details, see this blog post of mine.
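Likewise, to my knowledge tf.nn.static_rnn and tf.contrib.rnn.static_rnn are the same function in TF 1.x. What matters for the code below is the calling convention: static_rnn takes a Python list of per-timestep tensors (which tf.unstack produces) and returns the list of per-timestep outputs plus the final state. A minimal sketch:

import tensorflow as tf

# static_rnn expects a Python list of [batch, features] tensors,
# one per timestep -- exactly what tf.unstack produces.
x = tf.placeholder(tf.float32, [None, 3, 4])
inputs = tf.unstack(x, num=3, axis=1)             # list of 3 [None, 4] tensors
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=8)
outputs, state = tf.nn.static_rnn(cell, inputs, dtype=tf.float32)
print(len(outputs))        # 3: one output per timestep
print(outputs[-1].shape)   # (?, 8): output at the last timestep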
Implementing a simple RNN
The code comes from here; I made a few small changes.
from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/home/xhm/PycharmProjects/tensorflow_study/mnist_data/", one_hot=True)

# Training parameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

# Network parameters: each 28x28 MNIST image is fed as a sequence of
# 28 timesteps, one 28-pixel row per step.
num_input = 28
timesteps = 28
num_hidden = 128
num_classes = 10

X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])

def RNN(x):
    # Unstack the [batch, timesteps, num_input] tensor into a list of
    # `timesteps` tensors of shape [batch, num_input], as static_rnn expects.
    x = tf.unstack(x, timesteps, 1)
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_hidden, forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Classify on the output at the last timestep.
    out = tf.layers.dense(outputs[-1], num_classes, use_bias=True)
    return out

logits = RNN(X)
pred_classes = tf.argmax(tf.nn.softmax(logits), axis=-1)

loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

accuracy = tf.reduce_mean(tf.cast(tf.equal(pred_classes, tf.argmax(Y, -1)), tf.float32))

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for step in range(1, training_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape the flat 784-pixel images to [batch, timesteps, num_input].
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            loss, acc = sess.run([loss_op, accuracy],
                                 feed_dict={X: batch_x, Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + "{:.4f}".format(loss) +
                  ", Training Accuracy= " + "{:.3f}".format(acc))
    print("Optimization Finished!")

    # Evaluate on 128 held-out test images.
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))