TensorFlow — Recurrent Neural Network (RNN)

Code:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

operation result:

Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
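
The loaded dataset exposes the images as flattened 784-dimensional vectors and the labels as one-hot 10-vectors; a minimal sanity check (illustrative, not part of the original script):

print(mnist.train.num_examples)   # 55000 training samples
print(mnist.train.images.shape)   # (55000, 784): each image is a flattened 28*28 picture
print(mnist.test.images.shape)    # (10000, 784)
print(mnist.train.labels.shape)   # (55000, 10): one-hot labels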

Code:

# The input picture is 28*28
n_inputs = 28      # the network reads one row of 28 pixels per time step
max_time = 28      # 28 rows in total, i.e. 28 time steps
lstm_size = 100    # number of units in the LSTM hidden layer
n_classes = 10     # 10 classes (digits 0-9)
batch_size = 50    # 50 samples per batch
n_batch = mnist.train.num_examples // batch_size  # number of batches per epoch
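# Illustrative note: each flattened 784-pixel image is read as a sequence of
# max_time = 28 steps, one row of n_inputs = 28 pixels per step,
# so 784 = max_time * n_inputs.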


# None means the first dimension (the batch size) can be any length
x = tf.placeholder(tf.float32, [None, 784])
# correct labels (one-hot)
y = tf.placeholder(tf.float32, [None, 10])



# initialize the output-layer weights
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
# initialize the output-layer biases
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))
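# These two variables form the output layer: the weight matrix projects the
# final hidden state (lstm_size = 100) down to the 10 class logits, and the
# biases shift them; truncated_normal keeps the initial weights within two
# standard deviations of zero.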


#define RNN network
def RNN(X, weights, biases):
    # inputs: [batch_size, max_time, n_inputs]
    inputs = tf.reshape(X, [-1, max_time, n_inputs])
    # define a basic LSTM cell
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    # final_state is an LSTMStateTuple (c, h); each part has shape
    # [batch_size, lstm_size]:
    #   final_state[0] is the cell state (c)
    #   final_state[1] is the hidden state (h) at the last time step
    # outputs: the RNN output `Tensor`.
    # If time_major == False (default), it is shaped
    #   `[batch_size, max_time, cell.output_size]`.
    # If time_major == True, it is shaped
    #   `[max_time, batch_size, cell.output_size]`.
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
    # return unscaled logits; the softmax is applied later inside
    # softmax_cross_entropy_with_logits (applying tf.nn.softmax here as well
    # would squash the logits twice and slow down training)
    results = tf.matmul(final_state[1], weights) + biases
    return results
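
# Note: for BasicLSTMCell the hidden state h at the final time step is the same
# tensor as the last output, so final_state[1] is equivalent to outputs[:, -1, :].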



# get the logits returned by the RNN
prediction = RNN(x, weights, biases)
# loss function (softmax_cross_entropy_with_logits applies the softmax itself,
# so it must be fed unscaled logits)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# optimize with AdamOptimizer (learning rate 1e-4)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# store the comparison results in a boolean list
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the index of the largest value along axis 1
# compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast the booleans to float32 before averaging
#initialization
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(6):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy= " + str(acc))

operation result:

Iter 0, Testing Accuracy= 0.7258
Iter 1, Testing Accuracy= 0.7861
Iter 2, Testing Accuracy= 0.8223
Iter 3, Testing Accuracy= 0.8923
Iter 4, Testing Accuracy= 0.9145
Iter 5, Testing Accuracy= 0.9193
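
Once training finishes, the same graph can classify individual images. A minimal sketch, assuming it is placed inside the with tf.Session() block above, after the epoch loop:

# classify the first test image (illustrative)
img = mnist.test.images[:1]                      # shape (1, 784)
logits = sess.run(prediction, feed_dict={x: img})
print("predicted digit:", logits.argmax())       # index of the largest logit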
