Study notes; when I have time I will add comments explaining the logical relationships between the functions.
# https://www.cnblogs.com/felixwang2/p/9190664.html
# https://www.cnblogs.com/felixwang2/p/9190664.html
# TensorFlow (12): handwritten digit recognition using an RNN

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the data set
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Each input image is 28*28
n_inputs = 28    # one input row has 28 values
max_time = 28    # 28 rows in total (28 time steps)
lstm_size = 100  # number of hidden units
n_classes = 10   # 10 classes
batch_size = 50  # 50 samples per batch
n_batch = mnist.train.num_examples // batch_size  # total number of batches

# None means the first dimension can have any length
x = tf.placeholder(tf.float32, [None, 784])
# correct labels
y = tf.placeholder(tf.float32, [None, 10])

# Initialize the weights
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
# Initialize the biases
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))


# Define the RNN network
def RNN(X, weights, biases):
    # inputs = [batch_size, max_time, n_inputs]
    inputs = tf.reshape(X, [-1, max_time, n_inputs])
    # Define the basic LSTM cell
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    # final_state[state, batch_size, cell.state_size]
    # final_state[0] is the cell state
    # final_state[1] is the hidden state
    # outputs: the RNN output `Tensor`.
    #   If time_major == False (default), this will be a `Tensor` shaped:
    #     `[batch_size, max_time, cell.output_size]`.
    #   If time_major == True, this will be a `Tensor` shaped:
    #     `[max_time, batch_size, cell.output_size]`.
    # final_state records only the output of the last time step
    # outputs records the output of every time step
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
    results = tf.nn.softmax(tf.matmul(final_state[1], weights) + biases)
    return results


# Compute the result returned by the RNN
prediction = RNN(x, weights, biases)
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
# Optimize with AdamOptimizer
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Store the results in a boolean list
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the position of the largest value in a 1-D tensor
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast correct_prediction to float32
# Initialization
init = tf.global_variables_initializer()

gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(init)
    for epoch in range(6):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy= " + str(acc))
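The script above uses TF 1.x APIs (tf.contrib.rnn.BasicLSTMCell, tensorflow.examples.tutorials.mnist) that were removed in TensorFlow 2.x. For reference, below is a minimal sketch of the same idea, treating each image as 28 time steps of 28 features and feeding the last LSTM hidden state into a softmax classifier, written against the tf.keras API. The hyperparameters (100 LSTM units, batch size 50, Adam with learning rate 1e-4, 6 epochs) mirror the original; the specific layer and loss choices are my assumptions, not part of the original post.

# Sketch only: a tf.keras (TF 2.x) version of the same row-by-row LSTM classifier.
import tensorflow as tf

# Integer labels, images shaped (num_samples, 28, 28): 28 time steps of 28 features
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(100, input_shape=(28, 28)),   # 100 hidden units, as lstm_size above
    tf.keras.layers.Dense(10, activation="softmax"),   # 10 digit classes
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss="sparse_categorical_crossentropy",  # labels are integer class indices here
              metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=50, epochs=6,
          validation_data=(x_test, y_test))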
Output:
Iter 0, Testing Accuracy= 0.6694
Iter 1, Testing Accuracy= 0.714
Iter 2, Testing Accuracy= 0.7984
Iter 3, Testing Accuracy= 0.8568
Iter 4, Testing Accuracy= 0.8863
Iter 5, Testing Accuracy= 0.9088

Process finished with exit code 0