TensorFlow: using an RNN to classify the MNIST handwritten digit dataset

import tensorflow as tf
import numpy as np
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

# Set the training hyperparameters: the learning rate, the total number of
# training samples to consume, and the batch size per training step
lr = 0.001
training_iters = 100000
batch_size = 128

# To classify images with an RNN, we treat each image row as a sequence of
# pixels. An MNIST image is 28x28 pixels, so each sample becomes a sequence
# of 28 steps, where the input at each step is one row of 28 pixels.
n_inputs = 28         # input size at each step (one image row)
n_steps = 28          # number of time steps (number of image rows)
n_hidden_units = 128  # number of neurons in the hidden layer
n_classes = 10        # number of output classes: the digits 0 to 9

# Define the input placeholders
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])

# Define the weights and biases
weights = {
    # (28, 128)
    'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
    # (128, 10)
    'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
}
biases = {
    # (128, )
    'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
    # (10, )
    'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
}

# Define the RNN model
def RNN(X, weights, biases):
    # Flatten the steps: X ==> (128 batch * 28 steps, 28 inputs)
    X = tf.reshape(X, [-1, n_inputs])
    # Project into the hidden layer:
    # X_in ==> (128 batch * 28 steps, 128 hidden)
    X_in = tf.matmul(X, weights['in']) + biases['in']
    # Restore the step dimension: X_in ==> (128 batch, 28 steps, 128 hidden)
    X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])

    # Use the basic LSTM recurrent unit: BasicLSTMCell
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units, forget_bias=1.0,
                                             state_is_tuple=True)
    # Initialize the state to zeros; the LSTM state has two parts: (c_state, h_state)
    init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)

    # dynamic_rnn accepts a tensor of shape (batch, steps, inputs) or
    # (steps, batch, inputs); time_major=False selects the former
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, X_in,
                                             initial_state=init_state,
                                             time_major=False)

    # final_state[1] is the hidden state h at the last step
    results = tf.matmul(final_state[1], weights['out']) + biases['out']
    return results

# Define the loss function and the optimizer; the optimizer is AdamOptimizer
pred = RNN(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)

# Define the model's prediction and the accuracy computation
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Launch the graph in a session, train, and print the accuracy every 20 steps
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    step = 0
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
        sess.run([train_op], feed_dict={x: batch_xs, y: batch_ys})
        if step % 20 == 0:
            print(sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys}))
        step += 1
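The script loads the test split into teX and teY but never touches it; the accuracy printed above is measured on the current training batch only. As a minimal sketch of a held-out evaluation (an addition, not part of the original), the following could be appended inside the with tf.Session() block after the training loop. Because init_state is built with the fixed batch_size, the test set has to be fed in chunks of exactly 128 samples:

    # Hypothetical extension: evaluate on the held-out test set.
    # zero_state above is created for a fixed batch_size, so feed
    # exactly batch_size test samples at a time and average.
    test_accs = []
    for i in range(0, len(teX) - batch_size + 1, batch_size):
        test_xs = teX[i:i + batch_size].reshape([batch_size, n_steps, n_inputs])
        test_ys = teY[i:i + batch_size]
        test_accs.append(sess.run(accuracy, feed_dict={x: test_xs, y: test_ys}))
    print("test accuracy:", np.mean(test_accs))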
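A side note on the readout in RNN(): with state_is_tuple=True, final_state is a (c_state, h_state) pair, so final_state[1] is the last hidden state h. Because every MNIST sequence here runs the full 28 steps, this is interchangeable with taking the last time step of outputs. A variant of the last line of RNN(), with the same result under these assumptions:

    # Equivalent readout for these fixed-length sequences: the output of
    # an LSTM at the last step is its final hidden state h
    results = tf.matmul(outputs[:, -1, :], weights['out']) + biases['out']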
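Finally, note that this code targets the TensorFlow 1.x graph API: tf.contrib, placeholders, tf.nn.dynamic_rnn, and the tensorflow.examples.tutorials loader are all gone in TensorFlow 2. A rough TF2/Keras equivalent of the same model, offered as a sketch rather than a drop-in port:

import tensorflow as tf

# Load MNIST through the Keras dataset API (the images already come as
# (28, 28) arrays: 28 steps of 28 pixels, and labels as integers 0-9)
(trX, trY), (teX, teY) = tf.keras.datasets.mnist.load_data()
trX, teX = trX / 255.0, teX / 255.0

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(128, input_shape=(28, 28)),  # 28 steps x 28 inputs, 128 hidden units
    tf.keras.layers.Dense(10),                        # logits for the 10 digit classes
])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.fit(trX, trY, batch_size=128, epochs=1)
model.evaluate(teX, teY, batch_size=128)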