"""Train a 784-500-300-10 MLP with tanh activations and dropout on MNIST
(TensorFlow 1.x), decaying the Adam learning rate each epoch and printing
test accuracy after every epoch."""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets("F:\\TensorflowProject\\MNIST_data", one_hot=True)

# Mini-batch size and the number of batches per epoch.
batch_size = 100
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28x28 images (784 floats) and one-hot labels 0-9.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
# Learning rate lives in a Variable so it can be re-assigned each epoch.
lr = tf.Variable(0.001, dtype=tf.float32)

# Hidden layer 1: 784 -> 500, tanh + dropout.
W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
b1 = tf.Variable(tf.zeros([500]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, keep_prob)

# Hidden layer 2: 500 -> 300, tanh + dropout.
W2 = tf.Variable(tf.truncated_normal([500, 300], stddev=0.1))
b2 = tf.Variable(tf.zeros([300]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, keep_prob)

# Output layer: 300 -> 10.
W3 = tf.Variable(tf.truncated_normal([300, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
logits = tf.matmul(L2_drop, W3) + b3
prediction = tf.nn.softmax(logits)

# Cross-entropy loss. NOTE: softmax_cross_entropy_with_logits applies
# softmax internally, so it must receive the raw logits — the original
# passed the already-softmaxed `prediction`, double-applying softmax.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))

# Adam with the epoch-decayed learning rate variable.
train_step = tf.train.AdamOptimizer(lr).minimize(loss)

init = tf.global_variables_initializer()

# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(30):
        # Exponential learning-rate decay: 0.001 * 0.95^epoch.
        sess.run(tf.assign(lr, 0.001 * (0.95 ** epoch)))
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # keep_prob=1.0 effectively disables dropout during training —
            # NOTE(review): likely intended to be < 1.0; confirm with author.
            sess.run(train_step,
                     feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        learning_rate = sess.run(lr)
        # Evaluate on the full test set with dropout disabled.
        test_acc = sess.run(accuracy,
                            feed_dict={x: mnist.test.images,
                                       y: mnist.test.labels,
                                       keep_prob: 1.0})
        print("Iter: " + str(epoch) + " ,Testing Accuracy " + str(test_acc)
              + " Train : " + str(learning_rate))