Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
operation result:
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
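As a quick sanity check, the shapes the loader returns can be printed; this is a minimal sketch assuming the `mnist` object from the `read_data_sets` call above:

Code:
print(mnist.train.num_examples)   # 55000 training examples
print(mnist.train.images.shape)   # (55000, 784): each image flattened to 28*28 = 784
print(mnist.train.labels.shape)   # (55000, 10): one-hot labels because one_hot=True
print(mnist.test.num_examples)    # 10000 test examples

These shapes match the placeholders defined next: 784-dimensional inputs and 10-dimensional one-hot labels.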
Code:
# Size of each batch
batch_size = 100
# Calculate how many batches there are
n_batch = mnist.train.num_examples // batch_size

# Parameter summaries
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)  # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram

# Initialize weights
def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.1)  # generate values from a truncated normal distribution
    return tf.Variable(initial, name=name)

# Initialize biases
def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)

# Convolutional layer
def conv2d(x, W):
    # x: input tensor of shape [batch, in_height, in_width, in_channels]
    # W: filter/kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
    # strides[0] = strides[3] = 1; strides[1] is the stride in the x direction, strides[2] in the y direction
    # padding: a string, either "SAME" or "VALID"
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# Pooling layer
def max_pool_2x2(x):
    # ksize [1, x, y, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Namespaces
with tf.name_scope('input'):
    # Define two placeholders
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y = tf.placeholder(tf.float32, [None, 10], name='y-input')
    with tf.name_scope('x_image'):
        # Reshape x into a 4-D tensor [batch, in_height, in_width, in_channels]
        x_image = tf.reshape(x, [-1, 28, 28, 1], name='x_image')

with tf.name_scope('Conv1'):
    # Initialize the weights and biases of the first convolutional layer
    with tf.name_scope('W_conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32], name='W_conv1')  # 5*5 sampling window, 32 kernels extracting features from 1 plane
    with tf.name_scope('b_conv1'):
        b_conv1 = bias_variable([32], name='b_conv1')  # one bias per kernel

    # Convolve x_image with the weight tensor, add the bias, then apply the relu activation
    with tf.name_scope('conv2d_1'):
        conv2d_1 = conv2d(x_image, W_conv1) + b_conv1
    with tf.name_scope('relu'):
        h_conv1 = tf.nn.relu(conv2d_1)
    with tf.name_scope('h_pool1'):
        h_pool1 = max_pool_2x2(h_conv1)  # perform max-pooling

with tf.name_scope('Conv2'):
    # Initialize the weights and biases of the second convolutional layer
    with tf.name_scope('W_conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')  # 5*5 sampling window, 64 kernels extracting features from 32 planes
    with tf.name_scope('b_conv2'):
        b_conv2 = bias_variable([64], name='b_conv2')  # one bias per kernel

    # Convolve h_pool1 with the weight tensor, add the bias, then apply the relu activation
    with tf.name_scope('conv2d_2'):
        conv2d_2 = conv2d(h_pool1, W_conv2) + b_conv2
    with tf.name_scope('relu'):
        h_conv2 = tf.nn.relu(conv2d_2)
    with tf.name_scope('h_pool2'):
        h_pool2 = max_pool_2x2(h_conv2)  # perform max-pooling

# A 28*28 image is still 28*28 after the first convolution (SAME padding) and becomes 14*14 after the first pooling
# It is still 14*14 after the second convolution and becomes 7*7 after the second pooling
# After the operations above we are left with 64 feature planes of size 7*7

with tf.name_scope('fc1'):
    # Initialize the weights of the first fully connected layer
    with tf.name_scope('W_fc1'):
        W_fc1 = weight_variable([7 * 7 * 64, 1024], name='W_fc1')  # the previous layer has 7*7*64 neurons; the fully connected layer has 1024 neurons
    with tf.name_scope('b_fc1'):
        b_fc1 = bias_variable([1024], name='b_fc1')  # 1024 nodes

    # Flatten the output of pooling layer 2 to 1 dimension
    with tf.name_scope('h_pool2_flat'):
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64], name='h_pool2_flat')
    # Compute the output of the first fully connected layer
    with tf.name_scope('wx_plus_b1'):
        wx_plus_b1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1
    with tf.name_scope('relu'):
        h_fc1 = tf.nn.relu(wx_plus_b1)

    # keep_prob is the probability that a neuron's output is kept
    with tf.name_scope('keep_prob'):
        keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    with tf.name_scope('h_fc1_drop'):
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name='h_fc1_drop')

with tf.name_scope('fc2'):
    # Initialize the second fully connected layer
    with tf.name_scope('W_fc2'):
        W_fc2 = weight_variable([1024, 10], name='W_fc2')
    with tf.name_scope('b_fc2'):
        b_fc2 = bias_variable([10], name='b_fc2')
    with tf.name_scope('wx_plus_b2'):
        wx_plus_b2 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    with tf.name_scope('softmax'):
        # Compute the output
        prediction = tf.nn.softmax(wx_plus_b2)

# Cross-entropy cost function
with tf.name_scope('cross_entropy'):
    # softmax_cross_entropy_with_logits applies softmax internally, so it must be fed
    # the raw logits wx_plus_b2, not the already-softmaxed prediction
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=wx_plus_b2),
        name='cross_entropy')
    tf.summary.scalar('cross_entropy', cross_entropy)

# Optimize with AdamOptimizer
with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Compute the accuracy
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        # The result is stored in a boolean list
        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))  # argmax returns the position of the largest value in a one-dimensional tensor
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

# Merge all summaries
merged = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('logs/test', sess.graph)
    for i in range(1001):
        # Train the model
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})
        # Record summaries computed on the training set
        summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        train_writer.add_summary(summary, i)
        # Record summaries computed on the test set
        batch_xs, batch_ys = mnist.test.next_batch(batch_size)
        summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        test_writer.add_summary(summary, i)

        if i % 100 == 0:
            test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
            train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images[:10000], y: mnist.train.labels[:10000], keep_prob: 1.0})
            print("Iter " + str(i) + ", Testing Accuracy= " + str(test_acc) + ", Training Accuracy= " + str(train_acc))
operation result:
Iter 0, Testing Accuracy= 0.0958, Training Accuracy= 0.0985
Iter 100, Testing Accuracy= 0.5912, Training Accuracy= 0.5964
Iter 200, Testing Accuracy= 0.8075, Training Accuracy= 0.8032
Iter 300, Testing Accuracy= 0.8582, Training Accuracy= 0.853
Iter 400, Testing Accuracy= 0.9402, Training Accuracy= 0.9359
Iter 500, Testing Accuracy= 0.9457, Training Accuracy= 0.9429
Iter 600, Testing Accuracy= 0.9537, Training Accuracy= 0.9525
Iter 700, Testing Accuracy= 0.9591, Training Accuracy= 0.9596
Iter 800, Testing Accuracy= 0.9607, Training Accuracy= 0.9578
Iter 900, Testing Accuracy= 0.964, Training Accuracy= 0.9615
Iter 1000, Testing Accuracy= 0.9667, Training Accuracy= 0.9652
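The comments in the code above trace the spatial sizes: 28*28 stays 28*28 under a SAME-padded convolution, halves under each 2x2 pooling, and ends at 7*7 with 64 feature maps, which gives the 7*7*64 fan-in of W_fc1. A minimal sketch of that arithmetic, using a hypothetical helper same_out that applies the SAME-padding rule out = ceil(in / stride):

Code:
import math

def same_out(size, stride):
    # With SAME padding the output size is ceil(in / stride), independent of kernel size
    return math.ceil(size / stride)

size = 28
size = same_out(size, 1)   # conv1, stride 1 -> 28
size = same_out(size, 2)   # pool1, stride 2 -> 14
size = same_out(size, 1)   # conv2, stride 1 -> 14
size = same_out(size, 2)   # pool2, stride 2 -> 7
print(size * size * 64)    # 7 * 7 * 64 = 3136, the flattened input to the first fully connected layer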
tensorboard:
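The summaries written to logs/train and logs/test can be viewed by starting TensorBoard from the same working directory and opening it in a browser; a minimal invocation (TensorBoard serves on port 6006 by default):

tensorboard --logdir=logs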