# coding: utf-8

# In[2]:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# In[3]:

# load dataset
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# size of each batch
batch_size = 100
# calculate how many batches there are
n_batch = mnist.train.num_examples // batch_size

# parameter summaries
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)      # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)   # histogram

# define a namespace for the inputs
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x_input')
    y = tf.placeholder(tf.float32, [None, 10], name='y_input')

with tf.name_scope('layer'):
    with tf.name_scope('weights'):
        W = tf.Variable(tf.zeros([784, 10]))
        variable_summaries(W)
    with tf.name_scope('biases'):
        b = tf.Variable(tf.zeros([10]))
        variable_summaries(b)
    with tf.name_scope('wx_plus_b'):
        wx_plus_b = tf.matmul(x, W) + b
    # create a simple neural network
    with tf.name_scope('prediction'):
        prediction = tf.nn.softmax(wx_plus_b)

with tf.name_scope('loss'):
    # quadratic cost function
    loss = tf.reduce_mean(tf.square(y - prediction))
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    # use gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# initialize variables
init = tf.global_variables_initializer()

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        # the result is stored in a boolean list;
        # argmax returns the index of the largest value along axis 1
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    # compute the accuracy
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

# merge all summaries
merged = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('logs/', sess.graph)
    for epoch in range(51):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            summary, _ = sess.run([merged, train_step], feed_dict={x: batch_xs, y: batch_ys})
        writer.add_summary(summary, epoch)
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy " + str(acc))

# In[ ]:
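After the script finishes, the recorded scalar curves (loss, accuracy, and the mean/stddev/max/min of W and b) and the name-scoped graph structure can be viewed by pointing TensorBoard at the log directory written by the FileWriter. A minimal invocation, assuming TensorBoard is installed alongside TensorFlow and the script was run from the current working directory:

    tensorboard --logdir=logs/

Open the URL it prints (by default http://localhost:6006) in a browser: the SCALARS tab shows the curves, the GRAPHS tab shows the network structure, and the HISTOGRAMS/DISTRIBUTIONS tabs show the parameter summaries.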
Python study notes: using TensorBoard to draw the graph structure, plot training curves, and analyze parameters