6. MNIST Dataset Classification (Simple Version)
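The script below trains a minimal softmax classifier on MNIST with TensorFlow 1.x: a single 784-10 layer, a quadratic cost function, and plain gradient descent. It prints the test accuracy after every training epoch.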

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the data set
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Batch size
batch_size = 64
# Total number of batches per epoch
n_batch = mnist.train.num_examples // batch_size

# Define two placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Create a simple neural network: 784-10
W = tf.Variable(tf.truncated_normal([784, 10], stddev=0.1))
b = tf.Variable(tf.zeros([10]) + 0.1)
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

# Quadratic cost function
loss = tf.losses.mean_squared_error(y, prediction)
# Use gradient descent
train = tf.train.GradientDescentOptimizer(0.3).minimize(loss)

# Store the results in a list of booleans
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    # Initialize the variables
    sess.run(tf.global_variables_initializer())
    # One epoch = one pass over all the training data
    for epoch in range(21):
        for batch in range(n_batch):
            # Fetch one batch of images and labels
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train, feed_dict={x: batch_xs, y: batch_ys})
        # Test once after each training epoch
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy " + str(acc))
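Note that the tensorflow.examples.tutorials.mnist module only exists in TensorFlow 1.x; it was removed in TensorFlow 2.x. As a minimal sketch of the same 784-10 softmax model under TensorFlow 2.x (an assumption on my part, not part of the original tutorial), one could use the bundled Keras MNIST loader instead:

import tensorflow as tf

# Load MNIST via Keras (ships with TensorFlow 2.x)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Flatten 28x28 images to 784-dim vectors and scale to [0, 1]
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
# One-hot encode the labels, matching one_hot=True above
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

# The same 784-10 softmax network
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation="softmax", input_shape=(784,)),
])
# Same quadratic (MSE) cost and plain gradient descent with learning rate 0.3
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.3),
              loss="mean_squared_error",
              metrics=["accuracy"])

# 21 epochs with batch size 64, evaluating on the test set after each epoch
model.fit(x_train, y_train, batch_size=64, epochs=21,
          validation_data=(x_test, y_test))

The quadratic (MSE) cost is kept here only to match the original script; in practice, a cross-entropy loss usually converges faster for softmax outputs.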

 

Source: www.cnblogs.com/liuwenhua/p/11605457.html