TensorFlow 实战Google深度学习框架读书笔记三:MNIST手写数字识别

import tensorflow as tf
# NOTE(review): this script uses the TF 1.x API throughout (placeholders,
# Session, tf.contrib); tensorflow.examples.tutorials is not present in TF 2.x.
from tensorflow.examples.tutorials.mnist import input_data
# Download/cache MNIST under the given directory; labels returned as one-hot vectors.
mnist = input_data.read_data_sets("G:/deep learning/mnist/", one_hot=True)

输出

Extracting G:/deep learning/mnist/train-images-idx3-ubyte.gz
Extracting G:/deep learning/mnist/train-labels-idx1-ubyte.gz
Extracting G:/deep learning/mnist/t10k-images-idx3-ubyte.gz
Extracting G:/deep learning/mnist/t10k-labels-idx1-ubyte.gz

input_node=784  # input-layer size: 28*28 = 784 pixels per image
output_node=10  # output-layer size: one logit per digit class 0-9
layer1_node=500 # hidden-layer size

batch_size=100     # examples per training batch; smaller -> closer to stochastic gradient descent, larger -> closer to full-batch gradient descent
training_steps=50000     # total number of training iterations

regularization_rate=0.0001   # coefficient of the L2 regularization term in the loss

learning_rate_base=0.8     # initial learning rate
learning_decay_steps=mnist.train.num_examples / batch_size  # decay interval: one decay step per epoch
learning_decay_rate=0.99       # multiplicative learning-rate decay factor

moving_average_decay_rate=0.99       # decay used by the exponential moving average of the weights
#给定神经网络的输入和所有参数,计算神经网络的前向传播结果。一共三层
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Forward pass of the 3-layer network (input -> ReLU hidden -> linear output).

    Args:
        input_tensor: batch of flattened images, shape [None, input_node].
        avg_class: an ExponentialMovingAverage object, or None. When given,
            the shadow (moving-average) values of the parameters are used
            instead of their current values.
        weights1, biases1: hidden-layer parameters.
        weights2, biases2: output-layer parameters.

    Returns:
        Unnormalized logits of shape [None, output_node]. No softmax is
        applied here; the loss op applies it internally.
    """
    # Idiom fix: identity comparison with None (`is None`) instead of `== None`.
    if avg_class is None:
        # Use the parameters' current values directly.
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    # Use the moving-average (shadow) values of every parameter.
    layer1 = tf.nn.relu(
        tf.matmul(input_tensor, avg_class.average(weights1))
        + avg_class.average(biases1))
    return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)
def train(mnist):
    """Build the TF 1.x graph and train the MNIST classifier.

    Trains with mini-batch gradient descent plus L2 regularization, maintains
    an exponential moving average of all trainable variables, reports
    validation accuracy every 1000 steps and test accuracy at the end.
    """
    # Placeholders: a mini-batch of examples is fed in at every step.
    x = tf.placeholder(tf.float32, [None, input_node], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, output_node], name='y-input')

    # Hidden-layer parameters.
    weights1 = tf.Variable(tf.truncated_normal([input_node, layer1_node], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[layer1_node]))
    # Output-layer parameters.
    weights2 = tf.Variable(tf.truncated_normal([layer1_node, output_node], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[output_node]))

    # Forward pass with the variables' current values (used for the loss).
    y = inference(x, None, weights1, biases1, weights2, biases2)

    # Step counter; marked non-trainable so the optimizer never updates it.
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay_rate, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # Forward pass with the moving-average shadow values (used for evaluation).
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)

    # Cross-entropy loss. sparse_softmax_* expects integer class ids, so the
    # one-hot labels are converted back with argmax along axis 1.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # L2 regularization on the weights only; biases are conventionally excluded.
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
    regularization = regularizer(weights1) + regularizer(weights2)

    # Total loss = cross-entropy + regularization.
    loss = cross_entropy_mean + regularization

    # Exponentially decayed learning rate (staircase: decays once per epoch).
    learning_rate = tf.train.exponential_decay(
        learning_rate_base, global_step,
        learning_decay_steps, learning_decay_rate, staircase=True)
    # BUG FIX: the original passed the constant learning_decay_rate (0.99) to
    # the optimizer, so the decayed learning_rate tensor above was never used.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # One op that both applies the gradient update and refreshes the shadow
    # averages; equivalent to wrapping them in tf.control_dependencies + tf.no_op.
    train_op = tf.group(train_step, variables_averages_op)

    # Accuracy of the moving-average model: the fraction of rows where the
    # arg-max of the prediction matches the arg-max of the one-hot label.
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        # Initialize all variables.
        tf.global_variables_initializer().run()
        # Validation data: monitored during training to judge progress.
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        # Test data: touched only once, after training, as the final score.
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}

        for i in range(training_steps):
            # Report validation accuracy every 1000 steps.
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s),validation accuracy using average model is %g" % (i, validate_acc))

            # Fetch the next mini-batch and run one training step.
            xs, ys = mnist.train.next_batch(batch_size)
            sess.run(train_op, feed_dict={x: xs, y_: ys})

        # Final accuracy on the held-out test set.
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s),test accuracy using average model is %g" % (training_steps, test_acc))
# NOTE(review): "%%time" is a Jupyter cell magic, not valid Python — it is a
# SyntaxError in a plain script, so it is kept here only as a comment.
# %%time
def main(argv=None):
    """Entry point: train the network on the module-level MNIST dataset."""
    train(mnist)


if __name__ == '__main__':
    main()

输出

After 0 training step(s),validation accuracy using average model is 0.1066
After 1000 training step(s),validation accuracy using average model is 0.9784
After 2000 training step(s),validation accuracy using average model is 0.983
After 3000 training step(s),validation accuracy using average model is 0.9842
After 4000 training step(s),validation accuracy using average model is 0.9838
After 5000 training step(s),validation accuracy using average model is 0.9838
After 6000 training step(s),validation accuracy using average model is 0.9846
After 7000 training step(s),validation accuracy using average model is 0.9846
After 8000 training step(s),validation accuracy using average model is 0.9852
After 9000 training step(s),validation accuracy using average model is 0.9844
After 10000 training step(s),validation accuracy using average model is 0.9852
After 11000 training step(s),validation accuracy using average model is 0.985
After 12000 training step(s),validation accuracy using average model is 0.9862
After 13000 training step(s),validation accuracy using average model is 0.984
After 14000 training step(s),validation accuracy using average model is 0.9844
After 15000 training step(s),validation accuracy using average model is 0.9846
After 16000 training step(s),validation accuracy using average model is 0.9852
After 17000 training step(s),validation accuracy using average model is 0.9842
After 18000 training step(s),validation accuracy using average model is 0.9854
After 19000 training step(s),validation accuracy using average model is 0.9852
After 20000 training step(s),validation accuracy using average model is 0.986
After 21000 training step(s),validation accuracy using average model is 0.9862
After 22000 training step(s),validation accuracy using average model is 0.9862
After 23000 training step(s),validation accuracy using average model is 0.9858
After 24000 training step(s),validation accuracy using average model is 0.9858
After 25000 training step(s),validation accuracy using average model is 0.986
After 26000 training step(s),validation accuracy using average model is 0.986
After 27000 training step(s),validation accuracy using average model is 0.9862
After 28000 training step(s),validation accuracy using average model is 0.986
After 29000 training step(s),validation accuracy using average model is 0.9858
After 30000 training step(s),validation accuracy using average model is 0.9862
After 31000 training step(s),validation accuracy using average model is 0.9862
After 32000 training step(s),validation accuracy using average model is 0.9856
After 33000 training step(s),validation accuracy using average model is 0.9856
After 34000 training step(s),validation accuracy using average model is 0.9862
After 35000 training step(s),validation accuracy using average model is 0.9854
After 36000 training step(s),validation accuracy using average model is 0.986
After 37000 training step(s),validation accuracy using average model is 0.9868
After 38000 training step(s),validation accuracy using average model is 0.9858
After 39000 training step(s),validation accuracy using average model is 0.9858
After 40000 training step(s),validation accuracy using average model is 0.9858
After 41000 training step(s),validation accuracy using average model is 0.9792
After 42000 training step(s),validation accuracy using average model is 0.9822
After 43000 training step(s),validation accuracy using average model is 0.983
After 44000 training step(s),validation accuracy using average model is 0.9836
After 45000 training step(s),validation accuracy using average model is 0.983
After 46000 training step(s),validation accuracy using average model is 0.9834
After 47000 training step(s),validation accuracy using average model is 0.9854
After 48000 training step(s),validation accuracy using average model is 0.9856
After 49000 training step(s),validation accuracy using average model is 0.9842
After 50000 training step(s),test accuracy using average model is 0.9823
Wall time: 3min 46s

猜你喜欢

转载自blog.csdn.net/qq_38375534/article/details/88743388