TensorFlow Complete Example: The MNIST Digit Recognition Problem

Complete example program

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

INPUT_NODE = 784     # number of input nodes (28x28 image pixels)
OUTPUT_NODE = 10     # number of output nodes (one per digit class)
LAYER1_NODE = 500    # number of nodes in the hidden layer

BATCH_SIZE = 100     # number of examples in each training batch

# Model hyperparameters
LEARNING_RATE_BASE = 0.8       # initial learning rate
LEARNING_RATE_DECAY = 0.99     # learning-rate decay rate
REGULARIZATION_RATE = 0.0001   # weight of the L2 regularization term
TRAINING_STEPS = 5000          # number of training steps
MOVING_AVERAGE_DECAY = 0.99    # moving-average decay rate

def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    # Forward pass without the moving-average class
    if avg_class is None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2

    else:
        # Forward pass using the moving averages of the parameters
        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)

def train(mnist):
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
    # Hidden-layer parameters.
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
    # Output-layer parameters.
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # Forward pass that does not use moving averages
    y = inference(x, None, weights1, biases1, weights2, biases2)
    
    # Define the training-step counter and the associated moving-average class
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
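    # With num_updates = global_step, the decay actually applied is
    # min(MOVING_AVERAGE_DECAY, (1 + global_step) / (10 + global_step)), and each
    # shadow variable is updated as shadow = decay * shadow + (1 - decay) * variable.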
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)
    
    # Cross entropy and its mean over the batch
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    
    # Loss: cross entropy plus L2 regularization
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weights2)
    loss = cross_entropy_mean + regularization
    
    # Exponentially decaying learning rate.
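    # With staircase=True the rate decays once per epoch:
    # learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps),
    # where decay_steps = mnist.train.num_examples / BATCH_SIZE.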
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    
    # Optimize the loss function
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    
    # Group parameter updates and moving-average updates into one training op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Accuracy computation
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    
    # Create a session and run the training process.
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels} 
        
        # Train the network iteratively.
        for i in range(TRAINING_STEPS):
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy using average model is %g " % (i, validate_acc))
            
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})

        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average model is %g" % (TRAINING_STEPS, test_acc))

def main(argv=None):
    mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    main()

"""
Extracting ../../../datasets/MNIST_data/train-images-idx3-ubyte.gz
Extracting ../../../datasets/MNIST_data/train-labels-idx1-ubyte.gz
Extracting ../../../datasets/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting ../../../datasets/MNIST_data/t10k-labels-idx1-ubyte.gz
After 0 training step(s), validation accuracy using average model is 0.106 
After 1000 training step(s), validation accuracy using average model is 0.9764 
After 2000 training step(s), validation accuracy using average model is 0.9822 
After 3000 training step(s), validation accuracy using average model is 0.9826 
After 4000 training step(s), validation accuracy using average model is 0.9846 
After 5000 training step(s), test accuracy using average model is 0.9823
"""

Using the validation set to evaluate the model

A neural-network program typically has many hyperparameters to choose: the initial learning rate, the learning-rate decay, the number of hidden-layer nodes, the number of training steps, and so on. In most cases they have to be tuned experimentally. Using the test data to select them risks overfitting the model to the test set, costing it the ability to generalize. Because the ultimate goal of a model is to make predictions on unseen data, the test data must stay invisible during training so that it can give an unbiased estimate of performance on such data. To compare the model under different hyperparameter settings, a portion of the training data is therefore held out as a validation set.
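A minimal sketch of such a split, assuming plain NumPy arrays images and labels (the input_data reader used above already carves out mnist.validation in exactly this way):

import numpy as np

def split_validation(images, labels, n_validation=5000):
    # Hold out the last n_validation examples as a validation set, so
    # hyperparameters can be compared without ever touching the test set.
    return (images[:-n_validation], labels[:-n_validation],
            images[-n_validation:], labels[-n_validation:])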

Variable management

In the program above, the inference function takes all of the network's parameters as arguments. When the network becomes more complex and has many more parameters, a better way of passing and managing them is needed. TensorFlow provides a mechanism for creating or retrieving a variable by its name, so different functions can share a variable simply by referring to that name.

import tensorflow as tf

with tf.variable_scope("foo"):
    v = tf.get_variable("v", [1], initializer=tf.constant_initializer(1.0))

# with tf.variable_scope("foo"):
#     v = tf.get_variable("v", [1])
# Raises an error because the variable already exists.

with tf.variable_scope("foo", reuse=True):
    v1 = tf.get_variable("v", [1])
print(v == v1)
# True

# with tf.variable_scope("bar", reuse=True):
#     v = tf.get_variable("v", [1])
# Raises an error because the variable does not exist.

With reuse=True, tf.get_variable fetches a variable that has already been created and raises an error if it does not exist; with reuse=False it creates a new variable and raises an error if one with that name already exists. The default is reuse=False.

with tf.variable_scope("root"):
    print tf.get_variable_scope().reuse
    
    with tf.variable_scope("foo", reuse=True):
        print tf.get_variable_scope().reuse
        
        with tf.variable_scope("bar"):
            print tf.get_variable_scope().reuse
            
    print tf.get_variable_scope().reuse

"""
False
True
True
False
"""

Managing variables with variable_scope

v1 = tf.get_variable("v", [1])
print(v1.name)

with tf.variable_scope("foo", reuse=True):
    v2 = tf.get_variable("v", [1])
print(v2.name)

with tf.variable_scope("foo"):
    with tf.variable_scope("bar"):
        v3 = tf.get_variable("v", [1])
        print(v3.name)

v4 = tf.get_variable("v1", [1])
print(v4.name)

"""
v:0
foo/v:0
foo/bar/v:0
v1:0
"""

A variable can also be retrieved by its full name:

with tf.variable_scope("",reuse=True):
    v5 = tf.get_variable("foo/bar/v", [1])
    print v5 == v3
    v6 = tf.get_variable("v1", [1])     
    print v6 == v4

"""
True
True
"""

Finally, the commonly used variable initializers are:
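
  • tf.constant_initializer: initializes the variable to a given constant
  • tf.random_normal_initializer: samples from a normal distribution
  • tf.truncated_normal_initializer: samples from a normal distribution, re-drawing any value that falls more than two standard deviations from the mean
  • tf.random_uniform_initializer: samples from a uniform distribution
  • tf.zeros_initializer / tf.ones_initializer: initializes to all zeros / all ones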

TensorFlow model persistence

So that training results can be reused, the trained neural network needs to be persisted. TensorFlow provides a simple API for saving a trained model and restoring it from the persisted model files.

import tensorflow as tf

v1 = tf.Variable(tf.random_normal([1], stddev=1, seed=1))
v2 = tf.Variable(tf.random_normal([1], stddev=1, seed=1))
result = v1 + v2

init_op = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init_op)
    saver.save(sess, "Saved_model/model.ckpt")

with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run(result))

Although the program names a single file path, three files appear in that directory:

  • model.ckpt.meta: the structure of the TensorFlow computation graph
  • model.ckpt: the value of every variable in the program
  • checkpoint: a list of all the model files in the directory
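
For example, tf.train.latest_checkpoint reads that checkpoint file and returns the path of the most recent save, which can then be passed to saver.restore:

model_path = tf.train.latest_checkpoint("Saved_model/")
print(model_path)  # e.g. Saved_model/model.ckpt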

Loading the persisted graph directly. Because v3 was never saved, running this raises an error:

saver = tf.train.import_meta_graph("Saved_model/model.ckpt.meta")
v3 = tf.Variable(tf.random_normal([1], stddev=1, seed=1))

with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run(v1))
    print(sess.run(v2))
    print(sess.run(v3))

"""
INFO:tensorflow:Restoring parameters from Saved_model/model.ckpt
[-0.81131822]
[-0.81131822]
---------------------------------------------------------------------------
FailedPreconditionError                   Traceback (most recent call last)
<ipython-input-4-88a9db3712de> in <module>()
      6     print sess.run(v1)
      7     print sess.run(v2)
----> 8     print sess.run(v3)
...
"""

Renaming variables on save and restore

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="other-v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="other-v2")
saver = tf.train.Saver({"v1": v1, "v2": v2})
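
The dictionary maps each name under which a value was saved to the variable that should receive it on restore. A short sketch of the restore (assuming a checkpoint whose variables were originally saved under the names v1 and v2):

with tf.Session() as sess:
    # Loads the values stored as "v1" and "v2" into other-v1 and other-v2.
    saver.restore(sess, "Saved_model/model.ckpt")
    print(sess.run([v1, v2]))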

Using moving averages

v = tf.Variable(0, dtype=tf.float32, name="v")
for variables in tf.global_variables(): print(variables.name)
    
ema = tf.train.ExponentialMovingAverage(0.99)
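# apply() creates a shadow copy of each variable and returns an op that
# updates every shadow value toward its variable.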
maintain_averages_op = ema.apply(tf.global_variables())
for variables in tf.global_variables(): print(variables.name)

"""
v:0
v:0
v/ExponentialMovingAverage:0
"""

Saving a moving-average model

saver = tf.train.Saver()
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    
    sess.run(tf.assign(v, 10))
    sess.run(maintain_averages_op)
    # Saving stores both variables: v:0 and v/ExponentialMovingAverage:0.
    saver.save(sess, "Saved_model/model2.ckpt")
    print(sess.run([v, ema.average(v)]))

"""
[10.0, 0.099999905]
"""

Loading a moving-average model

v = tf.Variable(0, dtype=tf.float32, name="v")

# Rename on restore: assign the saved moving average of v directly to v.
saver = tf.train.Saver({"v/ExponentialMovingAverage": v})
with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model2.ckpt")
    print(sess.run(v))

"""
0.0999999
"""

Using the variables_to_restore function

import tensorflow as tf
v = tf.Variable(0, dtype=tf.float32, name="v")
ema = tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())

# variables_to_restore generates the renaming dictionary shown above automatically.
saver = tf.train.Saver(ema.variables_to_restore())
with tf.Session() as sess:
    saver.restore(sess, "Saved_model/model2.ckpt")
    print(sess.run(v))

"""
{u'v/ExponentialMovingAverage': <tf.Variable 'v:0' shape=() dtype=float32_ref>}
INFO:tensorflow:Restoring parameters from Saved_model/model2.ckpt
0.0999999
"""

Saving a model as a .pb file

import tensorflow as tf
from tensorflow.python.framework import graph_util

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name = "v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name = "v2")
result = v1 + v2

init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    graph_def = tf.get_default_graph().as_graph_def()
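    # Keep only the subgraph needed to compute the node named 'add' (the op
    # behind result = v1 + v2), converting its variables into constants.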
    output_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, ['add'])
    with tf.gfile.GFile("Saved_model/combined_model.pb", "wb") as f:
        f.write(output_graph_def.SerializeToString())

"""
INFO:tensorflow:Froze 2 variables.
Converted 2 variables to const ops.
"""

Loading a .pb file

from tensorflow.python.platform import gfile
with tf.Session() as sess:
    model_filename = "Saved_model/combined_model.pb"
   
    with gfile.FastGFile(model_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

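    # "add:0" is the first output tensor of the node named "add".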
    result = tf.import_graph_def(graph_def, return_elements=["add:0"])
    print(sess.run(result))

"""
[array([ 3.], dtype=float32)]
"""

Persistence internals and data formats

  • tf.GraphDef (.pb): a protobuf that represents the TensorFlow training or computation graph. It contains operators, tensors, and variable definitions.
  • CheckPoint (.ckpt): serialized variables from a TensorFlow graph. Since it does not contain the graph structure, it cannot be interpreted by itself.
  • FrozenGraphDef: a subclass of GraphDef that does not contain variables. A GraphDef can be converted to a FrozenGraphDef by taking a CheckPoint and a GraphDef and converting each variable into a constant using the value retrieved from the CheckPoint.
  • SavedModel: a GraphDef and CheckPoint together with a signature that labels the input and output arguments of a model. A GraphDef and CheckPoint can be extracted from a SavedModel.
  • TensorFlow Lite model (.tflite): a serialized FlatBuffer that contains TensorFlow Lite operators and tensors for the TensorFlow Lite interpreter; similar to a FrozenGraphDef.
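
The examples above only exercise the GraphDef and checkpoint routes. As a minimal sketch of the SavedModel route (assuming TensorFlow 1.4+, where tf.saved_model.simple_save is available; the one-variable graph here is hypothetical), a graph and its variables can be exported together with a signature naming the inputs and outputs:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 1], name="x")
w = tf.Variable(tf.constant(2.0, shape=[1]), name="w")
y = x * w

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Writes saved_model.pb plus a variables/ checkpoint under the export directory.
    tf.saved_model.simple_save(sess, "Saved_model/simple",
                               inputs={"x": x}, outputs={"y": y})

# Restore into a fresh graph using the "serve" tag that simple_save attaches.
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], "Saved_model/simple")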

TensorFlow best-practice example program

Building on the code at the start of this article, this version adds variable management, model persistence, and a cleaner program structure.

mnist_inference.py

import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2

mnist_train.py

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import os

BATCH_SIZE = 100 
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99 
MODEL_SAVE_PATH = "MNIST_model/"
MODEL_NAME = "mnist_model"

def train(mnist):
    # Define the input and output placeholders.
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    
    # Define the loss function, learning rate, moving-average op, and training step.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
        
    # Create the TensorFlow persistence class.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    main()

"""
Extracting ../../../datasets/MNIST_data/train-images-idx3-ubyte.gz
Extracting ../../../datasets/MNIST_data/train-labels-idx1-ubyte.gz
Extracting ../../../datasets/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting ../../../datasets/MNIST_data/t10k-labels-idx1-ubyte.gz
After 1 training step(s), loss on training batch is 2.57114.
After 1001 training step(s), loss on training batch is 0.25075.
After 2001 training step(s), loss on training batch is 0.180927.
After 3001 training step(s), loss on training batch is 0.138693.
After 4001 training step(s), loss on training batch is 0.141137.
After 5001 training step(s), loss on training batch is 0.118497.
After 6001 training step(s), loss on training batch is 0.113244.
After 7001 training step(s), loss on training batch is 0.0899573.
After 8001 training step(s), loss on training batch is 0.0841556.
After 9001 training step(s), loss on training batch is 0.0745659.
After 10001 training step(s), loss on training batch is 0.0665555.
After 11001 training step(s), loss on training batch is 0.0612771.
After 12001 training step(s), loss on training batch is 0.0586706.
After 13001 training step(s), loss on training batch is 0.0679592.
After 14001 training step(s), loss on training batch is 0.0538381.
After 15001 training step(s), loss on training batch is 0.0508026.
After 16001 training step(s), loss on training batch is 0.0472279.
After 17001 training step(s), loss on training batch is 0.0505102.
After 18001 training step(s), loss on training batch is 0.0527798.
After 19001 training step(s), loss on training batch is 0.0443641.
After 20001 training step(s), loss on training batch is 0.0450014.
After 21001 training step(s), loss on training batch is 0.0411893.
After 22001 training step(s), loss on training batch is 0.0466458.
After 23001 training step(s), loss on training batch is 0.035504.
After 24001 training step(s), loss on training batch is 0.0337035.
After 25001 training step(s), loss on training batch is 0.0363772.
After 26001 training step(s), loss on training batch is 0.035193.
After 27001 training step(s), loss on training batch is 0.0403096.
After 28001 training step(s), loss on training batch is 0.0372572.
After 29001 training step(s), loss on training batch is 0.03604.
"""

mnist_eval.py

import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import mnist_train

# Interval, in seconds, between two evaluations.
EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference.inference(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
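                    # The checkpoint filename ends in the global step,
                    # e.g. MNIST_model/mnist_model-29001 -> "29001".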
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    main()

"""
Extracting ../../../datasets/MNIST_data/train-images-idx3-ubyte.gz
Extracting ../../../datasets/MNIST_data/train-labels-idx1-ubyte.gz
Extracting ../../../datasets/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting ../../../datasets/MNIST_data/t10k-labels-idx1-ubyte.gz
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-4001
After 4001 training step(s), validation accuracy = 0.9826
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-5001
After 5001 training step(s), validation accuracy = 0.983
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-6001
After 6001 training step(s), validation accuracy = 0.9832
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-7001
After 7001 training step(s), validation accuracy = 0.9834
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-8001
After 8001 training step(s), validation accuracy = 0.9834
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-9001
After 9001 training step(s), validation accuracy = 0.9834
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-10001
After 10001 training step(s), validation accuracy = 0.9848
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-11001
After 11001 training step(s), validation accuracy = 0.9842
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-12001
After 12001 training step(s), validation accuracy = 0.9848
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-13001
After 13001 training step(s), validation accuracy = 0.9846
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-14001
After 14001 training step(s), validation accuracy = 0.9842
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-15001
After 15001 training step(s), validation accuracy = 0.984
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-16001
After 16001 training step(s), validation accuracy = 0.9852
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-17001
After 17001 training step(s), validation accuracy = 0.9848
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-18001
After 18001 training step(s), validation accuracy = 0.985
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-19001
After 19001 training step(s), validation accuracy = 0.9858
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-20001
After 20001 training step(s), validation accuracy = 0.9844
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-21001
After 21001 training step(s), validation accuracy = 0.986
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-22001
After 22001 training step(s), validation accuracy = 0.9854
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-23001
After 23001 training step(s), validation accuracy = 0.9842
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-25001
After 25001 training step(s), validation accuracy = 0.9854
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-26001
After 26001 training step(s), validation accuracy = 0.9858
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-27001
After 27001 training step(s), validation accuracy = 0.9858
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-28001
After 28001 training step(s), validation accuracy = 0.9858
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-29001
After 29001 training step(s), validation accuracy = 0.9856
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-29001
After 29001 training step(s), validation accuracy = 0.9856
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-29001
After 29001 training step(s), validation accuracy = 0.9856
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-29001
After 29001 training step(s), validation accuracy = 0.9856
INFO:tensorflow:Restoring parameters from MNIST_model/mnist_model-29001
After 29001 training step(s), validation accuracy = 0.9856
"""

Reposted from blog.csdn.net/a40850273/article/details/84313382