TensorFlow Study Notes: Getting Started with MNIST


Getting Started with TensorFlow MNIST

References

Getting Started with MNIST

Fully Connected Neural Network for MNIST

Initial Implementation

To recognize MNIST handwritten digits with a fully connected neural network, the handwritten digit images are fed into a fully connected network, the network is trained, and the recognition result is read off the output.

The implementation code is as follows:

# -*- coding: utf-8 -*-
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Constants describing the MNIST data set
INPUT_NODE = 784
OUTPUT_NODE = 10

# Neural network configuration
LAYER1_NODE = 500

BATCH_SIZE = 100

LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 50000
# Moving-average decay rate
MOVING_AVERAGE_DECAY = 0.99
# Where to save the trained model
MODEL_SAVE_PATH = "MNIST_model/"
MODEL_NAME = "mnist_model"


# Helper function: given the input and the network parameters,
# compute the forward-pass result of the network
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    # When no moving-average class is provided, use the current parameter values
    if avg_class is None:
        # Forward pass of the hidden layer, using the ReLU activation
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)

        # Forward pass of the output layer
        return tf.matmul(layer1, weights2) + biases2
    else:
        # First obtain the moving averages of the variables via avg_class.average,
        # then compute the corresponding forward-pass result
        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1))
                            + avg_class.average(biases1))

        return tf.matmul(layer1, avg_class.average(weights2)) \
               + avg_class.average(biases2)


# Model training
def train(mnist):
    # ============= Define the network structure ============
    # Placeholders for the input data and the ground-truth labels
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name="y-input")

    # Parameters of the hidden layer
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))

    # Parameters of the output layer
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # Forward pass with the current parameter values (no moving averages)
    y = inference(x, None, weights1, biases1, weights2, biases2)

    # Variable that stores the number of training steps
    global_step = tf.Variable(0, trainable=False)

    # Initialize the moving-average class, given the decay rate
    # and the training-step variable
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)

    # Apply moving averages to all trainable network parameters
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Forward pass using the moving-averaged parameters
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)

    # ============ Define the loss function ===============
    # Cross entropy measures the gap between the predictions and the true labels
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    # Average cross entropy over all examples in the current batch
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # L2 regularization
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Regularization loss of the model (weights only; biases are not regularized)
    regularization = regularizer(weights1) + regularizer(weights2)
    # The total loss is the sum of the cross-entropy loss and the regularization loss
    loss = cross_entropy_mean + regularization

    # ========== Define the training procedure ==============
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,  # current step
                                               # steps for one pass over the training data
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)

    # Optimize the parameters with gradient descent
    train_step = tf.train.GradientDescentOptimizer(learning_rate). \
        minimize(loss, global_step=global_step)

    # During training, update both the network parameters
    # and the moving average of each parameter
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name="train")

    # ========= Compute accuracy ============
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # ======== Training loop ===========
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # ====== Initialize the model parameters ========
        tf.global_variables_initializer().run()

        # ========== Input data =========
        # Prepare the validation data
        validate_feed = {x: mnist.validation.images,
                         y_: mnist.validation.labels}

        # Prepare the test data
        test_feed = {x: mnist.test.images,
                     y_: mnist.test.labels}

        # Train the network iteratively
        for i in range(TRAINING_STEPS):
            # Every 1000 steps, report accuracy on the validation set
            # and save a checkpoint
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy using average model is %g" % (i, validate_acc))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

            # ======= Fetch one batch of training data ========
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # ====== Feed the batch into the network and run one training step =========
            sess.run(train_op, feed_dict={x: xs, y_: ys})

        # ======== After training, evaluate on the test set ==========
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average model is %g" % (TRAINING_STEPS, test_acc))


def main(argv=None):
    # Load the MNIST data set; adjust the path to your local setup
    mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()

The code above defines a two-layer fully connected neural network that performs the MNIST handwritten digit recognition task. To follow it, consult the official tutorials for basic TensorFlow constructs such as Variable and placeholder, and be familiar with the forward pass and the training procedure of a neural network.
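As a quick refresher, the difference between the two constructs can be shown in a minimal sketch (the names w, x, y here are illustrative only, not from the code above): a Variable holds state that persists across session runs, while a placeholder is an input that must be fed on every run.

import tensorflow as tf

# A Variable holds trainable state that persists across session runs
w = tf.Variable(tf.zeros([1]), name="w")

# A placeholder is a graph input that must be fed at execution time
x = tf.placeholder(tf.float32, shape=[1], name="x")
y = w + x

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # Variables must be initialized
    print(sess.run(y, feed_dict={x: [1.0]}))     # placeholders are fed per run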

As the code shows, a TensorFlow neural network application first defines the network structure and its forward pass (that is, all the computations in the computation graph), and then simply executes the computations to train the model: when a training step runs, TensorFlow automatically carries out both the forward and the backward pass.
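That the backward pass comes for free can be seen in isolation in a minimal sketch (the symbols below are illustrative only): the gradient is derived from the graph definition, with no backpropagation code written by hand.

import tensorflow as tf

x = tf.placeholder(tf.float32, name="x")
w = tf.Variable(2.0, name="w")
loss = tf.square(w * x - 1.0)  # forward pass: (w*x - 1)^2

# TensorFlow derives d(loss)/dw from the graph automatically
grad_w = tf.gradients(loss, [w])[0]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(grad_w, feed_dict={x: 3.0}))  # 2*(2*3 - 1)*3 = 30.0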

Refactoring with Variable Management

In the implementation above, the network parameters are passed around as function arguments. Once the network structure grows more complex and has more variables, this approach badly hurts readability. TensorFlow's variable-management mechanism, tf.get_variable together with tf.variable_scope, solves this by letting code create and retrieve variables by name, as the refactored version below does.
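The mechanism in isolation, as a minimal sketch (not taken from the original code): tf.get_variable creates a variable on first use, and inside a scope opened with reuse=True it retrieves the existing variable of the same name.

import tensorflow as tf

with tf.variable_scope("foo"):
    v = tf.get_variable("v", [1], initializer=tf.constant_initializer(1.0))

# reuse=True makes tf.get_variable fetch the existing variable by name
with tf.variable_scope("foo", reuse=True):
    v1 = tf.get_variable("v", [1])

print(v is v1)  # True: both names refer to the same variable object

The refactored forward-pass module, mnist_inference.py: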

# -*- coding: utf-8 -*-
# @File    : mnist_inference.py
import tensorflow as tf

# Neural network configuration
INPUT_NODE = 784
OUTPUT_NODE = 10

LAYER1_NODE = 500


# Obtain variables through tf.get_variable. During training these variables are created;
# during testing their values are loaded from the saved model. Conveniently, because the
# moving-average shadow variables can be renamed when loading, the same name can refer to
# the variable itself during training and to its moving average during testing.
# This function also adds the variable's regularization loss to a loss collection.
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weight", shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.1))

    # When a regularizer is given, add the regularization loss
    # of this variable to the collection named "losses"
    if regularizer is not None:
        tf.add_to_collection("losses", regularizer(weights))
    return weights


# Helper function: given the input and the network parameters,
# compute the forward-pass result of the network
def inference(input_tensor, regularizer):
    # Declare the variables of the first layer and compute its forward pass
    with tf.variable_scope("layer1"):
        # Here there is no essential difference between tf.get_variable and tf.Variable,
        # because this function is not called more than once in the same program,
        # whether training or testing.
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE],
                                      regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    # Declare the variables of the second layer and compute its forward pass
    with tf.variable_scope("layer2"):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE],
                                      regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    # Return the final forward-pass result
    return layer2

The code above defines the forward pass of the network. Whether training or testing, callers simply invoke this function to run the forward pass, without worrying about the concrete network structure.
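The variable scopes also make it possible to run a second forward pass in the same graph, should that ever be needed, by reusing the existing variables. A minimal sketch, assuming mnist_inference is importable from the MNIST_example package as in the scripts below:

import tensorflow as tf
from MNIST_example import mnist_inference

x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE])
y_train = mnist_inference.inference(x, None)  # the first call creates the variables

# A second call in the same graph must reuse the already-created variables
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    y_eval = mnist_inference.inference(x, None)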

The training code:

# -*- coding: utf-8 -*-
# @File    : mnist_train.py
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from MNIST_example import mnist_inference

# Neural network configuration
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
# Moving-average decay rate
MOVING_AVERAGE_DECAY = 0.99

# Where to save the trained model
MODEL_SAVE_PATH = "MNIST_model/"
MODEL_NAME = "mnist_model.ckpt"


# Model training
def train(mnist):
    # ============= Define the network structure ============
    # Placeholders for the input data and the ground-truth labels
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-output")

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    # Forward pass with the current parameter values (no moving averages)
    y = mnist_inference.inference(x, regularizer)

    # Variable that stores the number of training steps
    global_step = tf.Variable(0, trainable=False)

    # Initialize the moving-average class, given the decay rate
    # and the training-step variable
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)

    # Apply moving averages to all trainable network parameters
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # ============ Define the loss function ===============
    # Cross entropy measures the gap between the predictions and the true labels
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    # Average cross entropy over all examples in the current batch
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # The total loss is the cross-entropy loss plus the
    # regularization losses collected in "losses"
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))

    # ========== Define the training procedure ==============
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,  # current step
                                               # steps for one pass over the training data
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)

    # Optimize the parameters with gradient descent
    train_step = tf.train.GradientDescentOptimizer(learning_rate). \
        minimize(loss, global_step=global_step)

    # During training, update both the network parameters
    # and the moving average of each parameter
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name="train")

    # ======== Training loop ===========
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # ====== Initialize the model parameters ========
        tf.global_variables_initializer().run()

        # Train the network iteratively
        for i in range(TRAINING_STEPS):
            # ======= Fetch one batch of training data ========
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # ====== Feed the batch into the network and run one training step =========
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})

            # Every 1000 steps, report the loss on the current
            # training batch and save a checkpoint
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main(argv=None):
    # Load the MNIST data set; adjust the path to your local setup
    mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()

The evaluation code:

# -*- coding: utf-8 -*-
# @File    : mnist_eval.py
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from MNIST_example import mnist_inference, mnist_train

# Interval in seconds between two evaluations of the latest checkpoint
EVAL_INFERENCE_SECS = 10


def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Placeholders for the input data and the ground-truth labels
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-output")

        validate_feed = {x: mnist.validation.images,
                         y_: mnist.validation.labels}

        # Forward pass; no regularizer is needed at evaluation time
        y = mnist_inference.inference(x, None)

        # Compute accuracy
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # variables_to_restore maps the moving-average shadow names to the
        # variables, so the saved averaged values are loaded into the model
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                # Find the latest checkpoint in the save directory
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the step count from the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy using average model is %g" % (
                        global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return

            time.sleep(EVAL_INFERENCE_SECS)


def main(argv=None):
    # Load the MNIST data set; adjust the path to your local setup
    mnist = input_data.read_data_sets("../../../datasets/MNIST_data", one_hot=True)
    evaluate(mnist)


if __name__ == '__main__':
    tf.app.run()
