LeNet-5 Convolutional Neural Network

This is a simplified version of the LeNet-5 model.
Layer 1 (input): a 28x28x1 image.
Layer 2 (convolution): 32 filters of size 5x5 with all-zero ("SAME") padding, giving a 28x28x32 output.
Layer 3 (pooling): a 2x2 filter with stride [2, 2], giving a 14x14x32 output.
Layer 4 (convolution): 64 filters of size 5x5 with all-zero padding, giving a 14x14x64 output.
Layer 5 (pooling): same as layer 3, giving a 7x7x64 output.
Layer 6 (fully connected): takes the 7x7x64 = 3136 flattened nodes as input and outputs 512 nodes.
Layer 7 (fully connected): outputs 10 nodes, one per digit class.
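
A quick sanity check on these sizes: with padding='SAME', TensorFlow computes each spatial output dimension as ceil(input / stride), regardless of filter size. A minimal sketch (not part of the original scripts):

import math

def same_out(size, stride):
    # With padding='SAME', out = ceil(in / stride), independent of the filter size.
    return math.ceil(size / stride)

size = 28
size = same_out(size, 1)  # conv1, stride 1 -> 28
size = same_out(size, 2)  # pool1, stride 2 -> 14
size = same_out(size, 1)  # conv2, stride 1 -> 14
size = same_out(size, 2)  # pool2, stride 2 -> 7
print(size * size * 64)   # 3136 nodes feeding the first fully connected layer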

The code and its output follow.

LeNet_5.py (named with an underscore so that import LeNet_5 below works)

import tensorflow as tf

input_node = 784
output_node = 10
image_size = 28
num_labels = 10

conv1_deep = 32
conv1_size = 5

conv2_deep = 64
conv2_size = 5

fc_size = 512


def inference(input_tensor, train, regularizer):

    # Biases all start at zero; weights use truncated normals with stddev 0.1.
    init2 = tf.constant_initializer(0.0)
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable('weight', [conv1_size, conv1_size, 1, conv1_deep], initializer=tf.truncated_normal_initializer(stddev=0.1))  # 5*5*1*32
        conv1_biases = tf.get_variable('bias', [conv1_deep], initializer=init2)
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    with tf.variable_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable('weight', [conv2_size, conv2_size, conv1_deep, conv2_deep], initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable('bias', [conv2_deep], initializer=init2)
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the final feature map into a vector for the fully connected layers.
    # For 28x28 inputs this is 7 * 7 * 64 = 3136 nodes per example.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshape = tf.reshape(pool2, [pool_shape[0], nodes])

    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable('weight', [nodes, fc_size], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias', [fc_size], initializer=init2)
        fc1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
        if train:
            # Apply dropout only during training; each unit is kept with probability 0.5.
            fc1 = tf.nn.dropout(fc1, 0.5)
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable('weight', [fc_size, num_labels], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable('bias', [num_labels], initializer=init2)
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit
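
Before wiring up training, the forward pass can be smoke-tested on its own. The snippet below is illustrative only (it assumes the module is importable as LeNet_5, matching the import in MNIST_train.py): it feeds a small batch of random "images" through inference and checks that the logits come out with shape (batch, 10).

import numpy as np
import tensorflow as tf

import LeNet_5

with tf.Graph().as_default():
    # The batch dimension must be static because inference reshapes using
    # the batch size taken from pool2's static shape (pool_shape[0]).
    x = tf.placeholder(tf.float32, [4, LeNet_5.image_size, LeNet_5.image_size, 1])
    logits = LeNet_5.inference(x, train=False, regularizer=None)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(logits, feed_dict={x: np.random.rand(4, 28, 28, 1)})
        print(out.shape)  # (4, 10)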

MNIST_train.py

import os
import numpy as np

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the constants and the forward-pass function defined in LeNet_5.py
import LeNet_5

# Hyperparameters for the network
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# Path and file name under which the model is saved
MODEL_SAVE_PATH = "model/"
MODEL_NAME = "model.ckpt"

def train(mnist):
    # Define the input and output placeholders.
    # The input placeholder is a four-dimensional tensor.
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,              # number of examples in a batch
        LeNet_5.image_size,      # image height
        LeNet_5.image_size,      # image width
        1],                      # image depth; RGB images would use 3
        name='x-input')
    y_ = tf.placeholder(tf.float32, [None, LeNet_5.output_node], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Use the forward pass defined in LeNet_5.py directly.
    y = LeNet_5.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss, the learning rate schedule, the moving-average op, and the training step.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples/BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Number of correct predictions in a batch; accumulated below for validation accuracy.
    correct_count = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), dtype=tf.int32))
    # Bundle the gradient step and the moving-average update into one training op.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # Validation accuracy is checked inline below; final testing can be done by a separate program.
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # As with the placeholder, reshape the training batch into a four-dimensional
            # tensor before passing it to sess.run.
            reshaped_xs = np.reshape(xs, (BATCH_SIZE, LeNet_5.image_size, LeNet_5.image_size, 1))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
            # Report progress and save the model every 200 steps.
            if i % 200 == 0:
                # The loss on the current training batch gives a rough picture of how
                # training is going; validation accuracy is computed just below.
                print("After %d training step(s), loss on training batch is %f." % (step, loss_value))
                correct_total = 0
                for j in range(mnist.validation.num_examples // BATCH_SIZE):
                    xs_, ys_ = mnist.validation.next_batch(BATCH_SIZE)
                    rx = np.reshape(xs_, (BATCH_SIZE, LeNet_5.image_size, LeNet_5.image_size, 1))
                    correct_total += sess.run(correct_count, feed_dict={x: rx, y_: ys_})

                print("After %d training step(s), correct rate is %f." % (step, correct_total / mnist.validation.num_examples))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets("C:/Users/tang/Desktop/deeplearning/mnist数据集", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
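
For reference, the separate evaluation program mentioned in the training script's comments might look like the sketch below. This is an assumption-laden illustration, not the author's actual script: the evaluate function, the use of variables_to_restore to load the moving-average shadow values, and the "model/" checkpoint path (taken from MODEL_SAVE_PATH above) are stitched together here purely for demonstration.

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import LeNet_5

def evaluate(mnist):
    with tf.Graph().as_default():
        n = mnist.validation.num_examples
        x = tf.placeholder(tf.float32, [n, LeNet_5.image_size, LeNet_5.image_size, 1], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, LeNet_5.output_node], name='y-input')
        y = LeNet_5.inference(x, False, None)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)), tf.float32))

        # Evaluate with the moving-average (shadow) weights rather than the raw ones.
        variable_averages = tf.train.ExponentialMovingAverage(0.99)
        saver = tf.train.Saver(variable_averages.variables_to_restore())

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state("model/")
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                xs = np.reshape(mnist.validation.images,
                                (n, LeNet_5.image_size, LeNet_5.image_size, 1))
                print(sess.run(accuracy, feed_dict={x: xs, y_: mnist.validation.labels}))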

Training took roughly an hour; the output was as follows:

After 1 training step(s), loss on training batch is 6.577364.
After 1 training step(s), correct rate is 0.112800.
After 201 training step(s), loss on training batch is 1.134218.
After 201 training step(s), correct rate is 0.820400.
After 401 training step(s), loss on training batch is 1.095968.
After 401 training step(s), correct rate is 0.882800.
After 601 training step(s), loss on training batch is 0.928677.
After 601 training step(s), correct rate is 0.908400.
After 801 training step(s), loss on training batch is 0.948598.
After 801 training step(s), correct rate is 0.921400.
After 1001 training step(s), loss on training batch is 0.963806.
After 1001 training step(s), correct rate is 0.929200.
After 1201 training step(s), loss on training batch is 0.758020.
After 1201 training step(s), correct rate is 0.937800.
After 1401 training step(s), loss on training batch is 0.855776.
After 1401 training step(s), correct rate is 0.944600.
After 1601 training step(s), loss on training batch is 0.775362.
After 1601 training step(s), correct rate is 0.948600.
After 1801 training step(s), loss on training batch is 0.752938.
After 1801 training step(s), correct rate is 0.954200.
After 2001 training step(s), loss on training batch is 0.873272.
After 2001 training step(s), correct rate is 0.953400.
After 2201 training step(s), loss on training batch is 0.688549.
After 2201 training step(s), correct rate is 0.956200.
After 2401 training step(s), loss on training batch is 0.810790.
After 2401 training step(s), correct rate is 0.959600.
After 2601 training step(s), loss on training batch is 0.817977.
After 2601 training step(s), correct rate is 0.958400.
After 2801 training step(s), loss on training batch is 0.754317.
After 2801 training step(s), correct rate is 0.960000.
After 3001 training step(s), loss on training batch is 0.734173.
After 3001 training step(s), correct rate is 0.962600.
After 3201 training step(s), loss on training batch is 0.665527.
After 3201 training step(s), correct rate is 0.963800.
After 3401 training step(s), loss on training batch is 0.674073.
After 3401 training step(s), correct rate is 0.966200.
After 3601 training step(s), loss on training batch is 0.828704.
After 3601 training step(s), correct rate is 0.968400.
After 3801 training step(s), loss on training batch is 0.803491.
After 3801 training step(s), correct rate is 0.966400.
After 4001 training step(s), loss on training batch is 0.660605.
After 4001 training step(s), correct rate is 0.965200.
After 4201 training step(s), loss on training batch is 0.738579.
After 4201 training step(s), correct rate is 0.970400.
After 4401 training step(s), loss on training batch is 0.785027.
After 4401 training step(s), correct rate is 0.966800.
After 4601 training step(s), loss on training batch is 0.692350.
After 4601 training step(s), correct rate is 0.970400.
After 4801 training step(s), loss on training batch is 0.681506.
After 4801 training step(s), correct rate is 0.968600.
After 5001 training step(s), loss on training batch is 0.747684.
After 5001 training step(s), correct rate is 0.968400.
After 5201 training step(s), loss on training batch is 0.662478.
After 5201 training step(s), correct rate is 0.973200.
After 5401 training step(s), loss on training batch is 0.678184.
After 5401 training step(s), correct rate is 0.973200.
After 5601 training step(s), loss on training batch is 0.699767.
After 5601 training step(s), correct rate is 0.970800.
After 5801 training step(s), loss on training batch is 0.633660.
After 5801 training step(s), correct rate is 0.970400.
After 6001 training step(s), loss on training batch is 0.646770.
After 6001 training step(s), correct rate is 0.970000.
After 6201 training step(s), loss on training batch is 0.703785.
After 6201 training step(s), correct rate is 0.972200.
After 6401 training step(s), loss on training batch is 0.705623.
After 6401 training step(s), correct rate is 0.974800.
After 6601 training step(s), loss on training batch is 0.656697.
After 6601 training step(s), correct rate is 0.974600.
After 6801 training step(s), loss on training batch is 0.646331.
After 6801 training step(s), correct rate is 0.974200.
After 7001 training step(s), loss on training batch is 0.781423.
After 7001 training step(s), correct rate is 0.977000.
After 7201 training step(s), loss on training batch is 0.675177.
After 7201 training step(s), correct rate is 0.977800.
After 7401 training step(s), loss on training batch is 0.724536.
After 7401 training step(s), correct rate is 0.975400.
After 7601 training step(s), loss on training batch is 0.728857.
After 7601 training step(s), correct rate is 0.975800.
After 7801 training step(s), loss on training batch is 0.649602.
After 7801 training step(s), correct rate is 0.976200.
After 8001 training step(s), loss on training batch is 0.665193.
After 8001 training step(s), correct rate is 0.978400.
After 8201 training step(s), loss on training batch is 0.686455.
After 8201 training step(s), correct rate is 0.978000.
After 8401 training step(s), loss on training batch is 0.682056.
After 8401 training step(s), correct rate is 0.978600.
After 8601 training step(s), loss on training batch is 0.653046.
After 8601 training step(s), correct rate is 0.977400.
After 8801 training step(s), loss on training batch is 0.680877.
After 8801 training step(s), correct rate is 0.978000.
After 9001 training step(s), loss on training batch is 0.734092.
After 9001 training step(s), correct rate is 0.977400.
After 9201 training step(s), loss on training batch is 0.683104.
After 9201 training step(s), correct rate is 0.979000.
After 9401 training step(s), loss on training batch is 0.625013.
After 9401 training step(s), correct rate is 0.978200.
After 9601 training step(s), loss on training batch is 0.672356.
After 9601 training step(s), correct rate is 0.980400.
After 9801 training step(s), loss on training batch is 0.647361.
After 9801 training step(s), correct rate is 0.978000.

The accuracy falls short of the 99.4% reported in the book, so there is room for improvement.

Source: blog.csdn.net/qq_41832757/article/details/102166141