TensorFlow Peking University Open Course Notes 7: Convolutional Neural Networks, Reproducing LeNet-5

Note: this series mainly reproduces the code from the Peking University TensorFlow notes so that I can reuse it later; it does not walk through the theory step by step, since I am not writing a tutorial. The PKU course already explains the material very well, so if you want the full details, please go watch that course.
1. mnist_lenet5_backward.py

#coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_lenet5_forward
import os
import numpy as np

BATCH_SIZE=100
LEARNING_RATE_BASE=0.005    # initial learning rate
LEARNING_RATE_DECAY=0.99    # decay factor applied once per epoch
REGULARIZER=0.0001          # L2 regularization coefficient
STEPS=50000                 # total training steps
MOVING_AVERAGE_DECAY=0.99   # decay for the exponential moving average
MODEL_SAVE_PATH="./model/"  # checkpoint directory
MODEL_NAME="mnist_model"    # checkpoint file prefix

def backward(mnist):
    # 4-D NHWC input placeholder, as required by tf.nn.conv2d
    x=tf.placeholder(tf.float32,[
        BATCH_SIZE,
        mnist_lenet5_forward.IMAGE_SIZE,
        mnist_lenet5_forward.IMAGE_SIZE,
        mnist_lenet5_forward.NUM_CHANNELS])
    y_ = tf.placeholder(tf.float32,[None,mnist_lenet5_forward.OUTPUT_NODE])
    y=mnist_lenet5_forward.forward(x,True,REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    # cross-entropy on the one-hot labels, plus the L2 terms collected in forward()
    ce=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
    cem=tf.reduce_mean(ce)
    loss=cem+tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples/BATCH_SIZE,   # decay once per epoch
        LEARNING_RATE_DECAY,
        staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)

    # maintain moving averages of all trainable variables alongside the train step
    ema=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    ema_op=ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step,ema_op]):
        train_op=tf.no_op(name='train')

    saver=tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        # resume from an existing checkpoint so interrupted training can continue
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs=np.reshape(xs,(
                BATCH_SIZE,
                mnist_lenet5_forward.IMAGE_SIZE,
                mnist_lenet5_forward.IMAGE_SIZE,
                mnist_lenet5_forward.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
            if i % 100 == 0:
                print("After %d training step(s),loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main():
    mnist=input_data.read_data_sets("./data/",one_hot=True)
    backward(mnist)


if __name__ == '__main__':
    main()
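
A note on the learning-rate schedule: with staircase=True, tf.train.exponential_decay cuts the rate by a factor of LEARNING_RATE_DECAY once every mnist.train.num_examples/BATCH_SIZE steps, i.e. once per epoch (550 steps for MNIST's 55,000 training images). A quick sketch of the resulting schedule in plain Python (the helper name decayed_lr is mine):

# Staircase exponential decay: lr = base * decay ** (step // decay_steps)
def decayed_lr(step,base=0.005,decay=0.99,decay_steps=550):
    return base*decay**(step//decay_steps)

for step in (0,550,5500,55000):
    print(step,decayed_lr(step))
# 0 -> 0.005, 550 -> 0.00495, 5500 -> ~0.00452, 55000 -> ~0.00183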

2. mnist_lenet5_forward.py

#coding:utf-8
import tensorflow as tf
IMAGE_SIZE=28          # input image height/width
NUM_CHANNELS=1         # grayscale input
CONV1_SIZE=5           # first conv layer: 5x5 kernels
CONV1_KERNEL_NUM=32    # ... 32 of them
CONV2_SIZE=5           # second conv layer: 5x5 kernels
CONV2_KERNEL_NUM=64    # ... 64 of them
FC_SIZE=512            # neurons in the fully connected layer
OUTPUT_NODE=10         # ten digit classes

def get_weight(shape,regularizer):
    w=tf.Variable(tf.truncated_normal(shape,stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b=tf.Variable(tf.zeros(shape))
    return b

def conv2d(x,w):
    return tf.nn.conv2d(x,w,strides=[1,1,1,1],padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

def forward(x,train,regularizer):
    conv1_w=get_weight([CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_KERNEL_NUM],regularizer)
    conv1_b=get_bias([CONV1_KERNEL_NUM])
    conv1=conv2d(x,conv1_w)
    relu1=tf.nn.relu(tf.nn.bias_add(conv1,conv1_b))
    pool1=max_pool_2x2(relu1)

    conv2_w=get_weight([CONV2_SIZE,CONV2_SIZE,CONV1_KERNEL_NUM,CONV2_KERNEL_NUM],regularizer)
    conv2_b=get_bias([CONV2_KERNEL_NUM])
    conv2=conv2d(pool1,conv2_w)
    relu2=tf.nn.relu(tf.nn.bias_add(conv2,conv2_b))
    pool2=max_pool_2x2(relu2)

    # flatten the feature maps for the fully connected layers
    pool_shape=pool2.get_shape().as_list()   # [batch, height, width, channels]
    nodes=pool_shape[1]*pool_shape[2]*pool_shape[3]
    reshaped=tf.reshape(pool2,[pool_shape[0],nodes])

    fc1_w=get_weight([nodes,FC_SIZE],regularizer)
    fc1_b=get_bias([FC_SIZE])
    fc1=tf.nn.relu(tf.matmul(reshaped,fc1_w)+fc1_b)
    if train:
        fc1=tf.nn.dropout(fc1,0.5)   # dropout only during training

    fc2_w=get_weight([FC_SIZE,OUTPUT_NODE],regularizer)
    fc2_b=get_bias([OUTPUT_NODE])
    y=tf.matmul(fc1,fc2_w)+fc2_b
    return y
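
For a 28x28x1 input the shapes work out as follows: each 5x5 convolution uses SAME padding with stride 1, so it keeps the spatial size and only changes the channel count (28x28x32 after conv1, 14x14x64 after conv2), while each 2x2 max pool halves height and width. The second pool therefore leaves 7x7x64, so the flattened vector feeding fc1 has 7*7*64 = 3136 nodes. A quick check of that arithmetic in plain Python:

# Shape trace: SAME conv keeps height/width, each 2x2/stride-2 pool halves them.
size,channels=28,1
for kernel_num in (32,64):   # conv1, conv2
    channels=kernel_num      # conv only changes the channel count
    size//=2                 # max pool halves the spatial size
print(size,size,channels,size*size*channels)   # 7 7 64 3136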

3. mnist_lenet5_test.py

#coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_lenet5_forward
import mnist_lenet5_backward
import numpy as np

TEST_INTERVAL_SECS=5    # seconds to wait between evaluation rounds

def test(mnist):   # mnist: the loaded MNIST data set
    with tf.Graph().as_default() as g:  # rebuild the computation graph
        x = tf.placeholder(tf.float32, [
            mnist.test.num_examples,
            mnist_lenet5_forward.IMAGE_SIZE,
            mnist_lenet5_forward.IMAGE_SIZE,
            mnist_lenet5_forward.NUM_CHANNELS])
        y_ = tf.placeholder(tf.float32, [None, mnist_lenet5_forward.OUTPUT_NODE])
        y = mnist_lenet5_forward.forward(x,False, None)

        # instantiate a saver over the moving averages, so every parameter is
        # restored to its moving-average (shadow) value when loaded
        ema=tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY)
        ema_restore=ema.variables_to_restore()
        saver=tf.train.Saver(ema_restore)

        # compute the accuracy
        correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
        accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))

        while True:
            with tf.Session() as sess:
                # load the checkpoint, i.e. assign the moving averages to the parameters
                ckpt=tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH)
                # if a saved model exists, restore it into the current session
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess,ckpt.model_checkpoint_path)
                    # recover the global_step from the checkpoint file name
                    global_step=ckpt.model_checkpoint_path.split('/')[-1].split("-")[-1]
                    reshaped_x = np.reshape(mnist.test.images, (
                        mnist.test.num_examples,
                        mnist_lenet5_forward.IMAGE_SIZE,
                        mnist_lenet5_forward.IMAGE_SIZE,
                        mnist_lenet5_forward.NUM_CHANNELS))
                    # evaluate accuracy on the full test set
                    accuracy_score=sess.run(accuracy,feed_dict={x:reshaped_x,y_:mnist.test.labels})
                    print("After %s training step(s), test accuracy=%g"%(global_step,accuracy_score))
                else:
                    print("No checkpoint file found")  #未找到模型
                    return
            time.sleep(TEST_INTERVAL_SECS)

def main():
    mnist=input_data.read_data_sets('./data/',one_hot=True)  # load the data set
    test(mnist)  # run the evaluation loop

if __name__=='__main__':
    main()
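
The reason the test script builds its Saver from ema.variables_to_restore() is that this method returns a name map from each variable's shadow name (e.g. 'w/ExponentialMovingAverage') to the variable itself, so saver.restore loads the smoothed weights instead of the last raw values. A minimal illustration, separate from the three scripts above:

# Illustration only: inspect the name map that variables_to_restore() builds.
import tensorflow as tf
w=tf.Variable(tf.zeros([2]),name="w")
ema=tf.train.ExponentialMovingAverage(0.99)
ema.apply([w])
print(ema.variables_to_restore())
# roughly: {'w/ExponentialMovingAverage': <tf.Variable 'w:0' shape=(2,) ...>}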

First run mnist_lenet5_backward.py to train and save the model,
then run mnist_lenet5_test.py to evaluate it.
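
Once a checkpoint exists under ./model/, the same restore pattern can classify a single image. Below is a minimal sketch under that assumption (the function predict_one is mine, and for brevity it restores the raw weights rather than the moving averages):

#coding:utf-8
# Sketch: restore the latest checkpoint and classify one test image.
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_lenet5_forward
import mnist_lenet5_backward

def predict_one(image):   # image: flat 784-vector with values in [0,1]
    with tf.Graph().as_default():
        x=tf.placeholder(tf.float32,[1,
            mnist_lenet5_forward.IMAGE_SIZE,
            mnist_lenet5_forward.IMAGE_SIZE,
            mnist_lenet5_forward.NUM_CHANNELS])
        y=mnist_lenet5_forward.forward(x,False,None)
        pred=tf.argmax(y,1)
        saver=tf.train.Saver()
        with tf.Session() as sess:
            ckpt=tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH)
            saver.restore(sess,ckpt.model_checkpoint_path)
            reshaped=np.reshape(image,(1,
                mnist_lenet5_forward.IMAGE_SIZE,
                mnist_lenet5_forward.IMAGE_SIZE,
                mnist_lenet5_forward.NUM_CHANNELS))
            return sess.run(pred,feed_dict={x:reshaped})[0]

mnist=input_data.read_data_sets("./data/",one_hot=True)
print(predict_one(mnist.test.images[0]))   # predicted digit for the first test image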

Reposted from blog.csdn.net/sxlsxl119/article/details/81661669