Study notes for the "TensorFlow Notes" video course

Chapter 5: Issues

  • backward and test need to run at the same time (e.g. in two separate processes), so the test loop keeps picking up the newest checkpoint.
  • saver = tf.train.Saver() keeps only the 5 most recent checkpoints by default; pass max_to_keep=None to keep all of them (see the sketch after this list).
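
A minimal sketch (not part of the course code) of what max_to_keep changes; the dummy variable and the ./ckpt_demo/ directory are made up for illustration:

#coding:utf-8
import tensorflow as tf
import os

v = tf.Variable(0, name='v')
inc = tf.assign_add(v, 1)

saver_last5 = tf.train.Saver()                # default: only the 5 most recent checkpoints are kept
saver_all = tf.train.Saver(max_to_keep=None)  # keep every checkpoint that is ever saved

if not os.path.exists('./ckpt_demo'):
    os.makedirs('./ckpt_demo')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10):
        sess.run(inc)
        saver_last5.save(sess, './ckpt_demo/last5', global_step=v)  # older files are pruned automatically
        saver_all.save(sess, './ckpt_demo/all', global_step=v)      # all 10 checkpoints remain on disk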

Chapter 5: Code

#coding:utf-8
# mnist_forward.py: network structure and forward propagation
import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

# Define the network's input, parameters and output, and the forward-propagation process
def get_weight(shape, regularizer):
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    if regularizer is not None:
        # Add the L2 regularization loss of w to the 'losses' collection
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b

def forward(x, regularizer):
    w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])
    y = tf.matmul(y1, w2) + b2  # no activation on the output layer; raw logits are returned
    return y


#coding:utf-8
# mnist_backward.py: training (backward propagation)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

STEPS = 50000
BATCH_SIZE = 200
LEARNING_RATE_BASE=0.1  # initial learning rate
LEARNING_RATE_DECAY=0.99  # learning-rate decay rate
REGULARIZER=0.0001
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="./model/"
MODEL_NAME="mnist_model"

def backward(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    # Counter for how many batches have been run; starts at 0 and is marked non-trainable
    global_step = tf.Variable(0, trainable=False)

    # Cross-entropy loss: tf.nn.sparse_softmax_cross_entropy_with_logits applies softmax
    # to the logits and computes the cross-entropy in a single op
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    # Total loss = cross-entropy + the L2 regularization terms collected in 'losses'
    loss = cem + tf.add_n(tf.get_collection('losses'))
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY, staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Exponential moving average (EMA) of all trainable variables
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver(max_to_keep=None)  # max_to_keep=None keeps every checkpoint (the default keeps only the 5 most recent)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Resume training from the latest checkpoint if one exists
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    backward(mnist)
if __name__ == '__main__':
    main()

#coding:utf-8
# Test script (runs alongside mnist_backward.py): repeatedly evaluates the newest checkpoint
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS = 5

def test(mnist):
    with tf.Graph().as_default() as g:
        # Placeholders for x (input images) and y_ (ground-truth labels)
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
        # Forward propagation to obtain the prediction y
        y = mnist_forward.forward(x, None)

        # Instantiate a Saver that restores the moving-average (shadow) values of the variables
        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
        # Compute the accuracy
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as sess:
                # Load the trained model
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                # If a checkpoint exists, restore it
                if ckpt and ckpt.model_checkpoint_path:
                    # Restore the session
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the global step from the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    # Evaluate accuracy on the whole test set
                    accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    # Print the result
                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                else:
                    # No saved model found
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)
# Next, define the main() function
def main():
    # Load the test dataset
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    # Call the test() function defined above
    test(mnist)
if __name__ == '__main__':
    main()

Chapter 6, Section 1: Issues

  • The number of pictures typed at the prompt comes back as a string, so it must be cast to an integer before it can drive the loop.
  • raw_input was removed in Python 3; use input() directly (see the sketch after this list).
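
A minimal sketch of the two fixes (the prompts mirror the ones in the script below; the print line is only a placeholder):

#coding:utf-8
# raw_input() no longer exists in Python 3; input() returns a str,
# so the picture count has to be cast to int before it can drive range()
testNum = input("input the number of test pictures:")
for i in range(int(testNum)):
    testPic = input("the path of test picture:")
    print("picture %d: %s" % (i + 1, testPic))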

Chapter 6, Section 1: Code

#coding:utf-8
# Application script: preprocess a hand-written digit picture and predict it
import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_forward
import mnist_backward

def restore_model(testPicArr):
    # Create a default graph and run the following operations in it (most mirror the training code)
    with tf.Graph().as_default() as tg:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y = mnist_forward.forward(x, None)
        preValue = tf.argmax(y, 1)  # index of the largest output = predicted digit

        # Restore the exponential moving average; MOVING_AVERAGE_DECAY controls how fast the model updates.
        # During training a shadow variable is kept for every variable: it starts at the variable's
        # initial value and is updated every time the variable changes.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            # Load the trained model
            ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # Restore the session
                saver.restore(sess, ckpt.model_checkpoint_path)
                preValue = sess.run(preValue, feed_dict={x: testPicArr})  # feed the picture into the network and get the prediction
                return preValue
            else:
                print("No checkpoint file found")
                return -1

# Preprocessing: resize, convert to grayscale, invert and binarize
def pre_pic(picName):
    img = Image.open(picName)
    reIm = img.resize((28, 28), Image.ANTIALIAS)  # resize with anti-aliasing
    im_arr = np.array(reIm.convert('L'))  # convert to grayscale, then to a numpy array
    threshold = 50  # threshold used to filter out noise
    # Invert the image (MNIST expects a white digit on a black background) and binarize it
    for i in range(28):
        for j in range(28):
            im_arr[i][j] = 255 - im_arr[i][j]
            if (im_arr[i][j] < threshold):
                im_arr[i][j] = 0   # below the threshold -> 0 (black background)
            else:
                im_arr[i][j] = 255   # otherwise -> 255 (white foreground)
    # Flatten to a 1x784 row vector and convert to float32 in [0, 1]
    nm_arr = im_arr.reshape([1, 784])
    nm_arr = nm_arr.astype(np.float32)
    img_ready = np.multiply(nm_arr, 1.0/255.0)

    return img_ready

def application():
    testNum = input("input the number of test pictures:")
    for i in range(int(testNum)):
        testPic = input("the path of test picture:")
        testPicArr = pre_pic(testPic)  # preprocess the picture
        preValue = restore_model(testPicArr)  # feed the preprocessed picture into the network
        print("The prediction number is:", preValue)

def main():
    application()
if __name__ == '__main__':
    main()


The resulting program is still a bit clumsy; it needs further optimization…

Reposted from blog.csdn.net/weixin_43903091/article/details/84710934