TF_5 FC_tricks


1. Goal

Once again we build a fully connected neural network, but this time with one hidden layer added, plus a few tricks for optimizing the model.

2. Tricks

  • Exponentially decaying learning rate
    decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
    In the earlier code we used a fixed value as the learning rate, but that constant takes many experiments to tune: if it is too large, the loss oscillates as it approaches its minimum; if it is too small, learning is very slow. An exponentially decaying learning rate adjusts the learning rate dynamically based on the training step, so the model approaches a good solution faster (a quick numeric check follows the signature below).
exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
                  staircase=False, name=None)
'''
learning_rate: the initial learning rate
global_step: the current training step
decay_steps: decay once every decay_steps training steps
decay_rate: the decay rate
If staircase=True, global_step / decay_steps is truncated to an integer
'''
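As a quick numeric check (a minimal sketch of the schedule itself, no TensorFlow needed, using LEARNING_RATE_BASE = 0.1, DECAY_STEPS = 100 and LEARNING_RATE_DECAY = 0.99 from the code in section 4): at step 1000 with staircase=True the exponent is 1000 // 100 = 10, so the learning rate has decayed to 0.1 * 0.99^10 ≈ 0.0904.

def decayed_lr(step, base=0.1, decay_steps=100, decay_rate=0.99, staircase=True):
    exponent = step // decay_steps if staircase else step / decay_steps
    return base * decay_rate ** exponent

print(decayed_lr(1000))    # ~0.0904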
  • Regularization
    Regularization prevents overfitting by adding a penalty term λ * R(w) to the loss function. The most common variants are L1 and L2 regularization: roughly speaking, L1 regularization takes the 1-norm of the weights w and L2 regularization takes the 2-norm, multiplied by λ to set the penalty strength. The smaller λ is, the smaller the weight of the regularization term and the more easily the model overfits; the larger λ is, the more easily the model underfits (a small numeric example follows the snippet below).
# Apply L2 regularization to w
tf.contrib.layers.l2_regularizer(lamda)(w)

# Add the regularization loss to a collection named 'losses'
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(lamda)(w))

# Sum all the elements in the 'losses' collection
reg = tf.add_n(tf.get_collection('losses'))
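For intuition, a small numeric example (assuming the contrib implementation, where l2_regularizer(scale) computes scale * sum(w ** 2) / 2 via tf.nn.l2_loss):

import tensorflow as tf

lamda = 0.0001
w = tf.constant([[1.0, 2.0], [3.0, 4.0]])
penalty = tf.contrib.layers.l2_regularizer(lamda)(w)

with tf.Session() as sess:
    print(sess.run(penalty))    # lamda * (1 + 4 + 9 + 16) / 2 = 0.0015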
  • Moving average
    I have read many blog posts and understand the underlying principle, but not yet how TensorFlow handles it internally. Leaving this here for now; I will write it up properly once I understand it (a rough sketch of the update rule follows below).
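For now, a minimal sketch of my current understanding (to be verified): tf.train.ExponentialMovingAverage keeps a shadow variable for every variable passed to apply(), updated as shadow = decay * shadow + (1 - decay) * variable; when a step counter is supplied, the effective decay is min(decay, (1 + step) / (10 + step)), so the average tracks the variables closely early in training.

import tensorflow as tf

step = tf.Variable(0, trainable=False)
v = tf.Variable(10.0)

ema = tf.train.ExponentialMovingAverage(0.99, step)
ema_op = ema.apply([v])    # creates and updates the shadow variable for v

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.assign(v, 5.0))
    sess.run(ema_op)
    # With step = 0 the effective decay is min(0.99, 1/10) = 0.1,
    # so the shadow value becomes 0.1 * 10 + 0.9 * 5 = 5.5
    print(sess.run(ema.average(v)))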

3. Saving and Restoring the Model

  • Restore
'''
First create a Saver object; max_to_keep controls how many checkpoints are kept.
When restoring, check the model save path: restore only if a checkpoint exists there.
On the first training run no model exists yet, so initialize all variables instead.
'''
saver = tf.train.Saver(max_to_keep=1)
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)  
if ckpt and ckpt.model_checkpoint_path:  
    saver.restore(sess, ckpt.model_checkpoint_path)
    print('Successfully loaded model.')
else:
    print('Model not found. Initializing all variables')
    tf.global_variables_initializer().run()
  • Save
    Save all the variables in the session to the given path; passing global_step appends the current step number to the checkpoint file name.
saver.save(sess, MODEL_SAVE_PATH + MODEL_NAME, global_step=global_step)
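A related detail worth noting (a sketch, not part of the original script): when the moving average is used during training, a separate evaluation script can load the shadow (averaged) values instead of the raw weights by building the Saver from ema.variables_to_restore(). Assuming the same forward() graph and constants as in the code below:

import tensorflow as tf

# Build the same graph as forward() first, then:
ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
saver = tf.train.Saver(ema.variables_to_restore())

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)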

4. Code

import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

#------------------ Hyperparameters -------------------
# Number of input, output and hidden-layer nodes
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500
# Batch size and number of training steps
BATCH_SIZE = 200
STEPS = 20000
# Exponentially decaying learning rate
LEARNING_RATE_BASE = 0.1
DECAY_STEPS = 100
LEARNING_RATE_DECAY = 0.99
# Regularization
LAMDA = 0.0001
# Moving average
MOVING_AVERAGE_DECAY = 0.99
# Model save path
MODEL_SAVE_PATH = 'mnist/fc/model/'
MODEL_NAME = 'fc_tricks'
# MNIST data path
MNIST_DATA_PATH = 'mnist/MNIST_data'

# Define weights, with optional L2 regularization
def get_weight(shape, lamda=None):
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if lamda is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(lamda)(w))
    return w


# Define biases
def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b


# Define the forward pass
def forward(x, lamda=None):
    w1 = get_weight([INPUT_NODE,LAYER1_NODE], lamda)
    b1 = get_bias(LAYER1_NODE)
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = get_weight([LAYER1_NODE,OUTPUT_NODE], lamda)
    b2 = get_bias(OUTPUT_NODE)
    y = tf.matmul(y1,w2) + b2
    return y


# Define the backward pass (training)
def backward(mnist):
    x = tf.placeholder(tf.float32, [None,INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None,OUTPUT_NODE])
    y = forward(x, LAMDA)
    global_step = tf.Variable(0,trainable=False)

    # Cross-entropy loss
    ce = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)
    cem = tf.reduce_mean(ce)
    # Regularization loss
    reg = tf.add_n(tf.get_collection('losses'))
    # Total loss
    loss = cem + reg

    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        DECAY_STEPS,
        LEARNING_RATE_DECAY,
        staircase=True)
    
    # Apply a moving average to the trainable variables
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step,ema_op]):
        train_op = tf.no_op(name='train')

    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train and save the model
    with tf.Session() as sess:
        saver = tf.train.Saver(max_to_keep=1)
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)  
        if ckpt and ckpt.model_checkpoint_path:  
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Successfully loaded model.')
        else:
            print('Model not found. Initializing all variables')
            tf.global_variables_initializer().run()

        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x:xs, y_:ys})
            if (i+1)%1000 == 0:
                saver.save(sess, MODEL_SAVE_PATH + MODEL_NAME, global_step=global_step)
                accuracy_rate = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                print('steps: %d, loss: %g, accuracy_rate: %g'%(step, loss_value, accuracy_rate))     #97.5% after 10000 steps


def main():
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    mnist = input_data.read_data_sets(MNIST_DATA_PATH,one_hot=True)
    backward(mnist)

if __name__ == '__main__':
    main()            
