TensorFlow Framework (4)

The coverage of this chapter in 《TensorFlow 实战Google深度学习框架》 feels, to me, too brief, especially for some of the classic convolutional networks. This chapter therefore focuses on how to build these classic network structures; the finer details of the models are not discussed here, and interested readers can consult other references.

1. Convolutional Layer

A convolutional layer is essentially defined by its filter. The filter weight tensor has four dimensions:

  • The first two dimensions give the filter size (its height and width)
  • The third dimension is the depth of the current layer (the filter depth always equals the depth of the input)
  • The fourth dimension is the number of filters (the book calls this the filter depth, but "number of filters" is more intuitive)

Note that parameters are not shared between different filters; weights are shared only as a single filter slides across the current layer. For example, a bank of 16 filters of size 5×5×3 has 5*5*3*16 + 16 = 1216 parameters, regardless of the spatial size of the input.

# Define the filter weights and biases
filter_weight = tf.get_variable(
    'weights', [5, 5, 3, 16],
    initializer = tf.truncated_normal_initializer(stddev = 0.1))
biases = tf.get_variable('biases', [16], initializer = tf.constant_initializer(0.1))

# Apply the convolution
# input is a 4-D tensor: for images, the first dimension indexes the image within the batch
# and the remaining three dimensions describe that image
# In strides = [1,1,1,1] the first and fourth entries must always be 1; the stride only applies to height and width
# 'SAME' pads the borders so the output keeps the same spatial size as the input; 'VALID' adds no padding
conv = tf.nn.conv2d(input, filter_weight, strides = [1, 1, 1, 1], padding = 'SAME')

# Add the bias to the feature map produced by each filter
bias = tf.nn.bias_add(conv, biases)

# ReLU activation
actived_conv = tf.nn.relu(bias)
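
To see how the padding mode affects the output size, here is a minimal sketch (the shapes and variable names are illustrative, not from the original code) that prints the static output shapes for 'SAME' and 'VALID' padding with a stride of 2:

import tensorflow as tf

# A batch of 10 RGB images of size 32x32 (illustrative shape)
x = tf.placeholder(tf.float32, [10, 32, 32, 3])
w = tf.Variable(tf.truncated_normal([5, 5, 3, 16], stddev = 0.1))

conv_same = tf.nn.conv2d(x, w, strides = [1, 2, 2, 1], padding = 'SAME')
conv_valid = tf.nn.conv2d(x, w, strides = [1, 2, 2, 1], padding = 'VALID')

print(conv_same.shape)   # (10, 16, 16, 16): 'SAME' gives ceil(32 / 2) = 16
print(conv_valid.shape)  # (10, 14, 14, 16): 'VALID' gives ceil((32 - 5 + 1) / 2) = 14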

2. Pooling Layer

The most commonly used pooling layers are the max pooling layer and the average pooling layer.

# Max pooling layer
# The first and fourth entries of ksize must be 1; the second and third give the pooling window size
pool = tf.nn.max_pool(actived_conv, ksize = [1,3,3,1],
                      strides = [1,2,2,1], padding = 'SAME')
# Average pooling layer
pool = tf.nn.avg_pool(actived_conv, ksize = [1,3,3,1],
                      strides = [1,2,2,1], padding = 'SAME')
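
The same padding rules determine the output size of a pooling layer. A quick sketch (illustrative shapes, not from the original code) showing that a stride of 2 with 'SAME' padding halves the spatial dimensions while leaving the depth unchanged:

import tensorflow as tf

feature_map = tf.placeholder(tf.float32, [10, 32, 32, 16])  # illustrative shape
pool = tf.nn.max_pool(feature_map, ksize = [1, 3, 3, 1],
                      strides = [1, 2, 2, 1], padding = 'SAME')
print(pool.shape)  # (10, 16, 16, 16): ceil(32 / 2) = 16, depth stays 16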

3. The LeNet-5 Network

The code below does not rebuild the LeNet-5 model exactly as presented in the original paper; it uses a similar network structure instead. I should point out that the book's version of this code feels a bit hard to follow, and because it uses plain gradient descent the model converges very slowly, so the code here is modified to use the Adam optimization algorithm.

The code below does not save the model parameters; it simply prints the prediction accuracy on the test set as training proceeds.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets("C:/Users/14981/Desktop/Deep Learning/", one_hot = True)

# Hyperparameters
times = 20
batch_size = 128
regularization = 0.0001
moving_average_decay = 0.99
learning_rate_base = 0.8
learning_rate_decay = 0.99
batch_num = mnist.train.num_examples // batch_size

# Helper functions
def weight_variable(name, shape):
    initial = tf.get_variable(name, shape,
                           initializer = tf.truncated_normal_initializer(stddev=0.1))
    return initial

def bias_variable(name, shape):
    initial = tf.get_variable(name, shape,
                             initializer = tf.constant_initializer(0.1))
    return initial

def inference(input_tensor, regularizer, keep_prob):
    images = tf.reshape(input_tensor, [-1,28,28,1])
    
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = weight_variable('weight', [5,5,1,32])
        conv1_biases = bias_variable('bias', [32])
        conv1 = tf.nn.conv2d(images, conv1_weights, strides = [1,1,1,1], padding = 'SAME')
        relu1 = tf.nn.relu(conv1 + conv1_biases)
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')
    
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = weight_variable('weight', [5,5,32,64])
        conv2_biases = bias_variable('bias', [64])
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides = [1,1,1,1], padding = 'SAME')
        relu2 = tf.nn.relu(conv2 + conv2_biases)
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')

    # At this point each image has been reduced to 7*7*64:
    # 28x28 -> pool1 -> 14x14 -> pool2 -> 7x7, and the second convolutional layer has 64 filters
    pool_shape = tf.reshape(pool2, (-1,7*7*64))
    
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = weight_variable('weight', [7*7*64,512])
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = bias_variable('bias', [512])
        fc1 = tf.nn.relu(tf.matmul(pool_shape, fc1_weights) + fc1_biases)
        # Dropout regularization
        fc1 = tf.nn.dropout(fc1, keep_prob)
    
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = weight_variable('weight', [512, 10])
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = bias_variable('bias',[10])
        # Return the raw logits; the softmax is applied inside the cross-entropy loss below
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    
    return logit

def train(mnist):
    x = tf.placeholder(tf.float32, [None,784], name = 'x-input')
    y = tf.placeholder(tf.float32, [None,10], name = 'y-input')
    keep_prob = tf.placeholder(tf.float32)
    
    regularizer = tf.contrib.layers.l2_regularizer(regularization)
    logit = inference(x, regularizer, keep_prob)
    
    # Define the exponential moving average over all trainable variables
    global_step = tf.Variable(0, trainable = False)
    variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    
    # Define the loss function
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits = logit, labels = tf.argmax(y, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    
    # Learning-rate decay, kept from the book's original code
    # (it is not actually used: the Adam optimizer below uses a fixed learning rate of 1e-4)
    learning_rate = tf.train.exponential_decay(
        learning_rate_base,
        global_step,
        mnist.train.num_examples // batch_size, # number of steps needed to pass over the full training set once
        learning_rate_decay
        )
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss, global_step = global_step)
    
    # Group the gradient update and the moving-average update into a single training op
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name = 'train')
    
    # Accuracy
    correction_prediction = tf.equal(tf.argmax(logit, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correction_prediction, tf.float32))
    
    # Variable initialization
    init = tf.global_variables_initializer()
    
    with tf.Session() as sess:
        sess.run(init)
        # Run 'times' epochs, iterating over all batches in each epoch
        for epoch in range(times):
            for batch in range(batch_num):
                xs, ys = mnist.train.next_batch(batch_size)
                _, loss_value, step = sess.run([train_op, loss, global_step],
                                               feed_dict = {x:xs,y:ys,keep_prob:0.7})
            acc = sess.run(accuracy,
                           feed_dict = {x:mnist.test.images, y:mnist.test.labels,keep_prob:1.0})
            print("After %d, Testing Accuracy = %g" %(epoch, acc))

# Train the model
train(mnist)
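
One detail worth noting: train() maintains the moving-average shadow variables through variable_averages_op but never uses them for evaluation. If the trained model were saved to a checkpoint (the code above does not save one), a minimal sketch of evaluating with the averaged weights, reusing the variable_averages, accuracy and placeholder objects defined inside train(), might look like this (the checkpoint path is hypothetical):

# Sketch only: evaluate with the moving-average (shadow) values instead of the raw weights.
# variables_to_restore() maps each shadow-variable name back to its original variable, so a
# Saver built from that mapping loads the averaged weights when restoring a checkpoint.
saver = tf.train.Saver(variable_averages.variables_to_restore())
with tf.Session() as sess:
    saver.restore(sess, 'path/to/saved/model')  # hypothetical checkpoint path
    acc = sess.run(accuracy, feed_dict = {x: mnist.test.images,
                                          y: mnist.test.labels, keep_prob: 1.0})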

4. Classic Network Models

A separate article on the classic network models will be put together later.

5. Transfer Learning

This code is adapted from the book; I have run it myself and confirmed that the results are correct, added more detailed comments, and trimmed some of the book's redundant code.

The first step is to preprocess the dataset and store the processed data in a .npy file.

import numpy as np
import os.path
import glob
from scipy.misc import imread, imresize
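# Note: imread and imresize were removed from scipy.misc in SciPy 1.2; on a newer SciPy,
# see the Pillow-based alternative sketched after this script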

# Input directory
input_data = 'C:\\Users\\guesthost\\Desktop\\深度学习数据集\\flower_photos\\flower_photos'
# Output file
output_data = 'C:\\Users\\guesthost\\Desktop\\深度学习数据集\\flower_processed_data.npy'

# Percentage of the data used for validation and testing
validation_percentage = 10
test_percentage = 10

def create_image_lists(testing_percentage, validation_percentage):
    # Walk the directory tree to get the names of the sub-directories (one per flower class)
    sub_dirs = [x[0] for x in os.walk(input_data)]
    is_root_dir = True
    
    training_images = []
    training_labels = []
    testing_images = []
    testing_labels = []
    validation_images = []
    validation_labels = []
    current_label = 0
    
    for sub_dir in sub_dirs:
        ret = 1  # picture counter within the current directory
        # Skip the root directory itself
        if is_root_dir:
            is_root_dir = False
            continue
            
        extensions = ['jpg', 'jpeg']
        file_list = []
        dir_name = os.path.basename(sub_dir)
        for extension in extensions:
            # Collect all files in the current directory whose suffix matches this extension
            file_glob = os.path.join(input_data, dir_name, '*.' + extension)
            file_list.extend(glob.glob(file_glob))
        if not file_list:
            continue

        # Process every image in file_list
        for file_name in file_list:
            # Read the image and resize it to 299x299 (the input size expected by Inception-v3)
            image = imread(file_name)
            image_value = imresize(image, [299, 299])

            # Randomly assign each image to a split,
            # giving roughly training : validation : test = 8 : 1 : 1
            chance = np.random.randint(100)
            if chance < validation_percentage:
                validation_images.append(image_value)
                validation_labels.append(current_label)
            elif chance < (testing_percentage + validation_percentage):
                testing_images.append(image_value)
                testing_labels.append(current_label)
            else:
                training_images.append(image_value)
                training_labels.append(current_label)
            print("%d picture is completed" % ret)
            ret += 1
        print("%d dir is completed!" % current_label)
        current_label += 1
            
    # Record the random state before shuffling
    state = np.random.get_state()
    np.random.shuffle(training_images)
    # Restore the same state so the labels are shuffled with the same permutation as the images
    np.random.set_state(state)
    np.random.shuffle(training_labels)
    
    return np.asarray([training_images, training_labels,
                      validation_images, validation_labels,
                      testing_images, testing_labels])
    
def main():
    processed_data = create_image_lists(test_percentage, validation_percentage)
    np.save(output_data, processed_data)
        
if __name__ == '__main__':
    main()
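
If you are running SciPy 1.2 or later, scipy.misc.imread and imresize no longer exist. A small equivalent helper using Pillow is sketched below; the function name read_and_resize is my own and not part of the original code:

from PIL import Image
import numpy as np

def read_and_resize(file_name, size = (299, 299)):
    # Equivalent of imread followed by imresize: decode the file, force RGB, resize to 299x299
    image = Image.open(file_name).convert('RGB')
    return np.asarray(image.resize(size))

# image_value = read_and_resize(file_name) would then replace the imread/imresize pair above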

Use Google's pre-trained Inception-v3 model for transfer learning.

import tensorflow as tf
import tensorflow.contrib.slim.python.slim.nets.inception_v3 as inception_v3
import tensorflow.contrib.slim as slim
import load_dataset as load
import numpy as np

# Path of the preprocessed data (the .npy file produced by the script above)
input_data = load.output_data
# Path of the pre-trained Inception-v3 checkpoint
ckpt_file = 'C:/Users/guesthost/Desktop/机器学习/深度学习/深度学习预训练模型/inception_v3_2016_08_28/inception_v3.ckpt'
# Path prefix under which the fine-tuned model will be saved
train_file = 'C:/Users/guesthost/Desktop/机器学习/深度学习/深度学习预训练模型/my_inception_model/my_inception_v3_model'

learning_rate = 1e-4
steps = 300
batch = 32
n_classes = 5

checkpoint_exclude_scopes = 'InceptionV3/Logits,InceptionV3/AuxLogits'
trainable_scopes = 'InceptionV3/Logits, InceptionV3/AuxLogits'

# Collect all variables that should be loaded from the pre-trained checkpoint
def get_tuned_variables():
    exclusions = [scope.strip() for scope in checkpoint_exclude_scopes.split(',')]
    variables_to_restore = []
    
    # Iterate over all model variables; keep every variable whose op name does not start with
    # an excluded scope, and return those as the variables to restore from the checkpoint
    for var in slim.get_model_variables():
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
        if not excluded:
            variables_to_restore.append(var)
    return variables_to_restore

# Collect the list of variables that should be trained
def get_trainable_variables():
    scopes = [scope.strip() for scope in trainable_scopes.split(',')]
    variables_to_train = []
    
    for scope in scopes:
        # Get all trainable variables under this scope from the TRAINABLE_VARIABLES collection
        variables = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope)
        variables_to_train.extend(variables)
    return variables_to_train

def main():
    processed_data = np.load(input_data)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]
    print("%d trianing examples, %d validation examples and %d testing examples."\
          %(n_training_example, len(validation_images), len(testing_images)))
    
    images = tf.placeholder(tf.float32, shape = [None, 299, 299, 3], name = 'x-input')
    labels = tf.placeholder(tf.int64, shape = [None], name = 'labels')
    
    # Build the Inception-v3 forward pass using the model's default argument scope
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        logits, _ = inception_v3.inception_v3(images, num_classes = n_classes)
    
    # Cross-entropy loss
    tf.losses.softmax_cross_entropy(tf.one_hot(labels, n_classes), logits = logits, weights = 1.0)

    # Define the training step
    train_step = tf.train.RMSPropOptimizer(learning_rate).minimize(tf.losses.get_total_loss())
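    # Note: as written this fine-tunes all trainable variables. To train only the newly added
    # layers, a var_list argument could be passed, e.g.
    # minimize(tf.losses.get_total_loss(), var_list = get_trainable_variables()),
    # which is what get_trainable_variables() above was written for; otherwise it goes unused.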
    
    # Compute the accuracy
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    
    # Build a function that restores the specified variables from the ckpt file
    # get_tuned_variables() returns the variables to load from the pre-trained model
    # ignore_missing_vars = True silently skips variables that are not found in the checkpoint
    load_fn = slim.assign_from_checkpoint_fn(ckpt_file, get_tuned_variables(), ignore_missing_vars = True)
    
    # Saver for the newly fine-tuned model
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialization must run before loading the checkpoint; otherwise it would overwrite the variables that have just been restored
        init = tf.global_variables_initializer()
        sess.run(init)
        
        print("Loading tuned variables from %s" % ckpt_file)
        load_fn(sess)
        start = 0
        end = batch
        for i in range(steps):
            start = (i * batch) % n_training_example
            end = min(start + batch, n_training_example)
            
            sess.run(train_step, feed_dict = {images:training_images[start:end],labels:training_labels[start:end]})
            
            # Save a checkpoint and print the validation accuracy
            if i % 30 == 0 or i + 1 == steps:
                saver.save(sess, train_file, global_step = i)
                validation_accuracy = sess.run(evaluation_step, feed_dict = {images:validation_images, labels:validation_labels})
                print("Step %d Validation accuracy = %.1f%%" %(i, validation_accuracy * 100))
            
        # Print the final test-set accuracy
        test_accuracy = sess.run(evaluation_step, feed_dict = {images:testing_images, labels:testing_labels})
        print('Final test accuracy = %.1f%%' %(test_accuracy * 100))
            
main()
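
saver.save writes checkpoints under the train_file prefix every 30 steps. As a closing note, here is a minimal sketch, not part of the original code, of restoring the most recent of those checkpoints back into the same graph built in main():

# Sketch only: restore the latest fine-tuned checkpoint (assumes the graph from main() exists).
saver = tf.train.Saver()
with tf.Session() as sess:
    # train_file is a path prefix, so look for checkpoints in its containing directory
    ckpt_path = tf.train.latest_checkpoint(
        'C:/Users/guesthost/Desktop/机器学习/深度学习/深度学习预训练模型/my_inception_model')
    saver.restore(sess, ckpt_path)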

Reposted from blog.csdn.net/adorkable_thief/article/details/84639111