5 CIFAR-10 Classification in Practice

1. Main Idea

  1. Download and parse the data, and define the data structures
  2. Define your own network structure
  3. Define the loss function (loss)
  4. Optimize the parameters based on the loss and the ground-truth labels
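
These four steps map one-to-one onto the code in section 2. As a rough orientation, here is a condensed sketch of how train.py wires them together (loss and func_optimal are the helpers defined in train.py below; this is a sketch, not a standalone script):

import tensorflow as tf
import readcifar10
import resnet

# 1. parse the data: a shuffled batch of training images and labels
tr_im, tr_label = readcifar10.read(batchsize=64, type=0, no_aug_data=1)
# 2. define the network: placeholders feeding the ResNet from resnet.py
input_data = tf.placeholder(tf.float32, [None, 32, 32, 3])
input_label = tf.placeholder(tf.int64, [None])
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder(tf.bool)
logits = resnet.model_resnet(input_data, keep_prob=keep_prob, is_training=is_training)
# 3. define the loss from logits and labels
total_loss, l2_loss = loss(logits, input_label)
# 4. optimize the parameters against the loss
global_step, op, lr = func_optimal(64, total_loss)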

2. Main Code

train.py
import tensorflow as tf
import readcifar10
# slim is a high-level wrapper around TensorFlow
slim = tf.contrib.slim
import os
import resnet

# Input: a batch of images; output: the 10-dimensional logits vector from the
# final fully connected layer, one score per class. Note that train() below
# actually uses resnet.model_resnet; this plain CNN is an alternative.
def model(image, keep_prob=0.8, is_training=True):
    batch_norm_params = {
        # True during training so that batch statistics are updated
        "is_training": is_training,
        # guards against division by zero in the normalization
        "epsilon":1e-5,
        # decay rate for the moving averages
        "decay":0.997,
        'scale':True,
        # collect the moving-average update ops so they run with the train op
        'updates_collections':tf.GraphKeys.UPDATE_OPS
    }

    with slim.arg_scope(
        [slim.conv2d],
        # variance-scaling weight initialization
        weights_initializer = slim.variance_scaling_initializer(),
        # ReLU activation
        activation_fn = tf.nn.relu,
        # L2 regularization on the weights
        weights_regularizer = slim.l2_regularizer(0.0001),
        # batch normalization applied after each convolution
        normalizer_fn = slim.batch_norm,
        normalizer_params = batch_norm_params):
        # all max-pooling layers use SAME padding
        with slim.arg_scope([slim.max_pool2d], padding="SAME"):
            # conv layer: 32 channels, 3x3 kernel, scoped as conv1
            net = slim.conv2d(image, 32, [3, 3], scope='conv1')
            net = slim.conv2d(net, 32, [3, 3], scope='conv2')
            # pooling layer
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')


            net = slim.conv2d(net, 64, [3, 3], scope='conv3')
            net = slim.conv2d(net, 64, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool2')


            net = slim.conv2d(net, 128, [3, 3], scope='conv5')
            net = slim.conv2d(net, 128, [3, 3], scope='conv6')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool3')

            # final conv layer
            net = slim.conv2d(net, 256, [3, 3], scope='conv7')
            # global average over the spatial dimensions (axes 1 and 2): NHWC -> NC
            net = tf.reduce_mean(net, axis=[1, 2])
            # ensure shape [N, C]
            net = slim.flatten(net)
            # fully connected layer 1
            net = slim.fully_connected(net, 1024)
            # dropout layer (randomly zeroes activations during training)
            net = slim.dropout(net, keep_prob)
            # fully connected output layer
            net = slim.fully_connected(net, 10)
    return net  # 10-dim logits vector

# takes the network logits and the integer labels
def loss(logits, label):
    # one-hot encode the labels
    one_hot_label = slim.one_hot_encoding(label, 10)
    # softmax cross-entropy classification loss, added to slim's loss collection
    slim.losses.softmax_cross_entropy(logits, one_hot_label)


    # collect the regularization losses
    reg_set = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    # sum the individual L2 terms
    l2_loss = tf.add_n(reg_set)
    slim.losses.add_loss(l2_loss)
    # l2_loss was added to the collection above, so don't add regularization again
    totalloss = slim.losses.get_total_loss(add_regularization_losses=False)

    return totalloss, l2_loss
# learning-rate schedule and optimizer
def func_optimal(batchsize, loss_val):
    global_step = tf.Variable(0, trainable=False)
    # lr = 0.01 * 0.95^(global_step / (50000 / batchsize)),
    # i.e. roughly one decay step per epoch (smooth, since staircase=False)
    lr = tf.train.exponential_decay(0.01,
                                    global_step,
                                    decay_steps= 50000// batchsize,
                                    decay_rate= 0.95,
                                    staircase=False)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

    with tf.control_dependencies(update_ops):
        # Adam optimizer; running UPDATE_OPS first keeps batch-norm statistics current
        op = tf.train.AdamOptimizer(lr).minimize(loss_val, global_step)
    # returns the current iteration counter, the train op, and the lr tensor
    return global_step, op, lr

def train():
    batchsize = 64
    # directory for TensorBoard logs
    folder_log = 'logdirs-resnet'
    # directory for model checkpoints
    folder_model = 'model-resnet'

    if not os.path.exists(folder_log):
        os.mkdir(folder_log)

    if not os.path.exists(folder_model):
        os.mkdir(folder_model)

    tr_summary = set()
    te_summary = set()

    ##data
    # training batches (type=0) with augmentation; test batches (type=1) without
    tr_im, tr_label = readcifar10.read(batchsize, 0, 1)
    te_im, te_label = readcifar10.read(batchsize, 1, 0)

    ##net
    # placeholder for the input images
    input_data = tf.placeholder(tf.float32, shape=[None, 32, 32, 3],
                                name='input_data')
    # placeholder for the labels
    input_label = tf.placeholder(tf.int64, shape=[None],
                                name='input_label')
    # dropout keep probability
    keep_prob = tf.placeholder(tf.float32, shape=None,
                                name='keep_prob')
    # training flag for the batch-norm layers
    is_training = tf.placeholder(tf.bool, shape=None,
                               name='is_training')
    logits = resnet.model_resnet(input_data, keep_prob=keep_prob, is_training=is_training)

    ##loss

    total_loss, l2_loss = loss(logits, input_label)

    tr_summary.add(tf.summary.scalar('train total loss', total_loss))
    tr_summary.add(tf.summary.scalar('train l2_loss', l2_loss))

    te_summary.add(tf.summary.scalar('test total loss', total_loss))
    te_summary.add(tf.summary.scalar('test l2_loss', l2_loss))

    ##accuracy
    # index of the maximum logit, i.e. the predicted class
    pred_max  = tf.argmax(logits, 1)
    # check whether the prediction matches the label
    correct = tf.equal(pred_max, input_label)

    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    tr_summary.add(tf.summary.scalar('train accuracy', accuracy))
    te_summary.add(tf.summary.scalar('test accuracy', accuracy))
    ##op
    global_step, op, lr = func_optimal(batchsize, total_loss)
    tr_summary.add(tf.summary.scalar('train lr', lr))
    te_summary.add(tf.summary.scalar('test lr', lr))

    # de-normalize images from [-1, 1] back to the displayable pixel range
    tr_summary.add(tf.summary.image('train image', input_data * 128 + 128))
    te_summary.add(tf.summary.image('test image', input_data * 128 + 128))

    with tf.Session() as sess:
        # initialize global and local variables
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer()))
        # start the queue runners that feed the input pipeline
        tf.train.start_queue_runners(sess=sess,
                                     coord=tf.train.Coordinator())  # multithreaded coordinator
        # saver for writing checkpoints, keeping at most 5
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
        # look for the latest checkpoint
        ckpt = tf.train.latest_checkpoint(folder_model)
        # resume from it if one exists
        if ckpt:
            saver.restore(sess, ckpt)

        # merge the collected summaries
        tr_summary_op = tf.summary.merge(list(tr_summary))
        te_summary_op = tf.summary.merge(list(te_summary))

        summary_writer = tf.summary.FileWriter(folder_log, sess.graph)

        # one epoch = N iterations, where N = number of training samples / batch_size
        epoch_val = 100
        # 50000 training samples per epoch, epoch_val epochs in total
        for i in range(epoch_val * 50000 // batchsize):
            # fetch one batch of training data
            train_im_batch, train_label_batch = \
                sess.run([tr_im, tr_label])
            # build the feed dict
            feed_dict = {
                input_data:train_im_batch,
                input_label:train_label_batch,
                keep_prob:0.8,
                is_training:True
            }
            # run one optimization step
            _, global_step_val, \
            lr_val, \
            total_loss_val, \
            accuracy_val, tr_summary_str = sess.run([op,
                                      global_step,
                                      lr,
                                      total_loss,
                                      accuracy, tr_summary_op],
                     feed_dict=feed_dict)

            summary_writer.add_summary(tr_summary_str, global_step_val)
            # print training stats every 100 iterations
            if i % 100 == 0:
                print("{},{},{},{}".format(global_step_val,
                                           lr_val, total_loss_val,
                                           accuracy_val))

            # evaluate on the test set once per epoch
            if i % (50000 // batchsize) == 0:
                test_loss = 0
                test_acc = 0
                for ii in range(10000//batchsize):
                    test_im_batch, test_label_batch = \
                        sess.run([te_im, te_label])
                    feed_dict = {
                        input_data: test_im_batch,
                        input_label: test_label_batch,
                        keep_prob: 1.0,
                        is_training: False
                    }

                    total_loss_val, global_step_val, \
                    accuracy_val, te_summary_str = sess.run([total_loss, global_step,
                                              accuracy, te_summary_op],
                                             feed_dict=feed_dict)

                    summary_writer.add_summary(te_summary_str, global_step_val)

                    test_loss += total_loss_val
                    test_acc += accuracy_val

                print('test:', test_loss * batchsize / 10000,
                      test_acc * batchsize / 10000)
            # save a checkpoint every 1000 iterations
            if i % 1000 == 0:
                saver.save(sess, "{}/model.ckpt{}".format(folder_model, str(global_step_val)))
    return

if __name__ == '__main__':
    train()

readcifar10.py

For detailed comments and usage, see the previous article.

import tensorflow as tf

def read(batchsize=64, type=1, no_aug_data=1):
    # type: 0 selects the training set, 1 the test set
    # no_aug_data: 1 enables augmentation (training set only, despite the name)
    reader = tf.TFRecordReader()
    if type == 0: #train
        file_list = ["data/train.tfrecord"]
    if type == 1: #test
        file_list = ["data/test.tfrecord"]
    # create the filename queue
    filename_queue = tf.train.string_input_producer(
        file_list, num_epochs=None, shuffle=True
    )
    # read one serialized example
    _, serialized_example = reader.read(filename_queue)

    # batch the serialized examples with shuffling
    batch = tf.train.shuffle_batch([serialized_example], batchsize, capacity=batchsize * 10,
                                   min_after_dequeue= batchsize * 5)

    feature = {'image': tf.FixedLenFeature([], tf.string),
               'label': tf.FixedLenFeature([], tf.int64)}
    # parse the whole batch according to the feature spec
    features = tf.parse_example(batch, features = feature)

    images = features["image"]

    img_batch = tf.decode_raw(images, tf.uint8)
    img_batch = tf.cast(img_batch, tf.float32)
    img_batch = tf.reshape(img_batch, [batchsize, 32, 32, 3])
    # data augmentation, applied only to the training set
    if type == 0 and no_aug_data == 1:
        # random crop to 28x28
        distorted_image = tf.random_crop(img_batch,
                                         [batchsize, 28, 28, 3])
        # random contrast
        distorted_image = tf.image.random_contrast(distorted_image,
                                                   lower=0.8,
                                                   upper=1.2)
        # random hue
        distorted_image = tf.image.random_hue(distorted_image,
                                              max_delta=0.2)
        # random saturation
        distorted_image = tf.image.random_saturation(distorted_image,
                                                     lower=0.8,
                                                     upper=1.2)
        # clamp pixel values back to [0, 255]
        img_batch = tf.clip_by_value(distorted_image, 0, 255)
    # resize back to 32x32
    img_batch = tf.image.resize_images(img_batch, [32, 32])
    label_batch = tf.cast(features['label'], tf.int64)

    # normalize pixel values to [-1, 1]
    img_batch = tf.cast(img_batch, tf.float32) / 128.0 - 1.0
    return img_batch, label_batch
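
As a quick sanity check, the reader can be exercised on its own. This is a minimal sketch; it assumes data/test.tfrecord already exists (created as described in the previous article):

import tensorflow as tf
import readcifar10

# pull one test batch and inspect its shapes
img_batch, label_batch = readcifar10.read(batchsize=64, type=1, no_aug_data=0)
with tf.Session() as sess:
    sess.run(tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer()))
    tf.train.start_queue_runners(sess=sess, coord=tf.train.Coordinator())
    im, lab = sess.run([img_batch, label_batch])
    print(im.shape, lab.shape)  # expected: (64, 32, 32, 3) (64,)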

resnet.py

import tensorflow as tf
slim = tf.contrib.slim

# bottleneck residual block: a shortcut branch plus a 1x1 -> 3x3 -> 1x1 main branch
def resnet_blockneck(net, numout, down, stride, is_training):
    batch_norm_params = {
    'is_training': is_training,
    'decay': 0.997,
    'epsilon': 1e-5,
    'scale': True,
    'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with slim.arg_scope(
                [slim.conv2d],
                weights_regularizer=slim.l2_regularizer(0.0001),
                weights_initializer=slim.variance_scaling_initializer(),
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding='SAME') as arg_sc:

                shortcut = net

                # project the shortcut with a 1x1 conv when the channel count changes
                if numout != net.get_shape().as_list()[-1]:
                    shortcut = slim.conv2d(net, numout, [1, 1])

                # downsample the shortcut to match the main branch
                if stride != 1:
                    shortcut = slim.max_pool2d(shortcut, [3, 3],
                                               stride=stride)

                # bottleneck: 1x1 reduce, 3x3 conv, 1x1 expand
                net = slim.conv2d(net, numout // down, [1, 1])
                net = slim.conv2d(net, numout // down, [3, 3])
                net = slim.conv2d(net, numout, [1, 1])

                if stride != 1:
                    net = slim.max_pool2d(net, [3, 3], stride=stride)

                # residual connection
                net = net + shortcut

                return net


def model_resnet(net, keep_prob=0.5, is_training = True):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding='SAME') as arg_sc:

        net = slim.conv2d(net, 64, [3, 3], activation_fn=tf.nn.relu)
        net = slim.conv2d(net, 64, [3, 3], activation_fn=tf.nn.relu)

        net = resnet_blockneck(net, 128, 4, 2, is_training)
        net = resnet_blockneck(net, 128, 4, 1, is_training)
        net = resnet_blockneck(net, 256, 4, 2, is_training)
        net = resnet_blockneck(net, 256, 4, 1, is_training)
        net = resnet_blockneck(net, 512, 4, 2, is_training)
        net = resnet_blockneck(net, 512, 4, 1, is_training)

        # global average pooling over the spatial dimensions
        net = tf.reduce_mean(net, [1, 2])
        net = slim.flatten(net)

        net = slim.fully_connected(net, 1024, activation_fn=tf.nn.relu, scope='fc1')
        net = slim.dropout(net, keep_prob, scope='dropout1')
        net = slim.fully_connected(net, 10, activation_fn=None, scope='fc2')

    return net
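
Note that train.py builds its network with resnet.model_resnet; the plain CNN model() defined at the top of train.py is an interchangeable alternative with the same signature, so switching between the two is a one-line change in train():

# in train(), use the plain CNN from train.py instead of the ResNet:
logits = model(input_data, keep_prob=keep_prob, is_training=is_training)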

Reposted from blog.csdn.net/DropJing/article/details/104884143