AlexNet Implementation (TensorFlow Version)

Since my fundamentals are not very strong, it took me about five days recently to finally get AlexNet implemented. First, let's take a look at the overall structure.

test and test2 are the directories generated while training the networks. input_data is the data-processing file. There are three model files: from top to bottom, the first is the original, simplest AlexNet structure; the second imitates training on 2 GPUs; and the third removes the LRN layers. test.py picks an image that was never used in training and tests it, and the two training files train the networks of the corresponding two model files. I did not train the no-LRN network; interested readers can tweak training.py to train it.

I mainly referenced this article while writing the code: https://blog.csdn.net/qq_26499769/article/details/82928178

Before writing any code, get the plan straight:

  • The network structure: what the input and output of each step are, and what the final functionality should be
  • Choose a dataset and preprocess it to match the network's input
  • Design the network structure
  • Train the network
  • Test the network
  • Summarize

Let's walk through these steps in order:

First, we need to understand the AlexNet architecture, shown in the figure below:

Next, step two: I chose the cats-vs-dogs dataset from Kaggle, which contains 12,500 cat images and 12,500 dog images.

Step three is to design our network structure based on the first two steps; see the first figure.

Then we write the code to train the network, and finally test it.
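Before diving into the code, it is worth checking the feature-map sizes on paper. The sketch below is my own addition (not part of the project): it walks a 227x227 input through the convolution and pooling settings used in model.py and confirms that the flattened pool5 output has 9216 features.

import math

# Output-size formulas for TensorFlow's padding modes:
#   SAME  -> ceil(in / stride)
#   VALID -> ceil((in - ksize + 1) / stride)
def same(size, stride):
    return math.ceil(size / stride)

def valid(size, ksize, stride):
    return math.ceil((size - ksize + 1) / stride)

s = 227
s = same(s, 4)       # conv1: 11x11x96, stride 4, SAME -> 57
s = valid(s, 3, 2)   # pool1: 3x3, stride 2, VALID     -> 28
s = same(s, 1)       # conv2: 5x5x256, SAME            -> 28
s = valid(s, 3, 2)   # pool2                           -> 13
s = same(s, 1)       # conv3/conv4/conv5: 3x3, SAME    -> 13
s = valid(s, 3, 2)   # pool5                           -> 6
print('fc6 input dim:', s * s * 256)  # 6 * 6 * 256 = 9216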

input_data.py

import tensorflow as tf
import numpy as np
import os

#img_width = 227
#img_height = 227
#train_dir = 'H:/liangpan/train1/'


def get_files(file_dir):
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    for file in os.listdir(file_dir):  # os.listdir returns a list of the names of the files and folders in file_dir
        # collect the image paths into lists
        # if the image is a cat, label it 0
        name = file.split(sep='.')
        if name[0] == 'cat':
            cats.append(file_dir + file)
            label_cats.append(0)
        else:
            # if the image is a dog, label it 1
            dogs.append(file_dir + file)
            label_dogs.append(1)
    print('There are %d cats\nThere are %d dogs' % (len(cats), len(dogs)))
    image_list = np.hstack((cats, dogs))
    label_list = np.hstack((label_cats, label_dogs))
    # stack the images and labels into temp, shuffle the order, then take them back out
    temp = np.array([image_list, label_list])
    temp = temp.transpose()  # transpose: from a 2-row matrix to a 2-column matrix
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]
    return image_list, label_list


#image_list, label_list = get_files(train_dir)


def get_batch(image, label, image_W, image_H, batch_size, capacity):
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)
    # put the images and their labels into a queue
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    # decode the JPEG image
    image = tf.image.decode_jpeg(image_contents, channels=3)
    # crop or pad the image to image_W x image_H (this does not scale the image)
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    # standardize the image (per-image zero mean and unit variance)
    image = tf.image.per_image_standardization(image)
    # generate batches
    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=64, capacity=capacity)
    label_batch = tf.reshape(label_batch, [batch_size])
    return image_batch, label_batch

# Test get_batch:
"""
import matplotlib.pyplot as plt

BATCH_SIZE = 2
CAPACITY = 256
IMG_W = 208
IMG_H = 208
train_dir = 'H:/liangpan/train1/'
image_list, label_list = get_files(train_dir)
image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

with tf.Session() as sess:
    i = 0
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        while not coord.should_stop() and i < 1:
            img, label = sess.run([image_batch, label_batch])
            for j in np.arange(BATCH_SIZE):
                print('label:%d' % label[j])
                plt.imshow(img[j, :, :, :])
                plt.show()
            i += 1
    except tf.errors.OutOfRangeError:
        print('done!')
    finally:
        coord.request_stop()
    coord.join(threads)
    """

model.py

import tensorflow as tf

# print each layer's name and output shape
def shape(value):
    print(value.op.name,value.get_shape().as_list())

def inference(images, batch_size, n_classes):
    #conv1
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[11, 11, 3, 96],  # 96 randomly generated 11x11x3 kernels
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        # tf.truncated_normal_initializer draws from a truncated normal
        # distribution: values more than 2 standard deviations from the mean
        # are discarded and redrawn. stddev is the standard deviation.
        biases = tf.get_variable("biases",
                                 shape=[96],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 4, 4, 1], padding='SAME')
        # strides = [1, stride, stride, 1]: the first and last entries are
        # fixed at 1; the second is the horizontal stride and the third the
        # vertical stride.
        # VALID: as the filter slides over the input, leftover rows/columns
        # that don't fit a full step are dropped.
        # SAME: the input is padded with a border of zeros, so when the filter
        # runs past the edge the missing values are filled with zeros.
        pre_activation = tf.nn.bias_add(conv, biases)
        # tf.nn.bias_add adds the bias vector to every row of conv; the result has the same shape as conv
        conv1 = tf.nn.relu(pre_activation, name=scope.name)  # ReLU activation

    with tf.variable_scope('pooling1_lrn') as scope:
        # conv1 is the feature map produced by the convolution, with shape [batch, height, width, channels]
        shape(conv1)
        # max-pooling layer
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling1')
        # local response normalization (LRN) layer
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm1')
        shape(norm1)

    #conv2
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[5, 5, 96, 256],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[256],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling2')

    #conv3
    with tf.variable_scope('conv3') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 256, 384],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[384],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool2, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name='conv3')

    #conv4
    with tf.variable_scope('conv4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 384, 384],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[384],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(conv3, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name='conv4')

    #conv5
    with tf.variable_scope('conv5') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 384, 256],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[256],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(conv4, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(pre_activation, name='conv5')

    with tf.variable_scope('pooling5_lrn') as scope:
        norm5 = tf.nn.lrn(conv5, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm5')
        pool5 = tf.nn.max_pool(norm5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling5')


    print("pool5.shape = ",pool5.shape)
    #fc6
    with tf.variable_scope('fc6') as scope:
        reshape = tf.reshape(pool5, shape=[batch_size, -1])  # flatten pool5: collapse the last three dimensions into one, keeping the batch_size dimension
        print("reshape.shape = ", reshape.shape)
        dim = reshape.get_shape()[1].value
        print("dim",dim)
        weights = tf.get_variable('weights',
                                  shape=[dim, 4096],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))

        biases = tf.get_variable('biases',
                                 shape=[4096],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc6 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

        # dropout6
        with tf.name_scope('dropout6') as scope:
            dropout6 = tf.nn.dropout(fc6, 0.5)  # keep_prob=0.5: each unit is kept with probability 0.5 (dropout stays on at test time here; see the note after this listing)

    with tf.variable_scope('fc7') as scope:
        weights = tf.get_variable('weights',
                                  shape=[4096, 4096],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[4096],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc7 = tf.nn.relu(tf.matmul(dropout6, weights) + biases, name='fc7')
        # dropout7
        with tf.name_scope('dropout7') as scope:
            dropout7 = tf.nn.dropout(fc7, 0.5)

    #fc8
    with tf.variable_scope('fc8') as scope:
        weights = tf.get_variable('weights',
                                  shape=[4096, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc8 = tf.add(tf.matmul(dropout7, weights), biases, name='fc8')

    return fc8


def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits \
            (logits=logits, labels=labels, name='xentropy_per_example')

        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


def training(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)  # scalar that records the global training step
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op

# evaluation function
def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        # check whether each prediction matches its label; returns a bool tensor
        correct = tf.nn.in_top_k(logits, labels, 1)  # k=1: the true label must be the top prediction
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)  # log the accuracy scalar

    return accuracy
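Two reference notes on model.py, both my own additions. First, tf.nn.lrn implements the local response normalization from the AlexNet paper:

\[ b^{i}_{x,y} = a^{i}_{x,y} \big/ \big( k + \alpha \textstyle\sum_{j=\max(0,\, i-r)}^{\min(N-1,\, i+r)} (a^{j}_{x,y})^{2} \big)^{\beta} \]

where a^i_{x,y} is the activation of kernel i at position (x, y), r is depth_radius, k is bias, and N is the number of channels. The paper uses k = 2, r = 2, alpha = 1e-4, beta = 0.75, which are the values model_2_gpu.py passes; model.py uses depth_radius = 4, bias = 1.0, alpha = 0.001/9 instead.

Second, the dropout layers here are always on: inference() hard-codes keep_prob = 0.5, so dropout is still applied when the test scripts reuse this function at evaluation time. A common fix is to thread a keep_prob through, for example (a sketch, assuming the callers feed it):

# inside inference(), replace the hard-coded 0.5 with a placeholder that
# defaults to 1.0 (no dropout) unless the training loop feeds 0.5
keep_prob = tf.placeholder_with_default(1.0, shape=[], name='keep_prob')
dropout6 = tf.nn.dropout(fc6, keep_prob)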

training.py

import os
import numpy as np
import tensorflow as tf
import input_data
import model

N_CLASSES = 2  # number of output classes
IMG_W = 227  # image width
IMG_H = 227  # image height
BATCH_SIZE = 8  # batch size
CAPACITY = 200
MAX_STEP = 15000  # number of training steps
learning_rate = 0.0001  # learning rate


def run_training():
    train_dir = 'H:/liangpan/train1/'  # path to the training dataset
    logs_train_dir = 'H:/liangpan/test_alexnet/test/'  # where checkpoints and training logs are stored
    # get the image paths and labels
    train, train_label = input_data.get_files(train_dir)
    # generate batches
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # build the training model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # get the loss
    train_loss = model.losses(train_logits, train_label_batch)
    # training op
    train_op = model.training(train_loss, learning_rate)
    # get the accuracy
    train_acc = model.evaluation(train_logits, train_label_batch)
    # merge all summaries
    summary_op = tf.summary.merge_all()

    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)  # writer that saves the summaries
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())  # initialize all variables
    # The Coordinator manages the threads running in the Session: it can stop
    # multiple worker threads together and report exceptions to the program
    # waiting on them; once an exception is caught, all threads are stopped.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in np.arange(MAX_STEP):
            # when coord.should_stop() returns True the data has been exhausted and coord.request_stop() has been called
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
            if step % 100 == 0:
                print('step %d,train loss=%.2f,train accuracy=%.2f' % (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # save the model to checkpoint_path every 2000 steps
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()


run_training()
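Because training.py writes summaries to logs_train_dir through tf.summary.FileWriter, you can watch the loss and accuracy curves during training with the standard TensorBoard command, pointed at the log directory used above:

tensorboard --logdir=H:/liangpan/test_alexnet/test/

then open the URL it prints (http://localhost:6006 by default) in a browser.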

test.py tests the model saved by training.py. It is the same as test_2_gpu.py below, except that it imports model instead of model_2_gpu and loads its checkpoint from the test/ directory instead of test2/.


test_2_gpu.py tests the model trained by training_2_gpu.py.

from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import input_data
import tensorflow as tf
import model_2_gpu

# pick one image
def get_one_image(train):
    n = len(train)
    ind = np.random.randint(0, n)  # randomly choose one image from the dataset
    img_dir = train[ind]  # path of the chosen image
    print(img_dir)
    image = Image.open(img_dir)  # open the image
    plt.imshow(image)
    image = image.resize([227, 227])  # resize the image to 227x227
    image = np.array(image)  # convert the image to a numpy array
    plt.show()
    return image


def evaluate_one_image():
    train_dir = 'H:/liangpan/train/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)
    # Graph.as_default() is a context manager that makes this graph the default inside the block
    with tf.Graph().as_default():
        BATCH_SIZE = 1  # only one image is read, so the batch size is 1
        N_CLASSES = 2
        # convert the image to float32
        image = tf.cast(image_array, tf.float32)
        image = tf.reshape(image, [1, 227, 227, 3])
        # the image was a 3-D [227, 227, 3] array; reshape it into a 4-D tensor
        logit = model_2_gpu.inference(image, BATCH_SIZE, N_CLASSES)
        # inference() returns raw logits (no activation), so apply softmax here
        logit = tf.nn.softmax(logit)
        # (the image tensor above is already a constant in this graph, so no
        # placeholder or feed_dict is needed; see the note after this listing)

        logs_train_dir = 'H:/liangpan/test_alexnet/test2/'
        saver = tf.train.Saver()
        with tf.Session() as sess:
            print("reading checkpoints")
            # load the checkpoint into the session
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('loading success,global_step is %s' % global_step)
            else:
                print('no checkpoint file found')
            # run the image through the model
            prediction = sess.run(logit)
            # index of the class with the highest probability
            max_index = np.argmax(prediction)
            if max_index == 0:
                print("this is a cat with possibility %.6f" % prediction[:, 0])
            else:
                print("this is a dog with posiibility %.6f" % prediction[:, 1])


evaluate_one_image()
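The image here is baked into the graph as a constant, which works for a single image but rebuilds the graph for every evaluation. If you want a single graph that can score arbitrary images, a placeholder-based variant would look roughly like this (my own untested sketch):

        x = tf.placeholder(tf.float32, shape=[1, 227, 227, 3])
        logit = tf.nn.softmax(model_2_gpu.inference(x, BATCH_SIZE, N_CLASSES))
        # ...restore the checkpoint as above, then:
        prediction = sess.run(logit, feed_dict={x: image_array[np.newaxis].astype(np.float32)})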

model_2_gpu.py

import tensorflow as tf

# print each layer's name and output shape
def shape(value):
    print(value.op.name,value.get_shape().as_list())

def inference(images, batch_size, n_classes):
    #conv1
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[11,11,3,96],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable("biases",
                                 shape=[96],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 4, 4, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope('pooling1_lrn') as scope:

        norm1 = tf.nn.lrn(conv1, depth_radius=2, bias=2.0, alpha=1e-4,
                          beta=0.75, name='norm1')
        pool1 = tf.nn.max_pool(norm1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling1')

    #conv2
    with tf.variable_scope('conv2') as scope:
        # grouped processing: split the input and the weights, convolve each group separately, then merge the results with concat() (see the helper sketch after this listing)
        pool1_groups = tf.split(axis=3, value=pool1, num_or_size_splits=2)
        weights = tf.get_variable('weights',
                                  shape=[5, 5, 48, 256],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        weights_groups = tf.split(axis=3, value=weights, num_or_size_splits=2)
        conv_up = tf.nn.conv2d(pool1_groups[0], weights_groups[0], [1, 1, 1, 1], padding='SAME')
        conv_down = tf.nn.conv2d(pool1_groups[1], weights_groups[1], [1, 1, 1, 1], padding='SAME')

        biases = tf.get_variable('biases',
                                 shape=[256],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))

        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up, bias_down])
        conv2 = tf.nn.relu(bias, name=scope.name)

    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=2, bias=2.0, alpha=1e-4,
                          beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling2')

    #conv3
    with tf.variable_scope('conv3') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 256, 384],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[384],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool2, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name='conv3')

    #conv4
    with tf.variable_scope('conv4') as scope:
        conv3_groups = tf.split(axis=3, value=conv3, num_or_size_splits=2)
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 192, 384],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        weights_groups = tf.split(axis=3, value=weights, num_or_size_splits=2)
        conv_up = tf.nn.conv2d(conv3_groups[0], weights_groups[0], [1, 1, 1, 1], padding='SAME')
        conv_down = tf.nn.conv2d(conv3_groups[1], weights_groups[1], [1, 1, 1, 1], padding='SAME')

        biases = tf.get_variable('biases',
                                 shape=[384],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up, bias_down])
        conv4 = tf.nn.relu(bias, name=scope.name)

    #conv5
    with tf.variable_scope('conv5') as scope:
        conv4_groups = tf.split(axis=3, value=conv4, num_or_size_splits=2)
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 192, 256],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        weights_groups = tf.split(axis=3, value=weights, num_or_size_splits=2)
        conv_up = tf.nn.conv2d(conv4_groups[0], weights_groups[0], [1, 1, 1, 1], padding='SAME')
        conv_down = tf.nn.conv2d(conv4_groups[1], weights_groups[1], [1, 1, 1, 1], padding='SAME')

        biases = tf.get_variable('biases',
                                 shape=[256],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))

        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up, bias_down])
        conv5 = tf.nn.relu(bias, name=scope.name)

    with tf.variable_scope('pooling5_lrn') as scope:
        norm5 = tf.nn.lrn(conv5, depth_radius=2, bias=2.0, alpha=1e-4,
                          beta=0.75, name='norm5')
        pool5 = tf.nn.max_pool(norm5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling5')



    #fc6
    with tf.variable_scope('fc6') as scope:
        reshape = tf.reshape(pool5, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 4096],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))

        biases = tf.get_variable('biases',
                                 shape=[4096],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc6 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

        # dropout6
        with tf.name_scope('dropout6') as scope:
            dropout6 = tf.nn.dropout(fc6, 0.5)

    with tf.variable_scope('fc7') as scope:
        weights = tf.get_variable('weights',
                                  shape=[4096, 4096],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[4096],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc7 = tf.nn.relu(tf.matmul(dropout6, weights) + biases, name='fc7')
        # dropout7
        with tf.name_scope('dropout7') as scope:
            dropout7 = tf.nn.dropout(fc7, 0.5)

    #fc8
    with tf.variable_scope('fc8') as scope:
        weights = tf.get_variable('weights',
                                  shape=[4096, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc8 = tf.add(tf.matmul(dropout7, weights), biases, name='fc8')

    return fc8


def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits \
            (logits=logits, labels=labels, name='xentropy_per_example')

        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


def training(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)

    return accuracy
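The split/convolve/concat pattern in conv2, conv4, and conv5 above simulates the original AlexNet's two-GPU grouping on a single device. Since it repeats three times almost verbatim, it can be factored into a helper; below is a possible refactor, my own sketch rather than code from the original post:

def grouped_conv(x, weights, biases, groups=2, name=None):
    # split the input channels and the kernels into `groups` groups,
    # convolve each group with its own kernels, then merge the results
    x_groups = tf.split(axis=3, value=x, num_or_size_splits=groups)
    w_groups = tf.split(axis=3, value=weights, num_or_size_splits=groups)
    b_groups = tf.split(axis=0, value=biases, num_or_size_splits=groups)
    outputs = []
    for xg, wg, bg in zip(x_groups, w_groups, b_groups):
        conv = tf.nn.conv2d(xg, wg, strides=[1, 1, 1, 1], padding='SAME')
        outputs.append(tf.nn.bias_add(conv, bg))
    return tf.nn.relu(tf.concat(axis=3, values=outputs), name=name)

With this helper, the body of conv2 reduces to conv2 = grouped_conv(pool1, weights, biases, name=scope.name).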

model_no_lrn.py

import tensorflow as tf


def inference(images, batch_size, n_classes):
    #conv1
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[11,11,3,96],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable("biases",
                                 shape=[96],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 4, 4, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope('pooling1') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling1')


    #conv2
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[5, 5, 96, 256],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[256],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    with tf.variable_scope('pooling2') as scope:
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling2')

    #conv3
    with tf.variable_scope('conv3') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 256, 384],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[384],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool2, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name='conv3')

    #conv4
    with tf.variable_scope('conv4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 384, 384],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[384],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(conv3, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name='conv4')

    #conv5
    with tf.variable_scope('conv5') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 384, 256],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[256],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(conv4, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(pre_activation, name='conv5')

    with tf.variable_scope('pooling5') as scope:
        # this variant removes the LRN layers, so pool conv5 directly
        pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling5')



    #fc6
    with tf.variable_scope('fc6') as scope:
        reshape = tf.reshape(pool5, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 4096],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))

        biases = tf.get_variable('biases',
                                 shape=[4096],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc6 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

        # dropout6
        with tf.name_scope('dropout6') as scope:
            dropout6 = tf.nn.dropout(fc6, 0.5)

    with tf.variable_scope('fc7') as scope:
        weights = tf.get_variable('weights',
                                  shape=[4096, 4096],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[4096],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc7 = tf.nn.relu(tf.matmul(dropout6, weights) + biases, name='fc7')
        # dropout7
        with tf.name_scope('dropout7') as scope:
            dropout7 = tf.nn.dropout(fc7, 0.5)

    #fc8
    with tf.variable_scope('fc8') as scope:
        weights = tf.get_variable('weights',
                                  shape=[4096, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc8 = tf.add(tf.matmul(dropout7, weights), biases, name='fc8')

    return fc8


def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits \
            (logits=logits, labels=labels, name='xentropy_per_example')

        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


def training(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)

    return accuracy

Summary

Because my fundamentals are weak, this took a long time of stumbling through many versions before it finally came together into something presentable. The code looks simple, but writing it by hand and digging into every detail showed me it is not so simple, so my hands-on skills still need work.


Reposted from blog.csdn.net/weixin_32888153/article/details/84889934