Training Your Own Dataset with TensorFlow (2): Defining the Neural Network

Copyright notice: please respect the effort behind this self-study journey. https://blog.csdn.net/sinat_42378539/article/details/83047958


The previous post, Training Your Own Dataset with TensorFlow (1), covered how to build your own dataset. The next step is to define the forward propagation process, i.e., the network itself. This post uses a classic convolutional architecture: two convolutional layers, two max-pooling layers, and two fully connected layers, optionally followed by a softmax layer.
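Before the code, here is a shape walk-through of the forward pass, derived from the constants and layer definitions below (SAME padding with stride 1 preserves the spatial size; each 2x2, stride-2 pooling halves it):

# Shape trace for one 128x128x3 input image:
# conv1 (5x5, SAME, stride 1) -> 128 x 128 x 64
# pool1 (2x2, stride 2)       ->  64 x  64 x 64
# conv2 (5x5, SAME, stride 1) ->  64 x  64 x 128
# pool2 (2x2, stride 2)       ->  32 x  32 x 128
# flatten                     ->  32 * 32 * 128 = 131072 nodes
# fc1                         ->  512 nodes (FC_SIZE)
# fc2 (logits)                ->  2 nodes (NUM_LABELS)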

import tensorflow as tf

# Configuration parameters
# Input image size
IMAGE_SIZE = 128
NUM_CHANNELS = 3
NUM_LABELS = 2
# Kernel size and depth of the first convolutional layer
CONV1_DEEP = 64
CONV1_SIZE = 5
# Kernel size and depth of the second convolutional layer
CONV2_DEEP = 128
CONV2_SIZE = 5
# Number of nodes in the fully connected layer
FC_SIZE = 512
def get_Weight(shape, regularizer_rate=None):    # weight helper; pass a regularization rate if L2 regularization is needed (defaults to None)
    Weight = tf.Variable(tf.truncated_normal(shape=shape, stddev=0.1), dtype=tf.float32)    # truncated-normal random initialization, as recommended in the TensorFlow docs

    if regularizer_rate is not None:
        # Add the L2 regularization term for this weight to the 'losses' collection
        regularizer = tf.contrib.layers.l2_regularizer(regularizer_rate)
        tf.add_to_collection('losses', regularizer(Weight))

    return Weight

def get_biase(shape):       # bias helper
    biase = tf.Variable(tf.constant(value=0.1, shape=shape), dtype=tf.float32)    # constant initialization at 0.1, as recommended in the TensorFlow docs
    return biase

def create_conv2d(x, w):     # convolutional layer: stride 1, SAME padding
    conv2d = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
    return conv2d

def max_pooling(x):         # max pooling: 2x2 window, stride 2, SAME padding
    pool = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    return pool

def create_fc(x, w, b):       # fully connected layer
    fc = tf.matmul(x, w) + b
    return fc

# Define the forward propagation process.
# A new parameter `train` distinguishes the training pass from the test pass:
# dropout is applied only during training, which helps prevent overfitting
# and improves the robustness of the model.
def inference(input_tensor, train, regularizer_rate):
    with tf.variable_scope('layer1-conv1'):
        conv1_Weights = get_Weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP])     # 5x5 kernels, depth 64
        conv1_baises = get_biase([CONV1_DEEP])
        conv1 = tf.nn.bias_add(create_conv2d(input_tensor, conv1_Weights), conv1_baises)
        conv1 = tf.nn.relu(conv1)       # ReLU activation

    with tf.name_scope('layer2-pool1'):        # output: 64 x 64 x 64
        pool1 = max_pooling(conv1)

    with tf.variable_scope('layer3-conv2'):
        conv2_Weights = get_Weight([CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP])       # 5x5 kernels, depth 128
        conv2_biases = get_biase([CONV2_DEEP])
        conv2 = tf.nn.bias_add(create_conv2d(pool1, conv2_Weights), conv2_biases)
        conv2 = tf.nn.relu(conv2)

    with tf.name_scope('layer4-pool2'):         # output: 32 x 32 x 128
        pool2 = max_pooling(conv2)

        pool_shape = pool2.get_shape().as_list()
        # pool_shape is [batch_size, 32, 32, 128].
        # The length of the flattened vector is the product of the feature map's
        # height, width and depth.
        nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
        # Reshape the output of the fourth layer into a batch of vectors
        reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Declare the variables of the fifth layer (fully connected) and perform its forward pass
    with tf.variable_scope('layer5-fc1'):
        fc1_Weights = get_Weight([nodes, FC_SIZE], regularizer_rate)
        fc1_biases = get_biase([FC_SIZE])
        fc1 = tf.nn.relu(create_fc(reshaped, fc1_Weights, fc1_biases))
        # Apply dropout during training to prevent overfitting
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Declare the variables of the sixth layer (fully connected) and perform its forward pass
    with tf.variable_scope('layer6-fc2'):
        fc2_Weights = get_Weight([FC_SIZE, NUM_LABELS], regularizer_rate)
        fc2_biases = get_biase([NUM_LABELS])
        logit = create_fc(fc1, fc2_Weights, fc2_biases)
        # fc2 = tf.nn.relu(fc2)     # apply the activation here only if a separate softmax layer follows

    # A softmax layer can be added after the fully connected layers;
    # the loss function would then need to change accordingly.
    # with tf.variable_scope('layer7-softmax'):
    #     sm_Weight = get_Weight([FC_SIZE, NUM_LABELS])
    #     sm_biases = get_biase([NUM_LABELS])
    #     sm = tf.matmul(fc2, sm_Weight) + sm_biases
    #     logit = tf.nn.softmax(sm)

    return logit
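For reference, here is a minimal sketch of how this forward pass might be wired up in TF 1.x. The batch size of 100 and the regularization rate of 0.0001 are illustrative assumptions, not values from this post; a concrete batch size is needed because the reshape above uses pool_shape[0].

# Minimal usage sketch (assumed values, not from this post)
BATCH_SIZE = 100              # assumed batch size; the reshape needs a concrete value
REGULARIZER_RATE = 0.0001     # assumed L2 regularization rate

x = tf.placeholder(tf.float32,
                   [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS],
                   name='x-input')
logits = inference(x, train=True, regularizer_rate=REGULARIZER_RATE)

# The L2 terms collected in get_Weight() can later be folded into the loss, e.g.:
# loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))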

The next post will cover the backpropagation (training) process.
Corrections and suggestions are welcome~~
