LeNet-5 model: forward propagation of a convolutional neural network

# Forward propagation of a convolutional neural network (LeNet-5)

import tensorflow as tf
INPUT_NODE = 784  # number of pixels in an input image
OUTPUT_NODE = 10  # number of output classes

IMAGE_SIZE = 28  # input image size, 28*28 = 784
NUM_CHANNELS = 1  # number of channels in the input image
NUM_LABELS = 10  # number of output labels

CONV1_DEEP = 32  # depth of the first convolutional layer, i.e. the number of filters
CONV1_SIZE = 5  # filter size of the first convolutional layer

CONV2_DEEP = 64  # depth of the second convolutional layer, i.e. the number of filters
CONV2_SIZE = 5  # filter size of the second convolutional layer

FC_SIZE = 512  # number of nodes in the fully connected layer

def inference(input_tensor,train,regularizer):  # the entire forward-propagation process; 'train' toggles dropout
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
                "weight",[CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_DEEP],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        # The bias shape is the layer depth because each filter has one bias,
        # shared across every spatial position of its output map.
        conv1_biases = tf.get_variable("bias",[CONV1_DEEP],initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(
                input_tensor,conv1_weights,strides=[1,1,1,1],padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))
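        # With SAME padding and stride 1, the output of conv1 is 28x28x32.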
        
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(
                relu1,ksize=[1,3,3,1],strides=[1,2,2,1],padding='SAME')
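        # Stride-2 pooling halves the spatial dimensions: pool1 is 14x14x32.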
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
                "weight",[CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
                "bias",[CONV2_DEEP],
                initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(
                pool1,conv2_weights,strides=[1,1,1,1],padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))
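        # conv2 keeps the spatial size (SAME padding, stride 1): output is 14x14x64.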
        
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(
                relu2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
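        # A second stride-2 pooling halves the size again: pool2 is 7x7x64.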
        
    pool_shape = pool2.get_shape().as_list()  # read the output shape of pool2 as a list
    
    nodes = pool_shape[1]*pool_shape[2]*pool_shape[3]  # pool_shape[1], [2] and [3] are the height, width and depth; pool_shape[0] is the number of images in the batch
    reshaped = tf.reshape(pool2,[pool_shape[0],nodes])  # flatten each image in the batch into a vector; pool_shape[0] is the batch size
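    # For a 28x28 input pooled twice with stride 2 and CONV2_DEEP = 64 filters,
    # nodes = 7 * 7 * 64 = 3136.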
    
    
    
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
                "weight",[nodes,FC_SIZE],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized.
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc1_weights))
        fc1_biases = tf.get_variable(
                "bias",[FC_SIZE],initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped,fc1_weights)+fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1,0.5)  # dropout is applied only during training
        
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
                "weight",[FC_SIZE,NUM_LABELS],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc2_weights))
        fc2_biases = tf.get_variable(
                "bias",[NUM_LABELS],
                initializer=tf.constant_initializer(0.1))

        # Return raw logits; softmax is applied together with the loss computation.
        logit = tf.matmul(fc1,fc2_weights)+fc2_biases
    
    return logit
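
As a quick sanity check (not part of the original post), here is a minimal sketch of how inference could be wired up; the fixed BATCH_SIZE and the L2 regularization rate below are assumptions:

# Assumed usage sketch, not the original training script.
# The reshape above uses pool_shape[0], so the batch dimension must be static.
BATCH_SIZE = 100
x = tf.placeholder(
        tf.float32,
        [BATCH_SIZE,IMAGE_SIZE,IMAGE_SIZE,NUM_CHANNELS],
        name='x-input')
regularizer = tf.contrib.layers.l2_regularizer(0.0001)  # assumed rate
y = inference(x,False,regularizer)  # y has shape [BATCH_SIZE, NUM_LABELS]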

Reposted from blog.csdn.net/weixin_40849273/article/details/82692208