Building part of AlexNet in TensorFlow (Python)

Adapted from the book TensorFlow实战.

'''

The AlexNet in this post is not complete: the book only gives the construction of the convolutional
layers, and the fully connected layers were added by the blogger; a complete AlexNet will be shared
in a later post. Because of the limited speed of my hardware, only the forward and backward pass
times are measured, not full training.

AlexNet uses five convolutional layers, which rely on tricks such as ReLU, LRN and max pooling.
Choosing each layer's kernel size and stride takes experience and experimentation (a quick size
check follows this docstring).
LRN is rarely used nowadays because its benefit is not very noticeable, so it can be left out.
The max-pooling layers use ksize=[1,3,3,1] with strides=[1,2,2,1], so the pooling windows overlap,
which can enrich the extracted features.

'''
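# Quick sanity check of the feature-map sizes (assuming the 224x224x3 input used in run_benchmark below):
#   'SAME'  conv/pool: out = ceil(in / stride)
#   'VALID' conv/pool: out = floor((in - ksize) / stride) + 1
# e.g. conv1 ('SAME', stride 4):            ceil(224/4) = 56    -> 56x56x64
#      pool1 ('VALID', ksize 3, stride 2):  (56-3)//2 + 1 = 27  -> 27x27x64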
from datetime import datetime
import tensorflow as tf
import math
import time
# Suppress TensorFlow's INFO and WARNING log messages
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# Benchmark parameters
batch_size=128
num_batches=100
# Print a tensor's op name and output shape
def print_activation(t):
    print(t.op.name,' ',t.get_shape().as_list())
# Build the AlexNet inference graph
def inference(images):
    # List of trainable parameters (used later for the backward-pass benchmark)
    parameters=[]
    # First convolutional layer
    with tf.name_scope('conv1') as scope:
        # Kernel [11,11,3,64], stride [1,4,4,1]
        kernel=tf.Variable(tf.truncated_normal([11,11,3,64],dtype=tf.float32,stddev=1e-1),name='weights')
        conv=tf.nn.conv2d(images,kernel,[1,4,4,1],padding='SAME')
        # Add the bias
        biases=tf.Variable(tf.constant(0,shape=[64],dtype=tf.float32),trainable=True,name='biases')
        bias=tf.nn.bias_add(conv,biases)
        # ReLU activation
        conv1=tf.nn.relu(bias,name=scope)
        # Collect this layer's parameters
        parameters+=[kernel,biases]
    # LRN normalization, max pooling, then print pool1's shape
    lrn1=tf.nn.lrn(conv1,4,bias=1,alpha=0.001/9,beta=0.75,name='lrn1')
    pool1=tf.nn.max_pool(lrn1,ksize=[1,3,3,1],strides=[1,2,2,1],padding='VALID',name='pool1')
    print_activation(pool1)
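    # Note on the LRN above: tf.nn.lrn normalizes each activation across its depth_radius=4 neighbouring
    # channels, roughly output = input / (bias + alpha * sum of squared neighbours) ** beta; with the
    # small alpha used here the effect is mild, which is why it can be dropped (see the docstring).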
    # Second convolutional layer, as above; kernel [5, 5, 64, 192], stride [1, 1, 1, 1]
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0, shape=[192], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activation(conv2)

    lrn2 = tf.nn.lrn(conv2, 4, bias=1, alpha=0.001 / 9, beta=0.75, name='lrn2')
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')
    print_activation(pool2)
    # Third convolutional layer: kernel [3, 3, 192, 384], stride [1, 1, 1, 1]; no LRN or pooling
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0, shape=[384], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activation(conv3)
    # Fourth convolutional layer: kernel [3, 3, 384, 256], stride [1, 1, 1, 1]; no LRN or pooling
    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activation(conv4)
    # Fifth convolutional layer: kernel [3, 3, 256, 256], stride [1, 1, 1, 1], followed by max pooling
    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activation(conv5)
    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')
    print_activation(pool5)
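    # With the 224x224 input, pool5 should come out as [batch, 6, 6, 256], so the flattened dimension
    # used for fc1 below is 6*6*256 = 9216 (dim is read from the graph, so other input sizes also work
    # as long as the first dimension equals batch_size).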
    # First fully connected layer: flatten, linear transform, ReLU
    with tf.name_scope('fc1') as scope:
        reshape = tf.reshape(pool5, [batch_size, -1],name='reshape')
        dim = reshape.get_shape()[1].value
        w1 = tf.Variable(tf.truncated_normal([dim,4096], dtype=tf.float32, stddev=1e-1), name='weights')
        b1 = tf.Variable(tf.constant(0.1, shape=[4096],dtype=tf.float32), trainable=True, name='biases')
        fc1 = tf.nn.relu(tf.matmul(reshape, w1) + b1,name=scope)
        parameters += [w1, b1]
        print_activation(fc1)
    # Second fully connected layer: linear transform, ReLU
    with tf.name_scope('fc2') as scope:
        w2 = tf.Variable(tf.truncated_normal([4096,4096], dtype=tf.float32, stddev=1e-1), name='weights')
        b2 = tf.Variable(tf.constant(0.1, shape=[4096],dtype=tf.float32), trainable=True, name='biases')
        fc2 = tf.nn.relu(tf.matmul(fc1, w2) + b2,name=scope)
        parameters += [w2, b2]
        print_activation(fc2)
    # Third fully connected layer: linear transform with softmax activation
    with tf.name_scope('fc3') as scope:
        w3 = tf.Variable(tf.truncated_normal([4096,1000], dtype=tf.float32, stddev=1e-1), name='weights')
        b3 = tf.Variable(tf.constant(0.1, shape=[1000],dtype=tf.float32), trainable=True, name='biases')
        fc3 = tf.nn.softmax(tf.matmul(fc2, w3) + b3,name=scope)
        parameters += [w3, b3]
        print_activation(fc3)

    # Return the network output and the parameter list
    return fc3,parameters
# Helper to time the evaluation of a target op: warm up for num_steps_burn_in iterations,
# then report the mean and standard deviation of the per-batch time over num_batches runs
def time_tensorflow_run(session,target,info_string):
    num_steps_burn_in=10
    total_duration=0
    total_duration_squared=0

    for i in range(num_batches+num_steps_burn_in):
        start_time=time.time()
        _=session.run(target)
        duration=time.time()-start_time
        # Only accumulate statistics after the warm-up iterations
        if i>=num_steps_burn_in:
            if not i%10:
                print('{}: step {:d}, duration = {:.3f}'.format(datetime.now(),i-num_steps_burn_in,duration))
            total_duration+=duration
            total_duration_squared+=duration*duration

    mn=total_duration/num_batches
    vr=total_duration_squared/num_batches-mn*mn
    sd=math.sqrt(vr)
    print('{}: {} across {:d} steps, {:.3f} +/- {:.3f} sec/batch'.format(datetime.now(),info_string,num_batches,mn,sd))
# Build the graph and benchmark the forward and forward-backward passes
def run_benchmark():
    with tf.Graph().as_default():
        # Generate random images as dummy input (no real dataset is needed for timing) and build the network
        image_size=224
        images=tf.Variable(tf.random_normal([batch_size,image_size,image_size,3],dtype=tf.float32,stddev=0.1))
        fc3,parameters=inference(images)
        # Initialize all variables
        init=tf.global_variables_initializer()
        sess=tf.Session()
        sess.run(init)
        # Time the forward pass
        time_tensorflow_run(sess,fc3,'Forward')
        # Define a simple loss and time the forward + backward pass
        objective=tf.nn.l2_loss(fc3)
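        # l2_loss is only a stand-in so that gradients have something to flow from; real training
        # would instead minimize a cross-entropy loss (e.g. tf.nn.softmax_cross_entropy_with_logits
        # on the pre-softmax logits) with an optimizer.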
        grad=tf.gradients(objective,parameters)
        time_tensorflow_run(sess,grad,'Forward-backward')

run_benchmark()
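
As a sanity check, with the 224x224x3 random input the print_activation calls should report roughly the following shapes (worked out from the stride/padding arithmetic above, so treat them as expected values rather than recorded output):

pool1  [128, 27, 27, 64]
conv2  [128, 27, 27, 192]
pool2  [128, 13, 13, 192]
conv3  [128, 13, 13, 384]
conv4  [128, 13, 13, 256]
conv5  [128, 13, 13, 256]
pool5  [128, 6, 6, 256]
fc1    [128, 4096]
fc2    [128, 4096]
fc3    [128, 1000]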

Reposted from blog.csdn.net/qq_41644087/article/details/80501205