Reproducing Google's Inception V3 neural network in TensorFlow

Key developments in convolutional neural networks for image recognition
2012, AlexNet: ReLU activation, LRN layers, dropout, overlapping max pooling, data augmentation
2014, VGGNet: two 3x3 convolutions replace one 5x5 convolution; LRN layers turn out to be of little use; deeper convolutional stacks generally work better; 1x1 convolutions are very cost-effective
2014, Inception Net v1: global average pooling plus a dimension transform replaces the last fully connected layer; small multi-branch sub-networks are stacked into a large network; auxiliary classifier nodes
2015, Inception Net v2: adopts VGGNet's trick of replacing one 5x5 with two 3x3 convolutions; introduces batch normalization, which greatly speeds up training
2015, Inception Net v3: factorizes 7x7 convolutions into 1x7 and 7x1, which works even better than the VGG-style factorization (see the sketch after this list)
2015, ResNet: adds skip connections; the residual learning module solves the problem of training extremely deep networks
2016, Inception Net v4: draws on ResNet and adds skip connections
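The factorization ideas above are easy to see in code. Below is a minimal sketch of my own (not part of the Inception V3 implementation later in this post, and using arbitrary shapes) comparing one 5x5 convolution against two stacked 3x3 convolutions, and one 7x7 convolution against the asymmetric 1x7 + 7x1 pair that v3 uses; the output shapes match while the factorized versions need fewer parameters:

import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.random_uniform((1, 35, 35, 192))  # an arbitrary NHWC feature map

# one 5x5 convolution vs. two stacked 3x3 convolutions (same receptive field, fewer weights)
y_5x5 = slim.conv2d(x, 64, [5, 5], scope='direct_5x5')
y_3x3 = slim.conv2d(slim.conv2d(x, 64, [3, 3], scope='stack_3x3_a'), 64, [3, 3], scope='stack_3x3_b')

# one 7x7 convolution vs. the asymmetric 1x7 + 7x1 pair used throughout Inception V3
y_7x7 = slim.conv2d(x, 64, [7, 7], scope='direct_7x7')
y_asym = slim.conv2d(slim.conv2d(x, 64, [1, 7], scope='asym_1x7'), 64, [7, 1], scope='asym_7x1')

print(y_5x5.shape, y_3x3.shape)   # both (1, 35, 35, 64) with slim's default 'SAME' padding
print(y_7x7.shape, y_asym.shape)  # both (1, 35, 35, 64)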

This time we implement Google's Inception Net v3 in TensorFlow, using TensorFlow's slim module to greatly reduce the amount of network-definition code.
slim module tutorial: https://www.2cto.com/kf/201706/649266.html
Full code: https://github.com/joliph/tensorflow/blob/master/InceptionV3.py
My TensorFlow series code repository: https://github.com/joliph/tensorflow/
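To give a sense of how much slim condenses the layer code, here is a small side-by-side sketch of my own (not taken from InceptionV3.py) writing one convolution layer with raw TensorFlow ops and then with slim.conv2d; slim creates the weights and bias and applies the activation in one call, and slim.arg_scope lets shared defaults be declared once:

import tensorflow as tf
import tensorflow.contrib.slim as slim

inputs = tf.random_uniform((8, 32, 32, 3))

# raw TensorFlow: declare the variables and wire up the op yourself
with tf.variable_scope('raw_conv'):
    weights = tf.get_variable('weights', [3, 3, 3, 64],
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    biases = tf.get_variable('biases', [64], initializer=tf.zeros_initializer())
    raw_out = tf.nn.relu(tf.nn.conv2d(inputs, weights, [1, 1, 1, 1], 'SAME') + biases)

# slim: one call per layer, with shared defaults set once in an arg_scope
with slim.arg_scope([slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(stddev=0.1)):
    slim_out = slim.conv2d(inputs, 64, [3, 3], scope='slim_conv')

print(raw_out.shape, slim_out.shape)  # both (8, 32, 32, 64)

In the real network below, a single inception_v3_arg_scope() sets weight decay, initialization, activation, and batch normalization for every layer at once.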

An accessible article on why batch normalization works so well: https://www.zhihu.com/question/38102762
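For reference, the mechanics of batch normalization are simple: normalize each channel using the batch mean and variance, then apply a learned scale (gamma) and shift (beta). A minimal sketch with plain TensorFlow ops (my illustration; the network below gets this for free from slim.batch_norm, configured by batch_norm_params):

import tensorflow as tf

x = tf.random_uniform((32, 35, 35, 64))            # an NHWC feature map
mean, variance = tf.nn.moments(x, axes=[0, 1, 2])  # per-channel statistics over the batch
gamma = tf.ones([64])                              # learned scale (trainable in practice)
beta = tf.zeros([64])                              # learned shift (trainable in practice)
y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)  # 0.001 = epsilon, as in the code below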

Even after slim simplifies the layer code, the network definition alone still comes to 200+ lines; just copying it out by hand is exhausting…
Code:

# Inception V3 is the third generation of Google's Inception network


import tensorflow as tf
from datetime import datetime
import math
import time
import tensorflow.contrib.slim as slim
trunc_normal=lambda stddev:tf.truncated_normal_initializer(0.0,stddev)


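# Default settings for every conv / fully connected layer: L2 weight decay,
# truncated-normal initialization, ReLU activation and batch normalization.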
def inception_v3_arg_scope(weight_decay=0.00004,stddev=0.1,batch_norm_var_collection='moving_vars'):
    batch_norm_params={
        'decay':0.9997,
        'epsilon':0.001,
        'updates_collections':tf.GraphKeys.UPDATE_OPS,
        'variables_collections':{
            'beta':None,
            'gamma':None,
            'moving_mean':[batch_norm_var_collection],
            'moving_variance':[batch_norm_var_collection],
        }
    }

    with slim.arg_scope([slim.conv2d,slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params) as sc:
            return sc


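# Convolutional trunk plus the stacked Inception modules (mixed_5b .. mixed_7c);
# for a 299x299 input it returns the final 8x8x2048 feature map and an end_points dict.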
def inception_v3_base(inputs,scope=None):
    end_points={}
    with tf.variable_scope(scope,'InceptionV3',[inputs]):
        with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],
                            stride=1,padding='VALID'):
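            # stem: 299x299x3 input -> 35x35x192 feature map before the first Inception module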
            net=slim.conv2d(inputs,32,[3,3],stride=2,scope='conv2d_1a_3x3')
            net=slim.conv2d(net,32,[3,3],scope='conv2d_2a_3x3')
            net=slim.conv2d(net,64,[3,3],padding='SAME',scope='conv2d_2b_3x3')
            net=slim.max_pool2d(net,[3,3],stride=2,scope='maxpool_3a_3x3')
            net=slim.conv2d(net,80,[1,1],scope='conv2d_3b_1x1')
            net=slim.conv2d(net,192,[3,3],scope='conv2d_4a_3x3')
            net=slim.max_pool2d(net,[3,3],stride=2,scope='maxpool_5a_3x3')

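    # the Inception modules; inside this scope conv and pooling layers default to stride 1 and 'SAME' padding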
    with slim.arg_scope([slim.conv2d,slim.max_pool2d,slim.avg_pool2d],
                        stride=1,padding='SAME'):
        with tf.variable_scope('mixed_5b'):
            with tf.variable_scope('branch_0'):
                branch_0=slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,48,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,64,[5,5],scope='conv2d_0b_5x5')
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,96,[3,3],scope='conv2d_0b_3x3')
                branch_2=slim.conv2d(branch_2, 96, [3, 3], scope='conv2d_0c_3x3')
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,32,[1,1],scope='conv2d_0b_1x1')
            net=tf.concat([branch_0,branch_1,branch_2,branch_3],3)

        with tf.variable_scope('mixed_5c'):
            with tf.variable_scope('branch_0'):
                branch_0=slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,48,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,64,[5,5],scope='conv2d_0b_5x5')
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,96,[3,3],scope='conv2d_0b_3x3')
                branch_2=slim.conv2d(branch_2, 96, [3, 3], scope='conv2d_0c_3x3')
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,64,[1,1],scope='conv2d_0b_1x1')
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

        with tf.variable_scope('mixed_5d'):
            with tf.variable_scope('branch_0'):
                branch_0=slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,48,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,64,[5,5],scope='conv2d_0b_5x5')
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,96,[3,3],scope='conv2d_0b_3x3')
                branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='conv2d_0c_3x3')
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,64,[1,1],scope='conv2d_0b_1x1')
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

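        # mixed_6a: grid-size reduction module, 35x35 -> 17x17 (stride-2 branches plus max pooling)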
        with tf.variable_scope('mixed_6a'):
            with tf.variable_scope('branch_0'):
                branch_0 = slim.conv2d(net, 384, [3, 3],stride=2,padding='VALID',scope='conv2d_1a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,64,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,96,[3,3],scope='conv2d_0b_3x3')
                branch_1=slim.conv2d(branch_1, 96, [3, 3], stride=2,padding='VALID',scope='conv2d_1a_3x3')
            with tf.variable_scope('branch_2'):
                branch_2=slim.max_pool2d(net,[3,3],stride=2,padding='VALID',scope='maxpool_1a_3x3')
            net=tf.concat([branch_0, branch_1, branch_2], 3)

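        # mixed_6b .. mixed_6e: 17x17 modules built from the factorized 1x7 / 7x1 convolutions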
        with tf.variable_scope('mixed_6b'):
            with tf.variable_scope('branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,128,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,128,[1,7],scope='conv2d_0b_1x7')
                branch_1=slim.conv2d(branch_1,192, [7, 1],scope='conv2d_1a_7x1')
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,128,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,128,[7,1],scope='conv2d_0b_7x1')
                branch_2=slim.conv2d(branch_2,128,[1,7],scope='conv2d_0c_1x7')
                branch_2=slim.conv2d(branch_2,128,[7,1],scope='conv2d_0d_7x1')
                branch_2=slim.conv2d(branch_2,192,[1,7],scope='conv2d_0e_1x7')
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
            net=tf.concat([branch_0, branch_1, branch_2,branch_3], 3)

        with tf.variable_scope('mixed_6c'):
            with tf.variable_scope('branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,160,[1,7],scope='conv2d_0b_1x7')
                branch_1=slim.conv2d(branch_1,192, [7, 1],scope='conv2d_1a_7x1')
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,160,[7,1],scope='conv2d_0b_7x1')
                branch_2=slim.conv2d(branch_2,160,[1,7],scope='conv2d_0c_1x7')
                branch_2=slim.conv2d(branch_2,160,[7,1],scope='conv2d_0d_7x1')
                branch_2=slim.conv2d(branch_2,192,[1,7],scope='conv2d_0e_1x7')
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
            net=tf.concat([branch_0, branch_1, branch_2,branch_3], 3)

        with tf.variable_scope('mixed_6d'):
            with tf.variable_scope('branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,160,[1,7],scope='conv2d_0b_1x7')
                branch_1=slim.conv2d(branch_1,192, [7, 1],scope='conv2d_1a_7x1')
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,160,[7,1],scope='conv2d_0b_7x1')
                branch_2=slim.conv2d(branch_2,160,[1,7],scope='conv2d_0c_1x7')
                branch_2=slim.conv2d(branch_2,160,[7,1],scope='conv2d_0d_7x1')
                branch_2=slim.conv2d(branch_2,192,[1,7],scope='conv2d_0e_1x7')
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
            net=tf.concat([branch_0, branch_1, branch_2,branch_3], 3)

        with tf.variable_scope('mixed_6e'):
            with tf.variable_scope('branch_0'):
                branch_0 = slim.conv2d(net, 192, [1, 1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,160,[1,7],scope='conv2d_0b_1x7')
                branch_1=slim.conv2d(branch_1,192, [7, 1],scope='conv2d_1a_7x1')
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,160,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,160,[7,1],scope='conv2d_0b_7x1')
                branch_2=slim.conv2d(branch_2,160,[1,7],scope='conv2d_0c_1x7')
                branch_2=slim.conv2d(branch_2,160,[7,1],scope='conv2d_0d_7x1')
                branch_2=slim.conv2d(branch_2,192,[1,7],scope='conv2d_0e_1x7')
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
            net=tf.concat([branch_0, branch_1, branch_2,branch_3], 3)
        end_points['mixed_6e']=net

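        # mixed_7a: second grid-size reduction module, 17x17 -> 8x8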
        with tf.variable_scope('mixed_7a'):
            with tf.variable_scope('branch_0'):
                branch_0=slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                branch_0=slim.conv2d(branch_0,320,[3,3],stride=2,padding='VALID',scope='conv2d_1a_3x3')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,192,[1,1],scope='conv2d_0a_1x1')
                branch_1=slim.conv2d(branch_1,192,[1,7],scope='conv2d_0b_1x7')
                branch_1=slim.conv2d(branch_1, 192, [7, 1], scope='conv2d_0c_7x1')
                branch_1=slim.conv2d(branch_1, 192, [3, 3],stride=2,padding='VALID',scope='conv2d_1a_3x3')
            with tf.variable_scope('branch_2'):
                branch_2=slim.max_pool2d(net,[3,3],stride=2,padding='VALID',scope='maxpool_1a_3x3')
            net=tf.concat([branch_0,branch_1,branch_2],3)

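        # mixed_7b / mixed_7c: 8x8 modules with parallel 1x3 and 3x1 branches concatenated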
        with tf.variable_scope('mixed_7b'):
            with tf.variable_scope('branch_0'):
                branch_0=slim.conv2d(net,320,[1,1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,384,[1,1],scope='conv2d_0a_1x1')
                branch_1=tf.concat([
                    slim.conv2d(branch_1,384,[1,3],scope='conv2d_0b_1x3'),
                    slim.conv2d(branch_1, 384, [3, 1], scope='conv2d_0b_3x1')],3)
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,448,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,384,[3,3],scope='conv2d_0b_3x3')
                branch_2=tf.concat([
                    slim.conv2d(branch_2, 384, [1, 3], scope='conv2d_0c_1x3'),
                    slim.conv2d(branch_2, 384, [3, 1], scope='conv2d_0d_3x1')], 3)
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
            net=tf.concat([branch_0,branch_1,branch_2,branch_3],3)

        with tf.variable_scope('mixed_7c'):
            with tf.variable_scope('branch_0'):
                branch_0=slim.conv2d(net,320,[1,1],scope='conv2d_0a_1x1')
            with tf.variable_scope('branch_1'):
                branch_1=slim.conv2d(net,384,[1,1],scope='conv2d_0a_1x1')
                branch_1=tf.concat([
                    slim.conv2d(branch_1,384,[1,3],scope='conv2d_0b_1x3'),
                    slim.conv2d(branch_1, 384, [3, 1], scope='conv2d_0b_3x1')],3)
            with tf.variable_scope('branch_2'):
                branch_2=slim.conv2d(net,448,[1,1],scope='conv2d_0a_1x1')
                branch_2=slim.conv2d(branch_2,384,[3,3],scope='conv2d_0b_3x3')
                branch_2=tf.concat([
                    slim.conv2d(branch_2, 384, [1, 3], scope='conv2d_0c_1x3'),
                    slim.conv2d(branch_2, 384, [3, 1], scope='conv2d_0d_3x1')], 3)
            with tf.variable_scope('branch_3'):
                branch_3=slim.avg_pool2d(net,[3,3],scope='avgpool_0a_3x3')
                branch_3=slim.conv2d(branch_3,192,[1,1],scope='conv2d_0b_1x1')
            net=tf.concat([branch_0,branch_1,branch_2,branch_3],3)
    return net,end_points


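# Full model: base network + auxiliary classifier (fed from mixed_6e) + final logits head.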
def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV3'):
    with tf.variable_scope(scope,'InceptionV3',[inputs,num_classes],reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm,slim.dropout],
                            is_training=is_training):
            net,end_points=inception_v3_base(inputs,scope=scope)
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
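                # the auxiliary classifier branches off the 17x17x768 mixed_6e feature map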
                aux_logits = end_points['mixed_6e']
                with tf.variable_scope('AuxLogits'):
                    aux_logits = slim.avg_pool2d(aux_logits, [5, 5],
                                                 stride=3, padding='VALID', scope='avgpool_1a_5x5')
                    aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='conv2d_1b_1x1')
                    aux_logits = slim.conv2d(
                        aux_logits, 768, [5, 5],
                        weights_initializer=trunc_normal(0.01),
                        padding='VALID', scope='conv2d_2a_5x5'
                    )
                    aux_logits = slim.conv2d(
                        aux_logits, num_classes, [1, 1], activation_fn=None,
                        normalizer_fn=None, weights_initializer=trunc_normal(0.001),
                        scope='conv2d_2b_1x1'
                    )
                    if spatial_squeeze:
                        aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
                    end_points['AuxLogits'] = aux_logits

                # classification head: global average pooling over the 8x8 map, dropout,
                # then a 1x1 conv acting as the final fully connected layer
                with tf.variable_scope('Logits'):
                    net = slim.avg_pool2d(net, [8, 8], padding='VALID', scope='avgpool_1a_8x8')
                    net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='dropout_1b')
                    end_points['PreLogits'] = net
                    logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                         normalizer_fn=None, scope='conv2d_1c_1x1')
                    if spatial_squeeze:
                        logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
                end_points['Logits'] = logits
                end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
                return logits, end_points

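# Benchmark helper: runs `target` num_batches times after a warm-up phase and
# reports the mean and standard deviation of the per-batch wall-clock time
# (num_batches is the global defined below).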
def time_tensorflow_run(session, target, info_string):
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0

    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i %10:
                print('%s: step %d, duration = %.3f' %(datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration

    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %(datetime.now(), info_string, num_batches, mn, sd))





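# Forward-pass benchmark on a batch of random 299x299 images (no real data, no training).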
batch_size = 32
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception_v3_arg_scope()):
    logits, end_points = inception_v3(inputs, is_training=False)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
num_batches = 100
time_tensorflow_run(sess, logits, 'Forward')


Reposted from blog.csdn.net/joliph/article/details/79197134