tf.contrib.slim.arg_scope

slim is a library built on top of TensorFlow that makes it easier and more efficient to build, train, and analyze deep learning networks.
https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim

slim.arg_scope is commonly used to supply default argument values to TensorFlow's layer functions, making model-building code more compact (hence "slim").
https://github.com/tensorflow/tensorflow/blob/56e4ea405d13125a3dcb6459019a83d12330bf84/tensorflow/contrib/framework/python/ops/arg_scope.py
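For instance, here is a minimal sketch of the effect (inputs and the layer parameters are placeholders for illustration):

# Without arg_scope, the same defaults are repeated at every layer call:
net = slim.conv2d(inputs, 64, [3, 3], padding='SAME',
                  weights_regularizer=slim.l2_regularizer(0.05))
net = slim.conv2d(net, 128, [3, 3], padding='SAME',
                  weights_regularizer=slim.l2_regularizer(0.05))

# With arg_scope, the defaults are declared once and shared:
with slim.arg_scope([slim.conv2d], padding='SAME',
                    weights_regularizer=slim.l2_regularizer(0.05)):
    net = slim.conv2d(inputs, 64, [3, 3])
    net = slim.conv2d(net, 128, [3, 3])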

It is used inside nested with contexts. To make it easier to understand, here is the annotated source code of arg_scope:

_ARGSTACK = [{}]  # the global stack of scope dicts; starts with one empty scope

# For a function decorated with @contextmanager, entering the `with` block runs the
# body up to `yield`, binds the yielded value to the `as` target while the block
# executes, and runs the code after `yield` when the block is exited.
@tf_contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
  # list_ops_or_scope is a list of functions (ops);
  # kwargs packs the keyword arguments passed at the call site (a=..., b=..., ...)
  # into a dict, and **kwargs unpacks such a dict
  try:
    # current_arg_scope() returns the scope dict pushed by the enclosing `with`
    # level (the top of the stack). Its keys are op names and its values are
    # dicts of default arguments for that op; copy it so this level can extend it.
    current_scope = current_arg_scope().copy()
    for op in list_ops_or_scope:    # for each function (op) in the list
      key = arg_scope_func_key(op)  # the op's key, e.g. 'conv2d'
      if not has_arg_scope(op):     # the op must be decorated with @slim.add_arg_scope
        raise ValueError('%s is not decorated with @add_arg_scope',
                         _name_op(op))
      if key in current_scope:      # the op already has defaults from an outer `with`
        current_kwargs = current_scope[key].copy()  # take the existing defaults
        current_kwargs.update(kwargs)  # merge in the arguments added at this level
        current_scope[key] = current_kwargs
      else:
        current_scope[key] = kwargs.copy()  # op not defined in any outer scope:
        # add a new key for it
    _get_arg_stack().append(current_scope)  # push the updated scope onto the stack
    yield current_scope  # the updated scope applies to the code inside this `with`
  finally:
    _get_arg_stack().pop()  # on leaving this `with` level, pop its scope
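To see how the stack and the @add_arg_scope decorator interact end to end, here is a self-contained toy re-implementation in plain Python (no TensorFlow; the names toy_arg_scope, toy_add_arg_scope, and the toy conv2d are made up for illustration):

import contextlib
import functools

_STACK = [{}]  # mirrors slim's global arg-scope stack

@contextlib.contextmanager
def toy_arg_scope(ops, **kwargs):
    scope = _STACK[-1].copy()              # start from the enclosing level's scope
    for op in ops:
        merged = scope.get(op.__name__, {}).copy()
        merged.update(kwargs)              # inner levels override outer defaults
        scope[op.__name__] = merged
    _STACK.append(scope)
    try:
        yield scope
    finally:
        _STACK.pop()

def toy_add_arg_scope(func):               # plays the role of @slim.add_arg_scope
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        defaults = _STACK[-1].get(func.__name__, {}).copy()
        defaults.update(kwargs)            # explicit call-site arguments win
        return func(*args, **defaults)
    return wrapper

@toy_add_arg_scope
def conv2d(inputs, num_outputs, kernel_size, padding='VALID', activation_fn=None):
    return 'conv2d(padding=%s, activation_fn=%s)' % (padding, activation_fn)

with toy_arg_scope([conv2d], padding='SAME', activation_fn='relu'):
    print(conv2d('x', 256, [5, 5]))   # conv2d(padding=SAME, activation_fn=relu)
print(conv2d('x', 256, [5, 5]))       # conv2d(padding=VALID, activation_fn=None)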

# demo: a fragment taken from inside a function that builds a model's arg scope
# (note the return at the end)

with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                    weights_initializer=tf.truncated_normal_initializer(
                                            stddev=weights_initializer_stddev),
                    activation_fn=activation_fn,
                    normalizer_fn=slim.batch_norm if use_batch_norm else None):
# Applies the argument dict kwargs1 = {weights_initializer: ..., activation_fn: ...,
# normalizer_fn: ...} to conv2d and separable_conv2d, producing
# current_scope = {'conv2d': kwargs1, 'separable_conv2d': kwargs1}.
# The stack is now [{}, {'conv2d': kwargs1, 'separable_conv2d': kwargs1}].
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
    # Takes the scope from the outer `with` and adds one key, producing
    # current_scope = {'conv2d': kwargs1, 'separable_conv2d': kwargs1,
    #                  'batch_norm': batch_norm_params}.
    # The stack is now [{},
    #                   {'conv2d': kwargs1, 'separable_conv2d': kwargs1},
    #                   {'conv2d': kwargs1, 'separable_conv2d': kwargs1,
    #                    'batch_norm': batch_norm_params}].
      with slim.arg_scope([slim.conv2d],
                          weights_regularizer=slim.l2_regularizer(weight_decay)):
      # Takes the scope from the outer `with` and updates the 'conv2d' entry,
      # producing current_scope = {'conv2d': kwargs1 + weights_regularizer,
      #                            'separable_conv2d': kwargs1,
      #                            'batch_norm': batch_norm_params}.
      # The stack now holds the three dicts above plus this new one.
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as arg_sc:
        # Takes the scope from the outer `with` and updates the 'separable_conv2d'
        # entry, producing
        # current_scope = {'conv2d': kwargs1 + weights_regularizer,
        #                  'separable_conv2d': kwargs1 + weights_regularizer,
        #                  'batch_norm': batch_norm_params}.
        # The stack now holds the four dicts above plus this new one.
          return arg_sc  # arg_sc is this innermost merged scope dict (the yielded
          # current_scope), not the stack of all scopes
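A scope dict captured this way can be re-applied later: slim.arg_scope also accepts a previously returned scope dict in place of the op list. A hypothetical usage by a caller of the function above (inputs is a placeholder name):

with slim.arg_scope(arg_sc):
    net = slim.conv2d(inputs, 64, [3, 3])  # picks up all the defaults captured above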

Here is another example:

import tensorflow as tf
slim = tf.contrib.slim

with slim.arg_scope([slim.conv2d],
                    padding='SAME',
                    weights_initializer=tf.contrib.layers.xavier_initializer(),
                    weights_regularizer=slim.l2_regularizer(0.05)):
# Entering the `with` context runs the part of arg_scope before `yield`,
# producing current_scope = {'conv2d': kwargs}.
    net = slim.conv2d(net, 256, [5, 5])
    # The yielded current_scope is handed to this code; the call-site arguments
    # (256, [5, 5]) are merged with the defaults already stored under the
    # 'conv2d' key of current_scope to form the final argument list.
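Note that arguments passed explicitly at the call site always take precedence over the scope defaults, so a single layer can opt out of them; for example, inside the same with block:

    net = slim.conv2d(net, 256, [5, 5], padding='VALID')  # overrides the scoped 'SAME'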

Next, a few words about variable_scope.

import tensorflow as tf

with tf.Session() as sess:
    # default initializer for the scope
    with tf.variable_scope("foo", initializer=tf.constant_initializer(0.4)) as myscope:
        v = tf.get_variable("v", [1], initializer=tf.constant_initializer(0.232))
        sess.run(v.initializer)
        print(v.name)
        print(v.eval())   # a specific initializer overrides the scope default
        w = tf.get_variable("w", [1], initializer=tf.constant_initializer(0.3))
        sess.run(w.initializer)
        print(w.name)
        print(w.eval())   # a specific initializer overrides the scope default
        with tf.variable_scope(myscope, reuse=True):
            v = tf.get_variable("v", [1])
            sess.run(v.initializer)
            print(v.name)  # reuses the first `with` level's scope; a variable named
            # "v" already exists there, so reuse has to be True
            print(v.eval())  # since the variable is reused, its value is unchanged
        with tf.variable_scope("baz"):
            v = tf.get_variable("v", [1])
            sess.run(v.initializer)
            print(v.name)
            print(v.eval())  # falls back to the scope's default initializer (0.4)

foo/v:0
[0.232]
foo/w:0
[0.3]
foo/v:0
[0.232]
foo/baz/v:0
[0.4]
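As a sanity check on the reuse rules: requesting an already existing variable in a non-reusing scope raises an error. A small sketch, assuming it runs in the same graph as the snippet above (where foo/v already exists):

with tf.variable_scope("foo"):
    try:
        v = tf.get_variable("v", [1])
    except ValueError as e:
        print(e)  # Variable foo/v already exists, disallowed. Did you mean
                  # to set reuse=True ...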

There is one more construct that is easy to confuse with it: name_scope.
Both kinds of scope have the same effect on all operations and on variables created with tf.Variable: the scope name is added as a prefix to the operation or variable name. However, a name scope is ignored by tf.get_variable; the only way to place a variable accessed with tf.get_variable inside a scope is to use a variable scope.

with tf.name_scope("my_scope"):
    v1 = tf.get_variable("var1", [1], dtype=tf.float32)
    v2 = tf.Variable(1, name="var2", dtype=tf.float32)
    a = tf.add(v1, v2)

print(v1.name)  # var1:0
print(v2.name)  # my_scope/var2:0
print(a.name)   # my_scope/Add:0

with tf.variable_scope("my_scope"):
    v1 = tf.get_variable("var1", [1], dtype=tf.float32)
    v2 = tf.Variable(1, name="var2", dtype=tf.float32)
    a = tf.add(v1, v2)

print(v1.name)  # my_scope/var1:0
print(v2.name)  # my_scope/var2:0
print(a.name)   # my_scope/Add:0
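Because tf.get_variable ignores name scopes, a variable scope can even be used to share one variable across two different name scopes; a short sketch of this standard pattern:

with tf.name_scope("foo"):
    with tf.variable_scope("var_scope"):
        v = tf.get_variable("var", [1])
with tf.name_scope("bar"):
    with tf.variable_scope("var_scope", reuse=True):
        v1 = tf.get_variable("var", [1])
assert v1 is v
print(v.name)   # var_scope/var:0
print(v1.name)  # var_scope/var:0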