Dissecting slim.arg_scope

import tensorflow as tf

slim = tf.contrib.slim


def resnet_arg_scope(weight_decay=0.0001,
                     is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True,
                     activation_fn=tf.nn.relu,
                     use_batch_norm=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    is_training: Whether batch_norm layers are in training mode.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.
    activation_fn: The activation function which is used in ResNet.
    use_batch_norm: Whether or not to use batch normalization.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': None,
      'is_training': is_training,
      'fused': True,  # Use fused batch norm if possible.
  }

  with slim.arg_scope(  #1
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=slim.batch_norm if use_batch_norm else None,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params): #2  
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # slim.arg_scope([slim.max_pool2d], padding='VALID')
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:  #3
        return arg_sc

#1 The first slim.arg_scope: every op listed in the brackets ([slim.conv2d] here) receives the keyword arguments that follow as its default parameters.
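
As a minimal sketch of this (hypothetical shapes and layer sizes, TF 1.x with slim = tf.contrib.slim): both convolutions below pick up the scope's defaults, and an explicit keyword argument still overrides them.

import tensorflow as tf

slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu, padding='SAME'):
  net = slim.conv2d(inputs, 64, [3, 3])                 # relu and 'SAME' come from the scope
  net = slim.conv2d(net, 128, [3, 3], padding='VALID')  # an explicit kwarg overrides the scope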

#2 The second slim.arg_scope is nested inside the first. Suppose its op list were changed to [slim.batch_norm, slim.conv2d]: then the first scope's arguments (weights_regularizer, weights_initializer, activation_fn, normalizer_fn, normalizer_params) and the second scope's batch_norm_params would all be recorded as defaults for conv2d. (At call time conv2d would reject batch-norm-only keys such as decay, so this only illustrates how nested scopes merge.)
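
A small sketch of that merging behaviour (continuing from the previous snippet; padding stands in for the batch-norm keys, since conv2d only accepts its own keyword arguments):

with slim.arg_scope([slim.conv2d],
                    weights_regularizer=slim.l2_regularizer(0.0001),
                    activation_fn=tf.nn.relu):
  with slim.arg_scope([slim.conv2d], padding='VALID'):
    # This call receives weights_regularizer and activation_fn from the
    # outer scope plus padding='VALID' from the inner one.
    net = slim.conv2d(inputs, 64, [3, 3])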

#3 The arg_sc returned by the third slim.arg_scope contains the ops from all three scopes' lists together with their corresponding arguments.
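
Since arg_sc is an ordinary dict, it can be re-entered later by passing it back to slim.arg_scope, which is how the resnet model functions consume it. A sketch, continuing from the snippets above:

with slim.arg_scope(resnet_arg_scope()):
  net = slim.conv2d(inputs, 64, [7, 7], stride=2)  # regularizer, initializer, batch norm, relu from the scope
  net = slim.max_pool2d(net, [3, 3], stride=2)     # padding='SAME' from the scope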


In resnet_v1.py / resnet_v2.py the scope is simply re-exported:

resnet_arg_scope = resnet_utils.resnet_arg_scope

Calling resnet_arg_scope() returns:

{'<function convolution2d at 0x000002680A9DE488>': {
     'weights_regularizer': <function l2_regularizer.<locals>.l2 at 0x000002680DEDABF8>,
     'weights_initializer': <function variance_scaling_initializer.<locals>._initializer at 0x000002680DEDAD08>,
     'activation_fn': <function relu at 0x0000026805F9F9D8>,
     'normalizer_fn': <function add_arg_scope.<locals>.func_with_args at 0x000002680A9D5EA0>,
     'normalizer_params': {'decay': 0.997, 'epsilon': 1e-05, 'scale': True, 'updates_collections': None, 'is_training': True, 'fused': True}},
 '<function batch_norm at 0x000002680A9D5E18>': {'decay': 0.997, 'epsilon': 1e-05, 'scale': True, 'updates_collections': None, 'is_training': True, 'fused': True},
 '<function max_pool2d at 0x000002680AA5DAE8>': {'padding': 'SAME'}}
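
A minimal way to reproduce this dump (assuming the resnet_arg_scope defined above): the dict maps each op's stringified function object to its default keyword arguments.

sc = resnet_arg_scope()
for op_key, op_args in sc.items():
  print(op_key, '->', op_args)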


Reposted from blog.csdn.net/candy134834/article/details/85915738