Reading the mmdetection Source Code (Part 2)

1: Python's @property and decorators: what they are and how they are used
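Since that topic is only named here, a minimal sketch (not taken from mmdetection) of the two ideas: a decorator is a callable that receives a function and returns a replacement for it, applied with the @name syntax; @property is a built-in decorator that lets a method be read (and optionally set) like a plain attribute.

import functools


def log_calls(func):
    # a plain function decorator: wrap func and print its name before calling it
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print(f'calling {func.__name__}')
        return func(*args, **kwargs)
    return wrapper


@log_calls
def add(a, b):
    return a + b


class Celsius:

    def __init__(self, degrees=0.0):
        self._degrees = degrees

    @property
    def degrees(self):
        # read access looks like attribute access: Celsius().degrees
        return self._degrees

    @degrees.setter
    def degrees(self, value):
        if value < -273.15:
            raise ValueError('below absolute zero')
        self._degrees = value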

2: mmdetection/mmdet/core/fp16/utils.py

from collections import abc

import numpy as np
import torch


def cast_tensor_type(inputs, src_type, dst_type):
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type)
    elif isinstance(inputs, str):
        return inputs
    elif isinstance(inputs, np.ndarray):
        return inputs
    elif isinstance(inputs, abc.Mapping):
        return type(inputs)({
            k: cast_tensor_type(v, src_type, dst_type)
            for k, v in inputs.items()
        })
    elif isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(item, src_type, dst_type) for item in inputs)
    else:
        return inputs
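A quick usage sketch of cast_tensor_type (the batch dict below is invented for illustration): it walks mappings and iterables recursively, converts every torch.Tensor it finds to dst_type, and passes strings, ndarrays and scalars through untouched. Note that src_type is accepted for symmetry with the callers but is not actually consulted in this function.

import torch

batch = {
    'img': torch.randn(2, 3, 4, 4),        # fp32 tensor -> becomes fp16
    'meta': {'filename': 'demo.jpg'},      # strings pass through unchanged
    'scales': [torch.tensor([1.0]), 0.5],  # tensors inside lists are cast too
}
half_batch = cast_tensor_type(batch, torch.float, torch.half)
print(half_batch['img'].dtype)             # torch.float16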

3: mmdetection/mmdet/core/fp16/decorators.py

import functools
from inspect import getfullargspec

import torch

from .utils import cast_tensor_type


def auto_fp16(apply_to=None, out_fp32=False):
    """Decorator to enable fp16 training automatically.

    This decorator is useful when you write custom modules and want to support
    mixed precision training. If input arguments are fp32 tensors, they will
    be converted to fp16 automatically. Arguments other than fp32 tensors are
    ignored.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp32 (bool): Whether to convert the output back to fp32.

    :Example:

        class MyModule1(nn.Module):

            # Convert x and y to fp16
            @auto_fp16()
            def forward(self, x, y):
                pass

        class MyModule2(nn.Module):

            # convert pred to fp16
            @auto_fp16(apply_to=('pred', ))
            def do_something(self, pred, others):
                pass
    """

    def auto_fp16_wrapper(old_func):

        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # check if the module has set the attribute `fp16_enabled`, if not,
            # just fallback to the original method.
            if not isinstance(args[0], torch.nn.Module):
                raise TypeError('@auto_fp16 can only be used to decorate the '
                                'method of nn.Module')
            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
                return old_func(*args, **kwargs)
            # get the arg spec of the decorated method
            args_info = getfullargspec(old_func)
            # get the argument names to be casted
            args_to_cast = args_info.args if apply_to is None else apply_to
            # convert the args that need to be processed
            new_args = []
            # NOTE: default args are not taken into consideration
            if args:
                arg_names = args_info.args[:len(args)]
                for i, arg_name in enumerate(arg_names):
                    if arg_name in args_to_cast:
                        new_args.append(
                            cast_tensor_type(args[i], torch.float, torch.half))
                    else:
                        new_args.append(args[i])
            # convert the kwargs that need to be processed
            new_kwargs = {}
            if kwargs:
                for arg_name, arg_value in kwargs.items():
                    if arg_name in args_to_cast:
                        new_kwargs[arg_name] = cast_tensor_type(
                            arg_value, torch.float, torch.half)
                    else:
                        new_kwargs[arg_name] = arg_value
            # apply converted arguments to the decorated method
            output = old_func(*new_args, **new_kwargs)
            # cast the results back to fp32 if necessary
            if out_fp32:
                output = cast_tensor_type(output, torch.half, torch.float)
            return output

        return new_func

    return auto_fp16_wrapper


def force_fp32(apply_to=None, out_fp16=False):
    """Decorator to convert input arguments to fp32 in force.

    This decorator is useful when you write custom modules and want to support
    mixed precision training. If there are some inputs that must be processed
    in fp32 mode, then this decorator can handle it. If input arguments are
    fp16 tensors, they will be converted to fp32 automatically. Arguments other
    than fp16 tensors are ignored.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp16 (bool): Whether to convert the output back to fp16.

    :Example:

        class MyModule1(nn.Module):

            # Convert x and y to fp32
            @force_fp32()
            def loss(self, x, y):
                pass

        class MyModule2(nn.Module):

            # convert pred to fp32
            @force_fp32(apply_to=('pred', ))
            def post_process(self, pred, others):
                pass
    """

    def force_fp32_wrapper(old_func):

        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # check if the module has set the attribute `fp16_enabled`, if not,
            # just fallback to the original method.
            if not isinstance(args[0], torch.nn.Module):
                raise TypeError('@force_fp32 can only be used to decorate the '
                                'method of nn.Module')
            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
                return old_func(*args, **kwargs)
            # get the arg spec of the decorated method
            args_info = getfullargspec(old_func)
            # get the argument names to be casted
            args_to_cast = args_info.args if apply_to is None else apply_to
            # convert the args that need to be processed
            new_args = []
            if args:
                arg_names = args_info.args[:len(args)]
                for i, arg_name in enumerate(arg_names):
                    if arg_name in args_to_cast:
                        new_args.append(
                            cast_tensor_type(args[i], torch.half, torch.float))
                    else:
                        new_args.append(args[i])
            # convert the kwargs that need to be processed
            new_kwargs = dict()
            if kwargs:
                for arg_name, arg_value in kwargs.items():
                    if arg_name in args_to_cast:
                        new_kwargs[arg_name] = cast_tensor_type(
                            arg_value, torch.half, torch.float)
                    else:
                        new_kwargs[arg_name] = arg_value
            # apply converted arguments to the decorated method
            output = old_func(*new_args, **new_kwargs)
            # cast the results back to fp16 if necessary
            if out_fp16:
                output = cast_tensor_type(output, torch.float, torch.half)
            return output

        return new_func

    return force_fp32_wrapper
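Both decorators are no-ops unless the module instance carries a truthy fp16_enabled attribute; in mmdetection that flag is switched on by the fp16 wrapping utilities when mixed-precision training is enabled. A hedged sketch of how a custom head might combine the two decorators (MyHead and its shapes are invented; the import path assumes mmdetection 1.x, where both decorators are re-exported from mmdet.core):

import torch
import torch.nn as nn

from mmdet.core import auto_fp16, force_fp32


class MyHead(nn.Module):

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 4)
        # the decorators only take effect when this flag is True
        self.fp16_enabled = False

    @auto_fp16(apply_to=('feats', ))
    def forward(self, feats):
        # with fp16 enabled, feats arrives here as torch.half
        return self.fc(feats)

    @force_fp32(apply_to=('pred', ))
    def loss(self, pred, target):
        # pred is cast back to fp32 so the loss math stays in full precision
        return ((pred - target) ** 2).mean()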

4: mmdetection/mmdet/models/losses/utils.py

import functools

import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, elementwise_mean: 1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight
    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != 'none':
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

        >>> @weighted_loss
        >>> def l1_loss(pred, target):
        >>>     return (pred - target).abs()

        >>> pred = torch.Tensor([0, 2, 3])
        >>> target = torch.Tensor([1, 1, 1])
        >>> weight = torch.Tensor([1, 0, 1])

        >>> l1_loss(pred, target)
        tensor(1.3333)
        >>> l1_loss(pred, target, weight)
        tensor(1.)
        >>> l1_loss(pred, target, reduction='none')
        tensor([1., 1., 2.])
        >>> l1_loss(pred, target, weight, avg_factor=2)
        tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # get element-wise loss
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss

    return wrapper
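The docstring example above already shows the common calls; the point of avg_factor is to divide the summed loss by a caller-chosen denominator (in the detection heads this is typically the number of positive samples) rather than by the number of elements. Spelling out the last docstring line with a direct call to weight_reduce_loss:

import torch

elem_loss = torch.Tensor([1., 1., 2.])  # element-wise L1 losses for the docstring inputs
weight = torch.Tensor([1., 0., 1.])
# (1*1 + 0*1 + 1*2) / avg_factor = 3 / 2 = 1.5
print(weight_reduce_loss(elem_loss, weight, reduction='mean', avg_factor=2))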


Reposted from www.cnblogs.com/gnz49/p/11654170.html