Notes on the Convolution Layer Source Code in Caffe

The convolution layer is implemented in conv_layer.hpp and conv_layer.cpp, specifically in the ConvolutionLayer class, which inherits from the BaseConvolutionLayer class.

The main forward processing is: the image at bottom[i]->cpu_data() + n * this->bottom_dim_ is transformed by im2col into a new matrix col_buff, which is then multiplied by the weight matrix weights; the resulting matrix output is written to top[i]->mutable_cpu_data() + n * this->top_dim_. (col2im is only needed in the backward pass, to map the gradient of the column buffer back to the gradient of the input image.)

Here bottom_dim_ is the size of one image within an input blob, i.e. channels * height * width.
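To make the shapes concrete, here is a minimal sketch of the matrices involved for one image. All sizes are hypothetical, chosen only for illustration and not taken from the source:

#include <cstdio>

// Hedged sketch: C_in input channels, C_out output channels, a K x K kernel,
// H_out x W_out output positions, group_ == 1. All values are made up.
int main() {
  const int C_in = 3, C_out = 96, K = 11, H_out = 55, W_out = 55;
  // im2col turns the (C_in, H, W) image into X with
  //   rows = C_in * K * K   (this is kernel_dim_ when group_ == 1),
  //   cols = H_out * W_out  (one column per output position).
  const int X_rows = C_in * K * K, X_cols = H_out * W_out;
  // W is (C_out, C_in * K * K); Y = W * X is (C_out, H_out * W_out),
  // i.e. exactly top_dim_ values for this image.
  std::printf("X: %d x %d, W: %d x %d, Y: %d x %d\n",
              X_rows, X_cols, C_out, X_rows, C_out, X_cols);
  return 0;
}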



The analysis below assumes each channel of the image is 2-dimensional (the ordinary 2D convolution case).

Let X be the matrix obtained after im2col transforms the input image. The (random) initial values of W and b are determined by the convolution layer definition in the network's prototxt file:

weight_filler {
  type: "gaussian"
  std: 0.01
}
bias_filler {
  type: "constant"
  value: 0
}


The corresponding fields in ConvolutionParameter (caffe.proto):

optional FillerParameter weight_filler = 7; // The filler for the weight
optional FillerParameter bias_filler = 8;   // The filler for the bias

Y is the matrix resulting from the multiplication.

dY denotes ∂Loss/∂Y, the derivative of the loss with respect to Y; likewise dX denotes ∂Loss/∂X, dW denotes ∂Loss/∂W, and db denotes ∂Loss/∂b.

Forward pass: the convolution can be written as Y = W * X + b.

Backward pass:

dW = dY * X^T
db = dY
dX = W^T * dY
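As a sanity check on these formulas, here is a small dense sketch written with plain loops (hypothetical sizes, group_ == 1). This is not how Caffe computes them; Caffe delegates the products to caffe_cpu_gemm, as the source further below shows.

#include <vector>

// Toy dense version of the formulas above, row-major storage:
//   Y = W * X + b,  dW = dY * X^T,  dX = W^T * dY,  db = row sums of dY.
// W is M x K, X is K x N, Y and dY are M x N, b and db have M entries.
void conv_as_gemm_toy(int M, int K, int N,
                      const std::vector<double>& W, const std::vector<double>& X,
                      const std::vector<double>& b, const std::vector<double>& dY,
                      std::vector<double>& Y, std::vector<double>& dW,
                      std::vector<double>& dX, std::vector<double>& db) {
  Y.assign(M * N, 0.0); dW.assign(M * K, 0.0);
  dX.assign(K * N, 0.0); db.assign(M, 0.0);
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n) {
      for (int k = 0; k < K; ++k) Y[m * N + n] += W[m * K + k] * X[k * N + n];
      Y[m * N + n] += b[m];                              // Y = W * X + b
    }
  for (int m = 0; m < M; ++m)
    for (int n = 0; n < N; ++n) {
      db[m] += dY[m * N + n];                            // db = dY summed over columns
      for (int k = 0; k < K; ++k) {
        dW[m * K + k] += dY[m * N + n] * X[k * N + n];   // dW = dY * X^T
        dX[k * N + n] += W[m * K + k] * dY[m * N + n];   // dX = W^T * dY
      }
    }
}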



Forward pass

The overall flow processes the num_ images one after another.

Y = W * X is performed group by group, with group_ groups.


[Figure: forward-pass Y = W * X computed in group_ groups]

The figure shows Y = W * X in the forward pass for one image, split into group_ groups. Assume group_ == 3 here, so three multiplications are performed. The arrows mark where the pointers point while each group's product is computed, so the three pointers (into a part of W, a part of X, and a part of Y respectively) each move group_ - 1 times (here 3 - 1 = 2).
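To make the per-group pointer jumps concrete, the sketch below computes the three offsets with made-up sizes (the member names follow BaseConvolutionLayer; the numbers are purely illustrative):

#include <cstdio>

// Hypothetical sizes: group_ = 3, conv_in_channels_ = 6, conv_out_channels_ = 9,
// a 3x3 kernel, 8x8 output. Matches the offset formulas in LayerSetUp()/Reshape().
int main() {
  const int group = 3, in_ch = 6, out_ch = 9, kh = 3, kw = 3;
  const int out_spatial = 8 * 8;                            // conv_out_spatial_dim_
  const int kernel_dim = (in_ch / group) * kh * kw;         // kernel_dim_
  const int weight_offset = out_ch * kernel_dim / group;    // step of the W pointer
  const int col_offset = kernel_dim * out_spatial;          // step of the X (col_buff) pointer
  const int output_offset = out_ch * out_spatial / group;   // step of the Y pointer
  std::printf("weight_offset_=%d col_offset_=%d output_offset_=%d\n",
              weight_offset, col_offset, output_offset);
  return 0;
}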




Y = Y + b is done in a single step (not per group).

One detail: for a 1x1 convolution (kernel size 1x1, no padding, stride 1), the im2col step is skipped, and the col2im step is likewise skipped in the backward pass.



Backward pass

dW = dY * X^T is performed group by group, with group_ groups.

dX = W^T * dY is performed group by group, with group_ groups.

db = dY is done in a single step.



conv_layer.cpp

compute_output_shape() computes the size of each spatial dimension of the output image, which can be understood as the height and width of each output channel.

const int output_dim = (input_dim + 2 * pad_data[i] - kernel_extent)
        / stride_data[i] + 1;
this->output_shape_.push_back(output_dim);

The result is stored in output_shape_.
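A worked example of this formula, with assumed values (a 227-pixel input, 11x11 kernel, stride 4, no padding, no dilation; the numbers are illustrative only):

#include <cstdio>

int main() {
  const int input_dim = 227, kernel = 11, pad = 0, stride = 4, dilation = 1;
  const int kernel_extent = dilation * (kernel - 1) + 1;             // 11
  const int output_dim = (input_dim + 2 * pad - kernel_extent) / stride + 1;
  std::printf("output_dim = %d\n", output_dim);                      // prints 55
  return 0;
}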

Forward_cpu() and Backward_cpu() implement the forward and backward passes.



conv_layer.cpp source:

#include <vector>

#include "caffe/layers/conv_layer.hpp"

namespace caffe {

template <typename Dtype>
void ConvolutionLayer<Dtype>::compute_output_shape() {
  const int* kernel_shape_data = this->kernel_shape_.cpu_data();
  const int* stride_data = this->stride_.cpu_data();
  const int* pad_data = this->pad_.cpu_data();
  const int* dilation_data = this->dilation_.cpu_data();
  this->output_shape_.clear();
  for (int i = 0; i < this->num_spatial_axes_; ++i) {
    // i + 1 to skip channel axis
    const int input_dim = this->input_shape(i + 1);
    const int kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1;
    const int output_dim = (input_dim + 2 * pad_data[i] - kernel_extent)
        / stride_data[i] + 1;
    this->output_shape_.push_back(output_dim);
  }
}

template <typename Dtype>
void ConvolutionLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const Dtype* weight = this->blobs_[0]->cpu_data();
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->cpu_data();
    Dtype* top_data = top[i]->mutable_cpu_data();
    for (int n = 0; n < this->num_; ++n) {
      this->forward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight,
          top_data + n * this->top_dim_);  // Y = W * X: X is the im2col matrix, W the weights, Y the result
      if (this->bias_term_) {
        const Dtype* bias = this->blobs_[1]->cpu_data();
        this->forward_cpu_bias(top_data + n * this->top_dim_, bias);//Y=Y+b
      }
    }
  }
}

template <typename Dtype>
void ConvolutionLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = this->blobs_[0]->cpu_data();
  Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    const Dtype* bottom_data = bottom[i]->cpu_data();
    Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
    // Bias gradient, if necessary.
    if (this->bias_term_ && this->param_propagate_down_[1])
    {
       Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff();
       for (int n = 0; n < this->num_; ++n)  // accumulate db over the num_ images
       {
         this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_);  // for each image db = dY, accumulated into bias_diff
       }
    }
    if (this->param_propagate_down_[0] || propagate_down[i])
    {
        for (int n = 0; n < this->num_; ++n)
        {
            // gradient w.r.t. weight. Note that we will accumulate diffs.
            if (this->param_propagate_down_[0])
            {
                this->weight_cpu_gemm(bottom_data + n * this->bottom_dim_,
                    top_diff + n * this->top_dim_, weight_diff);  // for each image dW = dY * X^T, accumulated into weight_diff
            }
            // gradient w.r.t. bottom data, if necessary.
            if (propagate_down[i])
            {
                this->backward_cpu_gemm(top_diff + n * this->top_dim_, weight,
                    bottom_diff + n * this->bottom_dim_);  // dX = W^T * dY, computed per image (not accumulated like db and dW)
            }
       }
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(ConvolutionLayer);
#endif

INSTANTIATE_CLASS(ConvolutionLayer);

}  // namespace caffe



base_conv_layer.cpp

LayerSetUp() and Reshape() mainly compute parameters that are used later; in short, they do the preprocessing.

In particular, Reshape() shapes the top blobs according to the output image shape computed by compute_output_shape() in ConvolutionLayer or DeconvolutionLayer.

The remaining functions implement the matrix multiplication of the forward pass, the Y = Y + b step, and the computation of dX, dW, and db in the backward pass.



base_conv_layer.cpp source:

#include <algorithm>
#include <vector>

#include "caffe/filler.hpp"
#include "caffe/layers/base_conv_layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

  /*
     LayerSetUp and Reshape mainly compute parameters that are used later;
     in short, they do the preprocessing. Reshape shapes the top blobs
     according to the output image shape computed by compute_output_shape()
     in ConvolutionLayer or DeconvolutionLayer.
   */

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();

  force_nd_im2col_ = conv_param.force_nd_im2col();  // whether to force the generic n-D im2col path

  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());

  const int first_spatial_axis = channel_axis_ + 1;

  const int num_axes = bottom[0]->num_axes();  // returns shape_.size(), the number of axes of the input blob


  num_spatial_axes_ = num_axes - first_spatial_axis;  // number of spatial axes of each channel

  CHECK_GE(num_spatial_axes_, 0);

  vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1);

  vector<int> spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1));

  // Setup filter kernel dimensions (kernel_shape_).
  // Compute the kernel shape.

  kernel_shape_.Reshape(spatial_dim_blob_shape);

  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();

  if (conv_param.has_kernel_h() || conv_param.has_kernel_w())
  {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  }
  else
  {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
      for (int i = 0; i < num_spatial_axes_; ++i)
        {
        kernel_shape_data[i] =
            conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
        }
  }
  for (int i = 0; i < num_spatial_axes_; ++i)
  {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }


   /* Compute the stride-related parameters */
  // Setup stride dimensions (stride_).
  stride_.Reshape(spatial_dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();

  if (conv_param.has_stride_h() || conv_param.has_stride_w())
    {
        CHECK_EQ(num_spatial_axes_, 2)
            << "stride_h & stride_w can only be used for 2D convolution.";
        CHECK_EQ(0, conv_param.stride_size())
            << "Either stride or stride_h/w should be specified; not both.";
        stride_data[0] = conv_param.stride_h();
        stride_data[1] = conv_param.stride_w();
    }
  else
    {
        const int num_stride_dims = conv_param.stride_size();
        CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
            num_stride_dims == num_spatial_axes_)
            << "stride must be specified once, or once per spatial dimension "
            << "(stride specified " << num_stride_dims << " times; "
            << num_spatial_axes_ << " spatial dims).";

        const int kDefaultStride = 1;
        for (int i = 0; i < num_spatial_axes_; ++i)
        {
        stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
            conv_param.stride((num_stride_dims == 1) ? 0 : i);

        CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
        }
    }

   /* Compute the pad values */
  // Setup pad dimensions (pad_).
  pad_.Reshape(spatial_dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w())
  {
        CHECK_EQ(num_spatial_axes_, 2)
            << "pad_h & pad_w can only be used for 2D convolution.";
        CHECK_EQ(0, conv_param.pad_size())
            << "Either pad or pad_h/w should be specified; not both.";
        pad_data[0] = conv_param.pad_h();
        pad_data[1] = conv_param.pad_w();
  }
  else
    {
        const int num_pad_dims = conv_param.pad_size();
        CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
            num_pad_dims == num_spatial_axes_)
            << "pad must be specified once, or once per spatial dimension "
            << "(pad specified " << num_pad_dims << " times; "
            << num_spatial_axes_ << " spatial dims).";

        const int kDefaultPad = 0;

        for (int i = 0; i < num_spatial_axes_; ++i)
        {
            pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
                conv_param.pad((num_pad_dims == 1) ? 0 : i);
        }
    }


   /* Compute the dilation values */
  // Setup dilation dimensions (dilation_).
  dilation_.Reshape(spatial_dim_blob_shape);
  int* dilation_data = dilation_.mutable_cpu_data();
  const int num_dilation_dims = conv_param.dilation_size();
  CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 ||
        num_dilation_dims == num_spatial_axes_)
      << "dilation must be specified once, or once per spatial dimension "
      << "(dilation specified " << num_dilation_dims << " times; "
      << num_spatial_axes_ << " spatial dims).";

  const int kDefaultDilation = 1;

  for (int i = 0; i < num_spatial_axes_; ++i)
    {
        dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation :
                       conv_param.dilation((num_dilation_dims == 1) ? 0 : i);
    }


   // Check whether the kernel is 1x1 with no padding and stride 1.
  // Special case: im2col is the identity for 1x1 convolution with stride 1
  // and no padding, so flag for skipping the buffer and transformation.
  is_1x1_ = true;
  for (int i = 0; i < num_spatial_axes_; ++i)
  {
    is_1x1_ &=
        kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0;
    if (!is_1x1_)  break;
  }

  // Configure output channels and groups.
  channels_ = bottom[0]->shape(channel_axis_);  // number of channels of the input image
  num_output_ = this->layer_param_.convolution_param().num_output();  // number of channels after convolution
  CHECK_GT(num_output_, 0);

  group_ = this->layer_param_.convolution_param().group();  // group_: the group size for grouped convolution
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";


  if (reverse_dimensions())  // this virtual function is one of the differences between conv_layer and deconv_layer
    {
        conv_out_channels_ = channels_;
        conv_in_channels_ = num_output_;
    }
  else
  {
     conv_out_channels_ = num_output_;
     conv_in_channels_ = channels_;
  }

  /*
   The vector that stores the learnable parameters as a set of blobs:
     vector<shared_ptr<Blob<Dtype> > > blobs_;
   (defined in layer.hpp)
  */

  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  vector<int> weight_shape(2);
  weight_shape[0] = conv_out_channels_;
  weight_shape[1] = conv_in_channels_ / group_;

  for (int i = 0; i < num_spatial_axes_; ++i)
  {
    weight_shape.push_back(kernel_shape_data[i]);
  }

  bias_term_ = this->layer_param_.convolution_param().bias_term();  // bool: whether a bias term is used
  vector<int> bias_shape(bias_term_, num_output_);
  if (this->blobs_.size() > 0)
  {
    CHECK_EQ(1 + bias_term_, this->blobs_.size())
        << "Incorrect number of weight blobs.";
    if (weight_shape != this->blobs_[0]->shape())
    {
        Blob<Dtype> weight_shaped_blob(weight_shape);
        LOG(FATAL) << "Incorrect weight shape: expected shape "
            << weight_shaped_blob.shape_string() << "; instead, shape was "
            << this->blobs_[0]->shape_string();
    }
    if (bias_term_ && bias_shape != this->blobs_[1]->shape())
    {
        Blob<Dtype> bias_shaped_blob(bias_shape);
        LOG(FATAL) << "Incorrect bias shape: expected shape "
            << bias_shaped_blob.shape_string() << "; instead, shape was "
            << this->blobs_[1]->shape_string();
    }
    LOG(INFO) << "Skipping parameter initialization";
  }
  else
    {
        if (bias_term_)
        {
            this->blobs_.resize(2);
        }
        else
        {
            this->blobs_.resize(1);
        }
        // Initialize and fill the weights:
        // output channels x input channels per-group x kernel height x kernel width
        this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
        shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
        weight_filler->Fill(this->blobs_[0].get());
        // If necessary, initialize and fill the biases.
        if (bias_term_)
        {
            this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
            shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
                this->layer_param_.convolution_param().bias_filler()));
            bias_filler->Fill(this->blobs_[1].get());
        }
  }
  kernel_dim_ = this->blobs_[0]->count(1);  // kernel_dim_ = (input channels / group_) * kernel_h * kernel_w
  weight_offset_ = conv_out_channels_ * kernel_dim_ / group_;
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top)
{
  const int first_spatial_axis = channel_axis_ + 1;
  CHECK_EQ(bottom[0]->num_axes(), first_spatial_axis + num_spatial_axes_)
      << "bottom num_axes may not change.";
  num_ = bottom[0]->count(0, channel_axis_);
  CHECK_EQ(bottom[0]->shape(channel_axis_), channels_)
      << "Input size incompatible with convolution kernel.";
  // TODO: generalize to handle inputs of different shapes.
  for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) {
    CHECK(bottom[0]->shape() == bottom[bottom_id]->shape())
        << "All inputs must have the same shape.";
  }
  // Shape the tops.
  bottom_shape_ = &bottom[0]->shape();
  compute_output_shape();  // compute the output shape, stored in vector<int> output_shape_
  vector<int> top_shape(bottom[0]->shape().begin(),
      bottom[0]->shape().begin() + channel_axis_);
  top_shape.push_back(num_output_);
  for (int i = 0; i < num_spatial_axes_; ++i) {
    top_shape.push_back(output_shape_[i]);
  }
  for (int top_id = 0; top_id < top.size(); ++top_id) {
    top[top_id]->Reshape(top_shape);
  }

  // conv_out_spatial_dim_: spatial size of the convolution output
  // (output_h * output_w in the 2D case).
  if (reverse_dimensions()) {
    conv_out_spatial_dim_ = bottom[0]->count(first_spatial_axis);
  } else {
    // The top blobs were already reshaped above, so their shape is known here.
    conv_out_spatial_dim_ = top[0]->count(first_spatial_axis);
  }
  /*
   Note: in Layer::SetUp (layer.hpp), LayerSetUp(bottom, top) is indeed called
   first, followed by Reshape(bottom, top).
  */



  col_offset_ = kernel_dim_ * conv_out_spatial_dim_;
  output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_;
  // Setup input dimensions (conv_input_shape_).
  vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1);
  conv_input_shape_.Reshape(bottom_dim_blob_shape);
  int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data();
  for (int i = 0; i < num_spatial_axes_ + 1; ++i) {
    if (reverse_dimensions()) {
      conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i);
    } else {
      conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i);
    }
  }
  // The im2col result buffer will only hold one image at a time to avoid
  // overly large memory usage. In the special case of 1x1 convolution
  // it goes lazily unused to save memory.
  col_buffer_shape_.clear();
  col_buffer_shape_.push_back(kernel_dim_ * group_);
  for (int i = 0; i < num_spatial_axes_; ++i) {
    if (reverse_dimensions()) {
      col_buffer_shape_.push_back(input_shape(i + 1));
    } else {
      col_buffer_shape_.push_back(output_shape_[i]);
    }
  }
  col_buffer_.Reshape(col_buffer_shape_);
  bottom_dim_ = bottom[0]->count(channel_axis_);
  top_dim_ = top[0]->count(channel_axis_);
  num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_;
  num_kernels_col2im_ = reverse_dimensions() ? top_dim_ : bottom_dim_;
  // Set up the all ones "bias multiplier" for adding biases by BLAS
  out_spatial_dim_ = top[0]->count(first_spatial_axis);
  if (bias_term_) {
    vector<int> bias_multiplier_shape(1, out_spatial_dim_);
    bias_multiplier_.Reshape(bias_multiplier_shape);
    caffe_set(bias_multiplier_.count(), Dtype(1),
        bias_multiplier_.mutable_cpu_data());
  }
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_cpu_gemm(const Dtype* input,
    const Dtype* weights, Dtype* output, bool skip_im2col) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    if (!skip_im2col) {
      conv_im2col_cpu(input, col_buffer_.mutable_cpu_data());
    }
    col_buff = col_buffer_.cpu_data();
  }
  for (int g = 0; g < group_; ++g) {
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
        group_, conv_out_spatial_dim_, kernel_dim_,
        (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)0., output + output_offset_ * g);
  }
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_cpu_bias(Dtype* output,
    const Dtype* bias) {
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
      out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(),
      (Dtype)1., output);
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_cpu_gemm(const Dtype* output,
    const Dtype* weights, Dtype* input) {
  Dtype* col_buff = col_buffer_.mutable_cpu_data();
  if (is_1x1_) {
    col_buff = input;
  }
  for (int g = 0; g < group_; ++g) {
    caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
        conv_out_spatial_dim_, conv_out_channels_ / group_,
        (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g,
        (Dtype)0., col_buff + col_offset_ * g);
  }
  if (!is_1x1_) {
    conv_col2im_cpu(col_buff, input);
  }
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::weight_cpu_gemm(const Dtype* input,
    const Dtype* output, Dtype* weights) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    conv_im2col_cpu(input, col_buffer_.mutable_cpu_data());
    col_buff = col_buffer_.cpu_data();
  }
  for (int g = 0; g < group_; ++g) {
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ / group_,
        kernel_dim_, conv_out_spatial_dim_,
        (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)1., weights + weight_offset_ * g);
  }
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_cpu_bias(Dtype* bias,
    const Dtype* input) {
  caffe_cpu_gemv<Dtype>(CblasNoTrans, num_output_, out_spatial_dim_, 1.,
      input, bias_multiplier_.cpu_data(), 1., bias);
}

#ifndef CPU_ONLY

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_gpu_gemm(const Dtype* input,
    const Dtype* weights, Dtype* output, bool skip_im2col) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    if (!skip_im2col) {
      conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
    }
    col_buff = col_buffer_.gpu_data();
  }
  for (int g = 0; g < group_; ++g) {
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
        group_, conv_out_spatial_dim_, kernel_dim_,
        (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)0., output + output_offset_ * g);
  }
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_gpu_bias(Dtype* output,
    const Dtype* bias) {
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
      out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(),
      (Dtype)1., output);
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_gpu_gemm(const Dtype* output,
    const Dtype* weights, Dtype* input) {
  Dtype* col_buff = col_buffer_.mutable_gpu_data();
  if (is_1x1_) {
    col_buff = input;
  }
  for (int g = 0; g < group_; ++g) {
    caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
        conv_out_spatial_dim_, conv_out_channels_ / group_,
        (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g,
        (Dtype)0., col_buff + col_offset_ * g);
  }
  if (!is_1x1_) {
    conv_col2im_gpu(col_buff, input);
  }
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::weight_gpu_gemm(const Dtype* input,
    const Dtype* output, Dtype* weights) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
    col_buff = col_buffer_.gpu_data();
  }
  for (int g = 0; g < group_; ++g) {
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ / group_,
        kernel_dim_, conv_out_spatial_dim_,
        (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)1., weights + weight_offset_ * g);
  }
}

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_gpu_bias(Dtype* bias,
    const Dtype* input) {
  caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, out_spatial_dim_, 1.,
      input, bias_multiplier_.gpu_data(), 1., bias);
}

#endif  // !CPU_ONLY

INSTANTIATE_CLASS(BaseConvolutionLayer);

}  // namespace caffe




base_conv_layer.hpp

The main things to understand are the two functions below (there are also GPU variants, conv_im2col_gpu and conv_col2im_gpu). They call im2col_cpu, col2im_cpu, etc., which are defined in im2col.cpp:


 inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) {
    if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
      im2col_cpu(data, conv_in_channels_,
          conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
          kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
          pad_.cpu_data()[0], pad_.cpu_data()[1],
          stride_.cpu_data()[0], stride_.cpu_data()[1],
          dilation_.cpu_data()[0], dilation_.cpu_data()[1], col_buff);
    } else {
      im2col_nd_cpu(data, num_spatial_axes_, conv_input_shape_.cpu_data(),
          col_buffer_shape_.data(), kernel_shape_.cpu_data(),
          pad_.cpu_data(), stride_.cpu_data(), dilation_.cpu_data(), col_buff);
    }
  }
  inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) {
    if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
      col2im_cpu(col_buff, conv_in_channels_,
          conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
          kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
          pad_.cpu_data()[0], pad_.cpu_data()[1],
          stride_.cpu_data()[0], stride_.cpu_data()[1],
          dilation_.cpu_data()[0], dilation_.cpu_data()[1], data);
    } else {
      col2im_nd_cpu(col_buff, num_spatial_axes_, conv_input_shape_.cpu_data(),
          col_buffer_shape_.data(), kernel_shape_.cpu_data(),
          pad_.cpu_data(), stride_.cpu_data(), dilation_.cpu_data(), data);
    }
  }
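For reference, here is a much-simplified 2D im2col sketch (single image, no dilation, written for clarity rather than speed; the real im2col_cpu in caffe/util/im2col.cpp also handles dilation and is organized differently). It shows how the column buffer consumed by the gemm calls above is laid out:

#include <vector>

// Simplified im2col: each row of col corresponds to one (channel, kernel_row,
// kernel_col) triple, each column to one output position; padded positions
// read as zero.
void im2col_simple(const std::vector<float>& img, int channels, int height,
                   int width, int kh, int kw, int pad, int stride,
                   std::vector<float>& col) {
  const int out_h = (height + 2 * pad - kh) / stride + 1;
  const int out_w = (width + 2 * pad - kw) / stride + 1;
  col.assign(channels * kh * kw * out_h * out_w, 0.f);
  for (int c = 0; c < channels; ++c)
    for (int i = 0; i < kh; ++i)
      for (int j = 0; j < kw; ++j) {
        const int row = (c * kh + i) * kw + j;
        for (int oh = 0; oh < out_h; ++oh)
          for (int ow = 0; ow < out_w; ++ow) {
            const int h = oh * stride - pad + i;
            const int w = ow * stride - pad + j;
            if (h >= 0 && h < height && w >= 0 && w < width)
              col[row * out_h * out_w + oh * out_w + ow] =
                  img[(c * height + h) * width + w];
          }
      }
}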


Reposted from blog.csdn.net/yskyskyer123/article/details/79040790