Adding New Layers to Caffe

1. ROIPoolingLayer

2. SmoothL1LossLayer

The Caffe implementation of Fast R-CNN, which contains both ROIPoolingLayer and SmoothL1LossLayer, is here:

https://github.com/rbgirshick/caffe-fast-rcnn/



To add ROIPoolingLayer and SmoothL1LossLayer to an existing Caffe build, first look up their definitions at https://github.com/rbgirshick/caffe-fast-rcnn. The general procedure for adding a layer to Caffe is documented at https://github.com/BVLC/caffe/wiki/. With those two references at hand, we can get started.

1. Declaration and implementation of ROIPoolingLayer and SmoothL1LossLayer:

Declaration: caffe_root/include/caffe/fast_rcnn_layers.hpp (a sketch of the ROIPoolingLayer declaration is shown after this file list)

Implementation: caffe_root/src/caffe/layers/roi_pooling_layer.cpp

    caffe_root/src/caffe/layers/roi_pooling_layer.cu

    caffe_root/src/caffe/layers/smooth_L1_loss_layer.cpp

    caffe_root/src/caffe/layers/smooth_L1_loss_layer.cu
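
For reference, the ROIPoolingLayer declaration in fast_rcnn_layers.hpp looks roughly like the sketch below; member names follow the fast-rcnn repository, but consult the repo itself for the authoritative version:

template <typename Dtype>
class ROIPoolingLayer : public Layer<Dtype> {
 public:
  explicit ROIPoolingLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "ROIPooling"; }

  // bottom[0] is the conv feature map; bottom[1] is a (num_rois, 5) blob of
  // ROIs given as (batch_index, x1, y1, x2, y2); top[0] is the pooled output.
  virtual inline int MinBottomBlobs() const { return 2; }
  virtual inline int MaxBottomBlobs() const { return 2; }
  virtual inline int MinTopBlobs() const { return 1; }
  virtual inline int MaxTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int channels_;
  int height_;
  int width_;
  int pooled_height_;
  int pooled_width_;
  Dtype spatial_scale_;
  Blob<int> max_idx_;  // argmax indices cached in Forward, reused in Backward
};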

2. Tests for ROIPoolingLayer and SmoothL1LossLayer (a minimal gradient-check test is sketched after the paths below):

caffe_root/src/caffe/test/test_roi_pooling_layer.cpp

caffe_root/src/caffe/test/test_smooth_L1_loss_layer.cpp
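
A minimal gradient-check test for SmoothL1LossLayer, written in the style of Caffe's existing loss-layer tests, might look like the sketch below. The fixture and blob names are illustrative only; the actual tests in the fast-rcnn repo are more thorough:

#include <vector>

#include "gtest/gtest.h"

#include "caffe/blob.hpp"
#include "caffe/filler.hpp"
#include "caffe/layers/smooth_L1_loss_layer.hpp"
#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

namespace caffe {

template <typename TypeParam>
class SmoothL1LossLayerTest : public MultiDeviceTest<TypeParam> {
  typedef typename TypeParam::Dtype Dtype;

 protected:
  SmoothL1LossLayerTest()
      : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
        blob_bottom_label_(new Blob<Dtype>(10, 5, 1, 1)),
        blob_top_loss_(new Blob<Dtype>()) {
    // Fill predictions and targets with Gaussian noise.
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_data_);
    filler.Fill(this->blob_bottom_label_);
    blob_bottom_vec_.push_back(blob_bottom_data_);
    blob_bottom_vec_.push_back(blob_bottom_label_);
    blob_top_vec_.push_back(blob_top_loss_);
  }
  virtual ~SmoothL1LossLayerTest() {
    delete blob_bottom_data_;
    delete blob_bottom_label_;
    delete blob_top_loss_;
  }

  Blob<Dtype>* const blob_bottom_data_;
  Blob<Dtype>* const blob_bottom_label_;
  Blob<Dtype>* const blob_top_loss_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};

TYPED_TEST_CASE(SmoothL1LossLayerTest, TestDtypesAndDevices);

TYPED_TEST(SmoothL1LossLayerTest, TestGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  SmoothL1LossLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  // Numerically check the gradient w.r.t. both inputs.
  GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_);
}

}  // namespace caffe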

3. Registration

Add the following to caffe_root/src/caffe/proto/caffe.proto. The two optional ... fields go inside message LayerParameter (use field numbers that are not already taken); the message definitions go at the top level of the file:

optional ROIPoolingParameter roi_pooling_param = 8266711;


// Message that stores parameters used by ROIPoolingLayer
message ROIPoolingParameter {
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in height and width or as Y, X pairs.
  optional uint32 pooled_h = 1 [default = 0]; // The pooled output height
  optional uint32 pooled_w = 2 [default = 0]; // The pooled output width
  // Multiplicative spatial scale factor to translate ROI coords from their
  // input scale to the scale used when pooling
  optional float spatial_scale = 3 [default = 1];
}

optional SmoothL1LossParameter smooth_l1_loss_param = 8266712;

message SmoothL1LossParameter {
  // SmoothL1Loss(x) =
  //   0.5 * (sigma * x) ** 2    -- if x < 1.0 / sigma / sigma
  //   |x| - 0.5 / sigma / sigma -- otherwise
  optional float sigma = 1 [default = 1];
}
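
Once the layers are registered and Caffe is rebuilt, they can be referenced from a network prototxt. A hypothetical snippet is shown below; the blob names are placeholders following the Fast R-CNN prototxt conventions:

layer {
  name: "roi_pool5"
  type: "ROIPooling"
  bottom: "conv5_3"   # conv feature map
  bottom: "rois"      # (num_rois, 5) blob: (batch_index, x1, y1, x2, y2)
  top: "pool5"
  roi_pooling_param {
    pooled_w: 7
    pooled_h: 7
    spatial_scale: 0.0625  # 1/16, the total stride of the preceding conv layers
  }
}
layer {
  name: "loss_bbox"
  type: "SmoothL1Loss"
  bottom: "bbox_pred"
  bottom: "bbox_targets"
  bottom: "bbox_inside_weights"
  bottom: "bbox_outside_weights"
  top: "loss_bbox"
  loss_weight: 1
  smooth_l1_loss_param { sigma: 1 }
}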

Done.




Adding the SmoothL1Loss layer to Caffe (CPU-only, standalone)

smooth_L1_loss_layer.hpp

#ifndef CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_
#define CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_


#include <vector>


#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"


#include "caffe/layers/loss_layer.hpp"


namespace caffe {




template <typename Dtype>
class SmoothL1LossLayer : public LossLayer<Dtype> {
 public:
  explicit SmoothL1LossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param), diff_() {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "SmoothL1Loss"; }

  // Accept either 2 bottoms (predictions, targets) or 4 bottoms
  // (predictions, targets, inside weights, outside weights); without these
  // overrides the LossLayer base class would force exactly 2 bottoms.
  virtual inline int ExactNumBottomBlobs() const { return -1; }
  virtual inline int MinBottomBlobs() const { return 2; }
  virtual inline int MaxBottomBlobs() const { return 4; }

  /**
   * Unlike most loss layers, in the SmoothL1LossLayer we can backpropagate
   * to both inputs -- override to return true and always allow force_backward.
   */
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return true;
  }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  // Forward_gpu/Backward_gpu are intentionally not declared: without a .cu
  // implementation, Caffe's base Layer class falls back to the CPU versions.

  Blob<Dtype> diff_;    // element-wise difference bottom[0] - bottom[1]
  Blob<Dtype> errors_;  // element-wise smooth L1 values
  Blob<Dtype> ones_;    // vector of ones used to sum errors_ via a dot product
  bool has_weights_;    // true when inside/outside weights are supplied
  Dtype sigma2_;        // sigma^2 from SmoothL1LossParameter
};


}  // namespace caffe


#endif  // CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_


smooth_L1_loss_layer.cpp



#include <cmath>
#include <vector>

#include "caffe/layers/smooth_L1_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"


namespace caffe {


template <typename Dtype>
void SmoothL1LossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  SmoothL1LossParameter loss_param = this->layer_param_.smooth_l1_loss_param();
  sigma2_ = loss_param.sigma() * loss_param.sigma();
  has_weights_ = (bottom.size() >= 3);
  if (has_weights_) {
    CHECK_EQ(bottom.size(), 4) << "If weights are used, must specify both "
        "inside and outside weights";
  }
}


template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  CHECK_EQ(bottom[0]->channels(), bottom[1]->channels());
  CHECK_EQ(bottom[0]->height(), bottom[1]->height());
  CHECK_EQ(bottom[0]->width(), bottom[1]->width());
  if (has_weights_) {
    CHECK_EQ(bottom[0]->channels(), bottom[2]->channels());
    CHECK_EQ(bottom[0]->height(), bottom[2]->height());
    CHECK_EQ(bottom[0]->width(), bottom[2]->width());
    CHECK_EQ(bottom[0]->channels(), bottom[3]->channels());
    CHECK_EQ(bottom[0]->height(), bottom[3]->height());
    CHECK_EQ(bottom[0]->width(), bottom[3]->width());
  }
  diff_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  errors_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  // vector of ones used to sum the per-element errors with a dot product
  ones_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  for (int i = 0; i < bottom[0]->count(); ++i) {
    ones_.mutable_cpu_data()[i] = Dtype(1);
  }
}


template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))
      << "Inputs must have the same dimension.";
  int count = bottom[0]->count();
  // diff_ = bottom[0] - bottom[1]
  caffe_sub(count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());

  if (has_weights_) {
    // apply the "inside" weights element-wise to the difference
    caffe_mul(count,
        bottom[2]->cpu_data(),
        diff_.cpu_data(),
        diff_.mutable_cpu_data());
  }
  // f(x) = 0.5 * (sigma * x)^2          if |x| < 1 / sigma^2
  //        |x| - 0.5 / sigma^2          otherwise
  const Dtype* in = diff_.cpu_data();
  Dtype* out = errors_.mutable_cpu_data();
  for (int index = 0; index < count; ++index) {
    Dtype val = in[index];
    Dtype abs_val = fabs(val);  // fabs, not the integer abs()
    if (abs_val < 1.0 / sigma2_) {
      out[index] = 0.5 * val * val * sigma2_;
    } else {
      out[index] = abs_val - 0.5 / sigma2_;
    }
  }

  if (has_weights_) {
    // apply the "outside" weights element-wise to the errors
    caffe_mul(count, bottom[3]->cpu_data(), out, errors_.mutable_cpu_data());
  }

  // sum the errors (dot product with a vector of ones), normalize by batch size
  Dtype loss = caffe_cpu_dot(count, ones_.cpu_data(), errors_.cpu_data());
  top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}


template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // f'(x) = sigma^2 * x    if |x| < 1 / sigma^2
  //         sign(x)        otherwise
  int count = diff_.count();
  const Dtype* in = diff_.cpu_data();
  Dtype* out = diff_.mutable_cpu_data();
  for (int index = 0; index < count; ++index) {
    Dtype val = in[index];
    Dtype abs_val = fabs(val);
    if (abs_val < 1.0 / sigma2_) {
      out[index] = sigma2_ * val;
    } else {
      out[index] = (Dtype(0) < val) - (val < Dtype(0));  // sign(val)
    }
  }

  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      // bottom[i] diff = alpha * f'(diff_)
      caffe_cpu_axpby(
          count,
          alpha,
          out,  // diff_.cpu_data()
          Dtype(0),
          bottom[i]->mutable_cpu_diff());

      if (has_weights_) {
        // scale the gradient by the inside and outside weights; the products
        // must be written into the diff of bottom[i], not its data
        caffe_mul(
            count,
            bottom[2]->cpu_data(),
            bottom[i]->cpu_diff(),
            bottom[i]->mutable_cpu_diff());
        caffe_mul(
            count,
            bottom[3]->cpu_data(),
            bottom[i]->cpu_diff(),
            bottom[i]->mutable_cpu_diff());
      }
    }
  }
}


// Note: the usual "#ifdef CPU_ONLY / STUB_GPU(SmoothL1LossLayer) / #endif"
// block is omitted here. Forward_gpu/Backward_gpu are not declared in the
// header, so STUB_GPU would try to define member functions that do not exist;
// Caffe's base Layer class already falls back to the CPU implementation.


INSTANTIATE_CLASS(SmoothL1LossLayer);
REGISTER_LAYER_CLASS(SmoothL1Loss);


}  // namespace caffe


Registration (after these edits, rebuild Caffe so the new layer and the regenerated caffe.proto classes are compiled in):

Note: the name SmoothL1Loss passed to REGISTER_LAYER_CLASS(SmoothL1Loss) should be kept consistent with the string returned by virtual inline const char* type() const { return "SmoothL1Loss"; } (and with the type: field used in the prototxt).
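
The reason is how the layer factory creates layers: REGISTER_LAYER_CLASS(SmoothL1Loss) registers a creator in the global layer registry under the string "SmoothL1Loss", and that same string is what the type: field of a prototxt layer is looked up against. The macro in include/caffe/layer_factory.hpp looks roughly like this (paraphrased from upstream Caffe):

// REGISTER_LAYER_CLASS(type) generates a creator that returns a new
// typeLayer<Dtype> and registers it under the string "type".
#define REGISTER_LAYER_CLASS(type)                                             \
  template <typename Dtype>                                                    \
  shared_ptr<Layer<Dtype> > Creator_##type##Layer(const LayerParameter& param) \
  {                                                                            \
    return shared_ptr<Layer<Dtype> >(new type##Layer<Dtype>(param));           \
  }                                                                            \
  REGISTER_LAYER_CREATOR(type, Creator_##type##Layer)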

Inside message LayerParameter, add a field for the new parameter message (any field number not already taken works; the fast-rcnn fork uses 8266712, while 152 is used here):

optional SmoothL1LossParameter smooth_l1_loss_param = 152;

message SmoothL1LossParameter {
  optional float sigma = 1 [default = 1];
}



Reposted from blog.csdn.net/u011808673/article/details/80758981