Caffe 17-Day Challenge: Code Walkthrough and Data Structures (15 Days Remaining)

Code Walkthrough

.
├── build -> .build_release   # build output
├── cmake
│   ├── External
│   ├── Modules
│   └── Templates
├── data                    # raw data and the scripts that fetch it
│   ├── cifar10
│   ├── ilsvrc12
│   └── mnist
├── distribute              # post-build release package for deployment elsewhere
│   ├── bin
│   └── lib
├── docker                  # Docker files for easy deployment
│   ├── cpu
│   └── gpu
├── docs
│   ├── images
│   ├── _layouts
│   ├── stylesheets
│   └── tutorial
│       ├── fig
│       └── layers
├── examples             # simple example programs
│   ├── cifar10
│   ├── cpp_classification
│   ├── feature_extraction
│   ├── finetune_flickr_style
│   ├── finetune_pascal_detection
│   ├── hdf5_classification
│   ├── imagenet
│   ├── images
│   ├── mnist
│   │   ├── mnist_test_lmdb
│   │   └── mnist_train_lmdb
│   ├── net_surgery
│   ├── pycaffe
│   │   └── layers
│   ├── siamese
│   └── web_demo
│       └── templates
├── include               # Caffe header files (important)
│   └── caffe
│       ├── layers
│       ├── test
│       └── util
├── matlab
│   ├── +caffe
│   │   ├── imagenet
│   │   ├── private
│   │   └── +test
│   ├── demo
│   └── hdf5creation
├── models
│   ├── bvlc_alexnet
│   ├── bvlc_googlenet
│   ├── bvlc_reference_caffenet
│   ├── bvlc_reference_rcnn_ilsvrc13
│   └── finetune_flickr_style
├── python
│   └── caffe
│       ├── imagenet
│       ├── proto
│       ├── __pycache__
│       └── test
├── scripts
│   └── travis
├── src              # Caffe source code (important)
│   ├── caffe
│   │   ├── layers
│   │   ├── proto  # proto definition files; learn the data structures from here
│   │   ├── solvers
│   │   ├── test
│   │   │   └── test_data
│   │   └── util
│   └── gtest
└── tools          # tool source code (important)
    └── extra


Then comes reading the source code itself, blah, blah, blah~ enjoy!

Data Structures

Key Concepts

  • blob: provides a unified memory interface and holds a batch of images or other data, weights, and weight updates, similar to the Tensor in Torch/Theano/TensorFlow. Caffe uses 4-D arrays called Blobs to store and exchange data.
    Dimension semantics: a Blob's four dimensions, listed from fastest- to slowest-varying, are (width_, height_, channels_, num_); in video streams, num_ can denote the frame index.
    Basic usage: Blob is a template class, so the template parameter (the element type) must be specified; see the sketch below.
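As a quick illustration (a minimal sketch, assuming Caffe has been built and its headers are on the include path), a Blob can be instantiated and inspected like this:

#include <iostream>
#include "caffe/blob.hpp"

int main() {
  // A float Blob holding one 3-channel 28x28 image:
  // shape is (num, channels, height, width).
  caffe::Blob<float> blob(1, 3, 28, 28);
  std::cout << blob.shape_string() << std::endl;  // e.g. "1 3 28 28 (2352)"
  std::cout << blob.count() << std::endl;         // 1 * 3 * 28 * 28 = 2352
  return 0;
}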

Reading the related definitions in caffe.proto:


// Specifies the shape (dimensions) of a Blob.
// This message describes a Blob's shape information.
message BlobShape {
  repeated int64 dim = 1 [packed = true];
  // int64 values giving the size of each of the Blob's dimensions (four for a
  // 4-D Blob). "packed" means the values are laid out contiguously, with no holes.
}

// This message describes the serialized, on-disk form of a Blob.
message BlobProto {
  optional BlobShape shape = 7;
  // Optional; holds a BlobShape object.
  repeated float data = 5 [packed = true];
  // Floats storing data or weights; the element count is determined by shape
  // or by (num, channels, height, width); tightly packed.
  repeated float diff = 6 [packed = true];
  // Floats storing gradient (diff) information; same dimensions as data.
  repeated double double_data = 8 [packed = true];
  // Parallel to data, but with type double.
  repeated double double_diff = 9 [packed = true];
  // Parallel to diff, but with type double.
  // 4D dimensions -- deprecated.  Use "shape" instead.
  // The dimension fields below are legacy; newer Caffe recommends "shape".
  optional int32 num = 1 [default = 0];
  optional int32 channels = 2 [default = 0];
  optional int32 height = 3 [default = 0];
  optional int32 width = 4 [default = 0];
}
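To make the message definitions concrete, here is a small sketch (assuming the protobuf-generated header caffe.pb.h is available) that fills a BlobProto for a 1x1x2x2 Blob through the generated C++ API:

#include "caffe/proto/caffe.pb.h"

int main() {
  caffe::BlobProto proto;
  // Describe the shape: (num, channels, height, width) = (1, 1, 2, 2).
  caffe::BlobShape* shape = proto.mutable_shape();
  shape->add_dim(1);
  shape->add_dim(1);
  shape->add_dim(2);
  shape->add_dim(2);
  // Fill the four data elements; diff could be filled the same way.
  for (int i = 0; i < 4; ++i) {
    proto.add_data(0.25f * i);
  }
  // proto.SerializeAsString() would then give the on-disk byte form.
  return 0;
}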

Blob is a template class that wraps SyncedMemory and serves Layer, Net, Solver, and friends as the basic computational unit. Reading the source (include/caffe/blob.hpp):

#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h" / /~由proto生成的头文件,
声明了BlobProto、 BlobShape等遵循caffe.proto协议的数据结构
#include "caffe/syncedmem.hpp"    //~CPU/GPU共享内存类, 用于数据同步



const int kMaxBlobAxes = 32;  // maximum number of axes a Blob may have

namespace caffe {

/**
 * @brief A wrapper around SyncedMemory holders serving as the basic
 *        computational unit through which Layer%s, Net%s, and Solver%s
 *        interact.
 *
 * TODO(dox): more thorough description.
 */
template <typename Dtype>
class Blob {  // class declaration
 public:
  // default constructor
  Blob()
       : data_(), diff_(), count_(0), capacity_(0) {}
       
  // explicit constructors, to prevent implicit type conversions
  /// @brief Deprecated; use <code>Blob(const vector<int>& shape)</code>.
  explicit Blob(const int num, const int channels, const int height,
      const int width);
  explicit Blob(const vector<int>& shape);

  /// @brief Deprecated; use <code>Reshape(const vector<int>& shape)</code>.
  void Reshape(const int num, const int channels, const int height,
      const int width);
  /**
   * @brief Change the dimensions of the blob, allocating new memory if
   *        necessary.
   *
   * This function can be called both to create an initial allocation
   * of memory, and to adjust the dimensions of a top blob during Layer::Reshape
   * or Layer::Forward. When changing the size of blob, memory will only be
   * reallocated if sufficient memory does not already exist, and excess memory
   * will never be freed.
   *
   * Note that reshaping an input blob and immediately calling Net::Backward is
   * an error; either Net::Forward or Net::Reshape need to be called to
   * propagate the new input shape to higher layers.
   */
  // reshape functions: reset this Blob's shape from the arguments, reallocating memory when necessary
  void Reshape(const vector<int>& shape);
  void Reshape(const BlobShape& shape);
  void ReshapeLike(const Blob& other);
  // returns the Blob shape as a string for logging, e.g. "Top shape: 100 1 28 28 (78400)"

  inline string shape_string() const {
    ostringstream stream;
    for (int i = 0; i < shape_.size(); ++i) {
      stream << shape_[i] << " ";
    }
    stream << "(" << count_ << ")";
    return stream.str();
  }
  inline const vector<int>& shape() const { return shape_; }  // returns the whole shape vector
  /**
   * @brief Returns the dimension of the index-th axis (or the negative index-th
   *        axis from the end, if index is negative).
   *
   * @param index the axis index, which may be negative as it will be
   *        "canonicalized" using CanonicalAxisIndex.
   *        Dies on out of range index.
   */
  inline int shape(int index) const {
    return shape_[CanonicalAxisIndex(index)];  // size of the index-th dimension
  }
  inline int num_axes() const { return shape_.size(); }  // number of axes
  inline int count() const { return count_; }  // total number of elements in the Blob

  /**
   * @brief Compute the volume of a slice; i.e., the product of dimensions
   *        among a range of axes.
   *
   * @param start_axis The first axis to include in the slice.
   *
   * @param end_axis The first axis to exclude from the slice.
   */
  inline int count(int start_axis, int end_axis) const {
    CHECK_LE(start_axis, end_axis);    // start_axis <= end_axis
    CHECK_GE(start_axis, 0);           // start_axis >= 0
    CHECK_GE(end_axis, 0);             // end_axis >= 0
    CHECK_LE(start_axis, num_axes());  // start_axis <= number of axes
    CHECK_LE(end_axis, num_axes());    // end_axis <= number of axes
    int count = 1;
    for (int i = start_axis; i < end_axis; ++i) {
      count *= shape(i);
    }
    return count;
  }
  /**
   * @brief Compute the volume of a slice spanning from a particular first
   *        axis to the final axis.
   *
   * @param start_axis The first axis to include in the slice.
   */
  inline int count(int start_axis) const {
    return count(start_axis, num_axes());  // element count from start_axis to the last axis
  }
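  // Worked example (illustrative): for a Blob of shape (2, 3, 4, 5),
  // count() == 120, count(1) == 3 * 4 * 5 == 60, count(2, 4) == 4 * 5 == 20.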

  /**
   * @brief Returns the 'canonical' version of a (usually) user-specified axis,
   *        allowing for negative indexing (e.g., -1 for the last axis).
   *
   * @param axis_index the axis index.
   *        If 0 <= index < num_axes(), return index.
   *        If -num_axes <= index <= -1, return (num_axes() - (-index)),
   *        e.g., the last axis index (num_axes() - 1) if index == -1,
   *        the second to last if index == -2, etc.
   *        Dies on out of range index.
   */
  inline int CanonicalAxisIndex(int axis_index) const {  // maps [-N, N) to [0, N)
    CHECK_GE(axis_index, -num_axes())  // ensure axis_index >= -num_axes()
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    CHECK_LT(axis_index, num_axes())   // ensure axis_index < num_axes()
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    if (axis_index < 0) {
      return axis_index + num_axes();
      // A negative index counts from the end: -1 means the last axis (ordinary
      // index N-1); likewise -2 maps to N-2, and so on.
    }
    return axis_index;
  }
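  // For example, on a 4-D Blob: CanonicalAxisIndex(-1) == 3,
  // CanonicalAxisIndex(2) == 2.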
  // legacy accessors for the size of each dimension
  /// @brief Deprecated legacy shape accessor num: use shape(0) instead.
  inline int num() const { return LegacyShape(0); }
  /// @brief Deprecated legacy shape accessor channels: use shape(1) instead.
  inline int channels() const { return LegacyShape(1); }
  /// @brief Deprecated legacy shape accessor height: use shape(2) instead.
  inline int height() const { return LegacyShape(2); }
  /// @brief Deprecated legacy shape accessor width: use shape(3) instead.
  inline int width() const { return LegacyShape(3); }
  inline int LegacyShape(int index) const {
    CHECK_LE(num_axes(), 4)
        << "Cannot use legacy accessors on Blobs with > 4 axes.";
    CHECK_LT(index, 4);
    CHECK_GE(index, -4);
    if (index >= num_axes() || index < -num_axes()) {
      // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
      // indexing) -- this special case simulates the one-padding used to fill
      // extraneous axes of legacy blobs.
      return 1;
    }
    return shape(index);
  }
  // compute the linear offset of element (n, c, h, w) in the flat array
  inline int offset(const int n, const int c = 0, const int h = 0,
      const int w = 0) const {
    CHECK_GE(n, 0);
    CHECK_LE(n, num());
    CHECK_GE(channels(), 0);
    CHECK_LE(c, channels());
    CHECK_GE(height(), 0);
    CHECK_LE(h, height());
    CHECK_GE(width(), 0);
    CHECK_LE(w, width());
    return ((n * channels() + c) * height() + h) * width() + w;
  }

  inline int offset(const vector<int>& indices) const {
    CHECK_LE(indices.size(), num_axes());
    int offset = 0;
    for (int i = 0; i < num_axes(); ++i) {
      offset *= shape(i);
      if (indices.size() > i) {
        CHECK_GE(indices[i], 0);
        CHECK_LT(indices[i], shape(i));
        offset += indices[i];
      }
    }
    return offset;
  }
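  // Worked example (illustrative): for a Blob of shape (2, 3, 4, 5),
  // offset(1, 2, 3, 4) == ((1 * 3 + 2) * 4 + 3) * 5 + 4 == 119,
  // i.e. the last of the 2 * 3 * 4 * 5 == 120 elements.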
  /**
   * @brief Copy from a source Blob.
   *
   * @param source the Blob to copy from
   * @param copy_diff if false, copy the data; if true, copy the diff
   * @param reshape if false, require this Blob to be pre-shaped to the shape
   *        of other (and die otherwise); if true, Reshape this Blob to other's
   *        shape if necessary
   */
  // copy the source Blob's values into this Blob
  void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
      bool reshape = false);

  inline Dtype data_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_data()[offset(n, c, h, w)];
  }

  inline Dtype diff_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_diff()[offset(n, c, h, w)];
  }

  inline Dtype data_at(const vector<int>& index) const {
    return cpu_data()[offset(index)];
  }

  inline Dtype diff_at(const vector<int>& index) const {
    return cpu_diff()[offset(index)];
  }

  inline const shared_ptr<SyncedMemory>& data() const {
    CHECK(data_);
    return data_;
  }

  inline const shared_ptr<SyncedMemory>& diff() const {
    CHECK(diff_);
    return diff_;
  }
  // read-only access to the cpu data
  const Dtype* cpu_data() const;
  // set the cpu data pointer
  void set_cpu_data(Dtype* data);
  // read-only access to the gpu shape and data
  const int* gpu_shape() const;
  const Dtype* gpu_data() const;
  void set_gpu_data(Dtype* data);
  // read-only access to the cpu/gpu diff
  const Dtype* cpu_diff() const;
  const Dtype* gpu_diff() const;
  // read-write access to data and diff
  Dtype* mutable_cpu_data();
  Dtype* mutable_gpu_data();
  Dtype* mutable_cpu_diff();
  Dtype* mutable_gpu_diff();
  void Update();  // update step: merges diff into data, effectively data_ = data_ - diff_
  void FromProto(const BlobProto& proto, bool reshape = true);  // deserialization: restore a Blob object from a BlobProto
  void ToProto(BlobProto* proto, bool write_diff = false) const;  // serialization: save the in-memory Blob object into a BlobProto

  /// @brief Compute the sum of absolute values (L1 norm) of the data.
  Dtype asum_data() const;   // L1 norm of data
  /// @brief Compute the sum of absolute values (L1 norm) of the diff.
  Dtype asum_diff() const;   // L1 norm of diff
  /// @brief Compute the sum of squares (L2 norm squared) of the data.
  Dtype sumsq_data() const;  // squared L2 norm of data
  /// @brief Compute the sum of squares (L2 norm squared) of the diff.
  Dtype sumsq_diff() const;  // squared L2 norm of diff

  /// @brief Scale the blob data by a constant factor.
  void scale_data(Dtype scale_factor);  // multiply data by a scalar
  /// @brief Scale the blob diff by a constant factor.
  void scale_diff(Dtype scale_factor);  // multiply diff by a scalar

  /**
   * @brief Set the data_ shared_ptr to point to the SyncedMemory holding the
   *        data_ of Blob other -- useful in Layer%s which simply perform a copy
   *        in their Forward pass.
   *
   * This deallocates the SyncedMemory holding this Blob's data_, as
   * shared_ptr calls its destructor when reset with the "=" operator.
   */
  void ShareData(const Blob& other);  // share another Blob's data_
  /**
   * @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the
   *        diff_ of Blob other -- useful in Layer%s which simply perform a copy
   *        in their Forward pass.
   *
   * This deallocates the SyncedMemory holding this Blob's diff_, as
   * shared_ptr calls its destructor when reset with the "=" operator.
   */
  void ShareDiff(const Blob& other);  // share another Blob's diff_

  bool ShapeEquals(const BlobProto& other);

 protected:
  shared_ptr<SyncedMemory> data_;        // pointer to the data storage
  shared_ptr<SyncedMemory> diff_;        // pointer to the diff (gradient) storage
  shared_ptr<SyncedMemory> shape_data_;  // shape kept in SyncedMemory so the GPU can read it
  vector<int> shape_;                    // shape information
  int count_;                            // number of valid elements
  int capacity_;                         // allocated capacity of the container

  DISABLE_COPY_AND_ASSIGN(Blob);  // disable the copy constructor and assignment operator
};  // class Blob

}  // namespace caffe

#endif  // CAFFE_BLOB_HPP_
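Finally, a short usage sketch (illustrative, assuming a built Caffe to link against) that ties several of these member functions together: filling data, computing norms, and round-tripping through BlobProto.

#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"

int main() {
  caffe::Blob<float> blob(1, 1, 2, 2);
  // Fill the data through the mutable CPU pointer.
  float* data = blob.mutable_cpu_data();
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = i - 1.5f;  // -1.5, -0.5, 0.5, 1.5
  }
  // asum_data() is the L1 norm: 1.5 + 0.5 + 0.5 + 1.5 = 4.
  // sumsq_data() is the squared L2 norm: 2.25 + 0.25 + 0.25 + 2.25 = 5.
  // Serialize to a BlobProto, then restore into another Blob.
  caffe::BlobProto proto;
  blob.ToProto(&proto, /*write_diff=*/false);
  caffe::Blob<float> restored;
  restored.FromProto(proto);  // reshape defaults to true
  return 0;
}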


Reposted from blog.csdn.net/qq_42910179/article/details/106885503