Caffe (1): Blob Code Walkthrough

All data in Caffe is stored in Blob objects. Without further ado, let's go straight to the code, starting with the header file.

In essence, Blob is a wrapper around SyncedMemory: internally it holds a few SyncedMemory smart pointers. The core of CPU/GPU memory management lives in SyncedMemory, which will be covered in detail in a later post.
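
Before reading the code, here is a minimal usage sketch (not from the original source; the shapes, values, and the function name blob_demo are made up for illustration) showing how a Blob is typically created, reshaped, and accessed:

#include <vector>
#include "caffe/blob.hpp"

using caffe::Blob;

void blob_demo() {
  // A 4-D blob: 2 images, 3 channels, 4 x 4 pixels.
  Blob<float> blob(2, 3, 4, 4);

  // Write through the mutable accessor...
  float* data = blob.mutable_cpu_data();
  data[blob.offset(0, 1, 2, 3)] = 1.5f;    // set element (n=0, c=1, h=2, w=3)

  // ...and read through the const accessors.
  float v = blob.data_at(0, 1, 2, 3);      // == 1.5f
  int total = blob.count();                // == 2 * 3 * 4 * 4 == 96

  // Reshape only reallocates when the new element count exceeds the capacity.
  std::vector<int> new_shape(4);
  new_shape[0] = 1; new_shape[1] = 3; new_shape[2] = 4; new_shape[3] = 4;
  blob.Reshape(new_shape);                 // 48 <= 96, so no reallocation
}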

(1) Header file

#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"

const int kMaxBlobAxes = 32;
// This header defines a single class, Blob (class names in Caffe are capitalized)

namespace caffe {

/**
 * The actual storage lives in SyncedMemory objects, which decide whether the
 * memory currently resides on the CPU or the GPU; blob.hpp is a thin wrapper on top of that.
 *
 * @brief A wrapper around SyncedMemory holders serving as the basic
 *        computational unit through which Layer%s, Net%s, and Solver%s interact.
 *
 * TODO(dox): more thorough description.
 */
template <typename Dtype>
class Blob {
 public:
  Blob()
       : data_(), diff_(), count_(0), capacity_(0) {}

  /// @brief Deprecated; use <code>Blob(const vector<int>& shape)</code>.
  // Explicit constructors, to prevent implicit type conversions
  explicit Blob(const int num, const int channels, const int height,const int width);
  explicit Blob(const vector<int>& shape);

  /// @brief Deprecated; use <code>Reshape(const vector<int>& shape)</code>.
  void Reshape(const int num, const int channels, const int height, const int width);
  /**
   * @brief Change the dimensions of the blob, allocating new memory if
   *        necessary (new memory is allocated along with the new dimensions).
   * Reshape can be used both to create fresh storage and to change the dimensions of existing data.
   * This function can be called both to create an initial allocation
   * of memory, and to adjust the dimensions of a top blob during Layer::Reshape
   * or Layer::Forward. When changing the size of blob, memory will only be
   * reallocated if sufficient memory does not already exist, and excess memory
   * will never be freed.
   *
   * Note that reshaping an input blob and immediately calling Net::Backward is
   * an error; either Net::Forward or Net::Reshape need to be called to
   * propagate the new input shape to higher layers.
   */
  void Reshape(const vector<int>& shape);
  void Reshape(const BlobShape& shape);
  void ReshapeLike(const Blob& other);
  // Return the shape as a human-readable string
  inline string shape_string() const {
    ostringstream stream;
    for (int i = 0; i < shape_.size(); ++i) {
      stream << shape_[i] << " ";
    }
    // If no axes are set at all, count_ keeps its default value of 1, i.e. the blob
    // degenerates to a scalar. Normally a blob is an N x C x H x W tensor.
    // Note that a 1 x 1 x 1 x 1 tensor and a scalar blob both have count_ == 1,
    // but one is a (degenerate) tensor and the other is a scalar.
    stream << "(" << count_ << ")"; // "num channels height width (count)", e.g. "100 1 28 28 (78400)"
    return stream.str();
  }
  // Return a const reference to the shape vector
  inline const vector<int>& shape() const { return shape_; }
  /**
   * @brief Returns the dimension of the index-th axis (or the negative index-th
   *        axis from the end, if index is negative).
   *
   * @param index the axis index, which may be negative as it will be
   *        "canonicalized" using CanonicalAxisIndex.
   *        Dies on out of range index.
   *        If index is negative, CanonicalAxisIndex converts it to the corresponding positive index.
   */
  // Return the size of the specified axis
  inline int shape(int index) const {
    return shape_[CanonicalAxisIndex(index)];
  }
  // Return the number of axes (typically 4 for image blobs)
  inline int num_axes() const { return shape_.size(); }
  // Return the total number of valid elements in the blob
  inline int count() const { return count_; }

  /**
   * @brief Compute the volume of a slice; i.e., the product of dimensions
   *        among a range of axes.
   *
   * @param start_axis The first axis to include in the slice.
   *
   * @param end_axis The first axis to exclude from the slice.
   */
  // Product of the axis sizes in [start_axis, end_axis), i.e. the number of elements in that slice
  inline int count(int start_axis, int end_axis) const {
    CHECK_LE(start_axis, end_axis); // start and end may be equal
    CHECK_GE(start_axis, 0);
    CHECK_GE(end_axis, 0);
    CHECK_LE(start_axis, num_axes());
    CHECK_LE(end_axis, num_axes());
    // Note: this function returns at least 1, never 0; even if the loop below does not run, it returns 1
    int count = 1;
    // If start_axis == end_axis (e.g. both 0), the loop body never runs and count stays 1
    for (int i = start_axis; i < end_axis; ++i) {
      count *= shape(i);
    }
    return count;
  }
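  // Illustrative example: for a blob of shape 100 x 1 x 28 x 28,
  //   count(0, 4) == 100 * 1 * 28 * 28 == 78400,
  //   count(1, 3) == 1 * 28 == 28,
  //   count(2, 2) == 1 (empty range).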
  /**
   * @brief Compute the volume of a slice spanning from a particular first
   *        axis to the final axis.
   *
   * @param start_axis The first axis to include in the slice.
   */
  // Product of the axis sizes from start_axis through the last axis
  inline int count(int start_axis) const {
    return count(start_axis, num_axes());
  }

  /**
   * @brief Returns the 'canonical' version of a (usually) user-specified axis,
   *        allowing for negative indexing (e.g., -1 for the last axis).
   *
   * @param axis_index the axis index.
   *        If 0 <= index < num_axes(), return index.
   *        If -num_axes <= index <= -1, return (num_axes() - (-index)),
   *        e.g., the last axis index (num_axes() - 1) if index == -1,
   *        the second to last if index == -2, etc.
   *        Dies on out of range index.
   */
  // Convert a (possibly negative) axis index into the actual positive index.
  // Negative indices count from the end: -1 means the last axis (index N-1), -2 means N-2, -3 means N-3, and so on.
  inline int CanonicalAxisIndex(int axis_index) const {
    CHECK_GE(axis_index, -num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    CHECK_LT(axis_index, num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    if (axis_index < 0) {
      return axis_index + num_axes();
    }
    return axis_index;
  }
  // Legacy 4-D accessors (num, channels, height, width), kept for backward compatibility with the old fixed-4-axis Blob interface.

  /// @brief Deprecated legacy shape accessor num: use shape(0) instead.
  inline int num() const { return LegacyShape(0); }
  /// @brief Deprecated legacy shape accessor channels: use shape(1) instead.
  inline int channels() const { return LegacyShape(1); }
  /// @brief Deprecated legacy shape accessor height: use shape(2) instead.
  inline int height() const { return LegacyShape(2); }
  /// @brief Deprecated legacy shape accessor width: use shape(3) instead.
  inline int width() const { return LegacyShape(3); }
  inline int LegacyShape(int index) const {
    CHECK_LE(num_axes(), 4)
        << "Cannot use legacy accessors on Blobs with > 4 axes.";
    CHECK_LT(index, 4);
    CHECK_GE(index, -4);
    if (index >= num_axes() || index < -num_axes()) {
      // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
      // indexing) -- this special case simulates the one-padding used to fill
      // extraneous axes of legacy blobs.
      return 1;
    }
    return shape(index);
  }
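  // Illustrative example: a 2-axis blob of shape (M, N) reports num() == M,
  // channels() == N, height() == 1, width() == 1; missing trailing axes are padded with 1.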
  // Compute the flat offset of (n, c, h, w): ((n * channels() + c) * height() + h) * width() + w, i.e. w + width*(h + height*(c + channels*n))
  inline int offset(const int n, const int c = 0, const int h = 0,const int w = 0) const {
    CHECK_GE(n, 0);
    CHECK_LE(n, num());
    CHECK_GE(channels(), 0);
    CHECK_LE(c, channels());
    CHECK_GE(height(), 0);
    CHECK_LE(h, height());
    CHECK_GE(width(), 0);
    CHECK_LE(w, width());
    return ((n * channels() + c) * height() + h) * width() + w;
    // How the multi-dimensional indexing works: channels(), height(), width() are fixed
    // sizes, while n, c, h, w are the indices along each axis.
    // Instead of indexing a 4-D array as a[n][c][h][w], the blob's memory is one
    // contiguous 1-D array in row-major (N, C, H, W) order, and offset() computes the
    // position of (n, c, h, w) within that flat array.
  }
  /*
  Example: bottom_data += bottom[0]->offset(0, 1);
  Here bottom[0]->offset(0, 1) calls the function above as offset(0, 1, 0, 0):
  ((n * channels + c) * height + h) * width + w = ((0 * channels + 1) * height + 0) * width + 0 = height * width,
  i.e. the pointer is advanced by exactly one channel's worth of data.
  */
  inline int offset(const vector<int>& indices) const {
    CHECK_LE(indices.size(), num_axes());
    int offset = 0;
    for (int i = 0; i < num_axes(); ++i) {
      offset *= shape(i);
      if (indices.size() > i) {
        CHECK_GE(indices[i], 0);
        CHECK_LT(indices[i], shape(i));
        offset += indices[i];
      }
    }
    return offset;
  }
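  // Illustrative example: for a blob of shape (N, C, H, W), an indices vector {n, c}
  // yields (n * C + c) * H * W, the same value as offset(n, c, 0, 0).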


  /**
   * @brief Copy from a source Blob.
   *
   * @param source the Blob to copy from
   * @param copy_diff if false, copy the data; if true, copy the diff
   * @param reshape if false, require this Blob to be pre-shaped to the shape
   *        of other (and die otherwise); if true, Reshape this Blob to other's
   *        shape if necessary
   */

  // Copy the source blob into this blob, element by element
  void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false, bool reshape = false);
  
  // Element accessors: read the value at position (n, c, h, w).
  // data_at returns the value held by cpu_data() at offset (n, c, h, w) (read-only; it returns a copy).
  inline Dtype data_at(const int n, const int c, const int h, const int w) const {
    return cpu_data()[offset(n, c, h, w)];
  }
  // Read the value of cpu_diff() at position (n, c, h, w)
  inline Dtype diff_at(const int n, const int c, const int h,const int w) const {
    return cpu_diff()[offset(n, c, h, w)];
  }

  inline Dtype data_at(const vector<int>& index) const {
    return cpu_data()[offset(index)];
  }

  inline Dtype diff_at(const vector<int>& index) const {
    return cpu_diff()[offset(index)];
  }
  // Return a const reference to the data_ SyncedMemory shared_ptr
  inline const shared_ptr<SyncedMemory>& data() const {
    CHECK(data_);
    return data_;
  }
  // Return a const reference to the diff_ SyncedMemory shared_ptr
  inline const shared_ptr<SyncedMemory>& diff() const {
    CHECK(diff_);
    return diff_;
  }
  // The functions below return raw data pointers, i.e. the addresses where the data lives;
  // the actual pointers are held inside the SyncedMemory objects.
  // Read-only access to the CPU data
  const Dtype* cpu_data() const;
  // Set the CPU data
  void set_cpu_data(Dtype* data);
  const int* gpu_shape() const;
  // Read-only access to the GPU data (and, below, to the CPU/GPU diff)
  const Dtype* gpu_data() const;
  const Dtype* cpu_diff() const;
  const Dtype* gpu_diff() const;
  // Read-write access to the CPU/GPU data and diff
  Dtype* mutable_cpu_data(); 
  Dtype* mutable_gpu_data();
  Dtype* mutable_cpu_diff();
  Dtype* mutable_gpu_diff();
  // data = data - diff
  void Update();
  

  // Deserialization: restore a Blob from a BlobProto
  void FromProto(const BlobProto& proto, bool reshape = true);
  // Serialization: save the in-memory Blob into a BlobProto
  void ToProto(BlobProto* proto, bool write_diff = false) const;

  /// @brief Compute the sum of absolute values (L1 norm) of the data.
  Dtype asum_data() const;
  /// @brief Compute the sum of absolute values (L1 norm) of the diff.
  Dtype asum_diff() const;
  /// @brief Compute the sum of squares (squared L2 norm) of the data.
  Dtype sumsq_data() const;
  /// @brief Compute the sum of squares (squared L2 norm) of the diff.
  Dtype sumsq_diff() const;

  /// @brief Scale the blob data by a constant factor.
  // Multiply the data by a scalar factor
  void scale_data(Dtype scale_factor);
  /// @brief Scale the blob diff by a constant factor.
  // Multiply the diff by a scalar factor
  void scale_diff(Dtype scale_factor);

  /**
   * @brief Set the data_ shared_ptr to point to the SyncedMemory holding the
   *        data_ of Blob other -- useful in Layer%s which simply perform a copy
   *        in their Forward pass.
   *
   * This deallocates the SyncedMemory holding this Blob's data_, as
   * shared_ptr calls its destructor when reset with the "=" operator.
   */
  // Share another Blob's data_
  void ShareData(const Blob& other);
  /**
   * @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the
   *        diff_ of Blob other -- useful in Layer%s which simply perform a copy
   *        in their Forward pass.
   *
   * This deallocates the SyncedMemory holding this Blob's diff_, as
   * shared_ptr calls its destructor when reset with the "=" operator.
   */
  // Share another Blob's diff_
  void ShareDiff(const Blob& other);

  bool ShapeEquals(const BlobProto& other);

 protected:
  // data_ is a shared_ptr to a SyncedMemory object;
  // the actual storage is the cpu_ptr_ and gpu_ptr_ held inside that object.
  shared_ptr<SyncedMemory> data_;
  shared_ptr<SyncedMemory> diff_;       // gradients, same layout and size as data_
  shared_ptr<SyncedMemory> shape_data_; // the shape, stored as a SyncedMemory so it is also reachable from the GPU
  vector<int> shape_;  // vector holding the shape information
  // shape_data_ and shape_ hold the same values
  int count_;          // number of valid elements
  int capacity_;       // allocated capacity of the blob

  // DISABLE_COPY_AND_ASSIGN(Blob); // disable the copy constructor and assignment operator (commented out here; enabled in upstream Caffe)
};  // class Blob

}  // namespace caffe

#endif  // CAFFE_BLOB_HPP_



/**

message BlobShape {
repeated int64 dim = 1 [packed = true];
}

message BlobProto{
optional BlobShape shape = 7;
repeated float data = 5[packed = true];
repeated float diff = 6[packed = true];
repeated double double_data = 8[packed = true];
repeated double double_diff = 9[packed = true];

// 4D dimensions -- deprecated.  Use "shape" instead.
optional int32 num = 1[default = 0];
optional int32 channels = 2[default = 0];
optional int32 height = 3[default = 0];
optional int32 width = 4[default = 0];}

*/

(2) Source file (blob.cpp)

#include <climits>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

/*----------------------------------------------------------------------

 (1) Relationship between blob.cpp and syncedmem.cpp
 blob.cpp must be read together with syncedmem.cpp.
 Blob is a higher-level concept than SyncedMemory:
 a Blob contains three SyncedMemory members: data_, diff_, and shape_data_.
 Each of them represents a chunk of memory, and SyncedMemory controls whether
 that data currently lives on the CPU or the GPU.
 SyncedMemory itself holds the concrete CPU and GPU pointers, cpu_ptr_ and gpu_ptr_.

 (2) Data types in Blob, BlobProto, and Datum

 --1: A Blob itself can store data of any type (new SyncedMemory(capacity_ * sizeof(Dtype))),
 so Blob's FromProto accepts any Dtype:
 Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape)

 Blob's ToProto is only specialized for double and float:
 void Blob<double>::ToProto(BlobProto* proto, bool write_diff)
 void Blob<float>::ToProto(BlobProto* proto, bool write_diff)


 --2: A BlobProto can only hold float and double data:
 repeated float data = 5 [packed = true];
 repeated float diff = 6 [packed = true];
 repeated double double_data = 8 [packed = true];
 repeated double double_diff = 9 [packed = true];

 --3: A Datum is mainly used to read one image plus its label, and only holds bytes and float data:
 optional bytes data = 4;
 optional int32 label = 5;
 repeated float float_data = 6;
 So after reading data through a Datum, it is written into a BlobProto as either float or byte data,
 depending on which form was read.


 Within a Blob, count_ = (product of all dimensions), and data_ and diff_ both hold count_ elements.

 The two classes meet most directly at:
 1: data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
 2: diff_->cpu_data()

 Many key functions are defined in blob.hpp; to access the element at a specific
 position, use cpu_data()[offset(n, c, h, w)].
-*/

namespace caffe {

// 4-D convenience overload, mainly for image data
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,const int width) {
  vector<int> shape(4);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = height;
  shape[3] = width;
  Reshape(shape);
}

// The real reshape function.
// This Reshape is the core of Blob initialization; all memory-related work happens here.
// It handles data of arbitrary dimensionality; the 4-D blob is just a special case.
template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) 
// shape is the input parameter, a vector<int>
// shape_ is this Blob's member, also a vector<int>
// shape_data_ is this Blob's member, a shared_ptr to a SyncedMemory object
{
  CHECK_LE(shape.size(), kMaxBlobAxes);

  count_ = 1; // total number of elements = num * channels * height * width; initialized to 1, so a blob is at least a scalar
  shape_.resize(shape.size()); // resize the member shape vector as well

  // shape itself is tiny (size() == 4 for image blobs), so shape_data_ only stores a few integers;
  // it is a 1-D list of the sizes of each axis, not the data values themselves.
  // shape_data_ and shape_ hold the same contents; the only difference is that shape_data_ can also
  // be placed on the GPU (useful when a kernel needs the shape).
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    // If shape_data_ is null, or the space it points to is smaller than what is needed, allocate a new SyncedMemory
    shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
  }
  // Get a writable CPU pointer to the shape data
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
  // Iterate over all axes, multiplying the sizes together to get the total number of elements
  for (int i = 0; i < shape.size(); ++i) {

    CHECK_GE(shape[i], 0); // every axis size must be >= 0

    if (count_ != 0) { // make sure count_ does not overflow
      CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; // each axis size must be <= INT_MAX / count_ (count_ starts at 1)
    }
    count_ *= shape[i]; // running product; for images, the total is num * channels * height * width
    // the next two assignments store the same value
    shape_[i] = shape[i];      // update the shape_ member
    shape_data[i] = shape[i];  // and the CPU memory behind shape_data_
  } // for (int i = 0; i < shape.size(); ++i)

  // capacity_ was initialized to 0 when the Blob object was constructed
  if (count_ > capacity_) { // if the new count_ exceeds the currently allocated capacity, grow: reallocate data_ and diff_
    capacity_ = count_;
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); // reset() re-points the shared_ptr; note that the SyncedMemory is sized to exactly what is needed
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); // if Dtype is float, float-sized elements are allocated; likewise for double or any other Dtype
  }
}
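
// Illustrative example of the reallocation policy (shapes made up for this note):
//   Blob<float> b(1, 3, 4, 4);  // count_ = 48, capacity_ = 48, data_/diff_ allocated
//   b.Reshape(1, 3, 2, 2);      // count_ = 12 <= capacity_, no reallocation
//   b.Reshape(2, 3, 4, 4);      // count_ = 96 > capacity_, data_/diff_ reallocated
// Memory is only ever grown here; excess memory is never freed.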

template <typename Dtype>
void Blob<Dtype>::Reshape(const BlobShape& shape) {
  CHECK_LE(shape.dim_size(), kMaxBlobAxes);
  vector<int> shape_vec(shape.dim_size());
  // Copy each dimension size into shape_vec
  for (int i = 0; i < shape.dim_size(); ++i) {
    shape_vec[i] = shape.dim(i); 
  }
  Reshape(shape_vec);
}

template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.shape());
}

// capacity_ must be initialized before calling Reshape, otherwise the behavior is unpredictable
template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height, const int width)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(num, channels, height, width);
}

template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(shape);
}

template <typename Dtype>
const int* Blob<Dtype>::gpu_shape() const {
  CHECK(shape_data_);
  return (const int*)shape_data_->gpu_data();
}

// Read-only access to the CPU data pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->cpu_data(); // return the CPU pointer held by the data_ object
}

// Replace the CPU data pointer
template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
  CHECK(data);
  data_->set_cpu_data(data); // hand an externally allocated buffer to the data_ object
}

// Read-only access to the GPU data pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->gpu_data(); // return the GPU pointer held by the data_ object
}

// Read-only access to the CPU diff pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_diff() const {
  CHECK(diff_);
  return (const Dtype*)diff_->cpu_data(); // return the CPU pointer held by the diff_ object
}
// Read-only access to the GPU diff pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_diff() const {
  CHECK(diff_);
  return (const Dtype*)diff_->gpu_data();
}

// static_cast<type-id>(expression) converts expression to type-id, without any runtime type check to guarantee safety.
// Read-write access to the CPU data pointer:
// the void* held by SyncedMemory is cast to a typed Dtype* pointer.

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_cpu_data());
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_gpu_data());
}

// Read-write access to the CPU diff pointer
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_diff() {
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_cpu_data());
}

// Read-write access to the GPU diff pointer
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_diff() {
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_gpu_data());
}

// Share another Blob's data pointer
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}

// Share another Blob's diff pointer
template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {
  CHECK_EQ(count_, other.count());
  diff_ = other.diff(); // direct shared_ptr assignment; the previously held SyncedMemory is released if no one else references it
}

// Update() is used to update the parameter blobs of a Net; the int and unsigned int versions are not implemented, since parameters are stored as float or double.
// The "update" method is used for parameter blobs in a Net, which are stored
// as Blob<float> or Blob<double> -- hence we do not define it for
// Blob<int> or Blob<unsigned int>.
template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }

template <typename Dtype>
void Blob<Dtype>::Update() {
  // We will perform update based on where the data is located.
  // the update is performed wherever the data currently lives

  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU: // data lives on the CPU
    // perform computation on CPU: data_->mutable_cpu_data() = -1 * diff_->cpu_data() + data_->mutable_cpu_data()
    caffe_axpy<Dtype>(count_, Dtype(-1), static_cast<const Dtype*>(diff_->cpu_data()), static_cast<Dtype*>(data_->mutable_cpu_data()));
    break;
  case SyncedMemory::HEAD_AT_GPU: // data lives on the GPU, or the CPU/GPU copies are already in sync
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    // perform computation on GPU
    caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->gpu_data()),
        static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
    NO_GPU;
#endif
    break;
  default:
    LOG(FATAL) << "Syncedmem not initialized.";
  }
}
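
// Illustrative example (made-up values): if data = {1.0, 2.0} and diff = {0.1, -0.2},
// then after Update() data = {0.9, 2.2}. The solver is expected to have already scaled
// diff (e.g. by the learning rate) before calling Update().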

template <> unsigned int Blob<unsigned int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

// Compute the L1 norm (sum of absolute values) of data_
template <typename Dtype>
Dtype Blob<Dtype>::asum_data() const {
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_data()); // asum on the CPU
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_data(), &asum); // asum on the GPU
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return 0;
}

template <> unsigned int Blob<unsigned int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

// Compute the L1 norm (sum of absolute values) of diff_
template <typename Dtype>
Dtype Blob<Dtype>::asum_diff() const {
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_diff());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_diff(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return 0;
}

template <> unsigned int Blob<unsigned int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

// Compute the squared L2 norm (sum of squares) of data_
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_data() const {
  Dtype sumsq;
  const Dtype* data;
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = cpu_data();
    sumsq = caffe_cpu_dot(count_, data, data); // dot product on the CPU
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = gpu_data();
    caffe_gpu_dot(count_, data, data, &sumsq); // dot product on the GPU
#else
    NO_GPU;
#endif
    break;
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}

template <> unsigned int Blob<unsigned int>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

// Compute the squared L2 norm (sum of squares) of diff_
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_diff() const {
  Dtype sumsq;
  const Dtype* diff;
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = cpu_diff();
    sumsq = caffe_cpu_dot(count_, diff, diff);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = gpu_diff();
    caffe_gpu_dot(count_, diff, diff, &sumsq);
    break;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}

template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor) {
  NOT_IMPLEMENTED;
}

template <> void Blob<int>::scale_data(int scale_factor) {
  NOT_IMPLEMENTED;
}
// Scale data_ by a constant factor
template <typename Dtype>
void Blob<Dtype>::scale_data(Dtype scale_factor) {
  Dtype* data;
  if (!data_) { return; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = mutable_cpu_data();
    caffe_scal(count_, scale_factor, data);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  // note: in the SYNCED state only the GPU copy is updated; mutable_gpu_data() marks the CPU copy as stale, so it is refreshed on the next CPU access
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = mutable_gpu_data();
    caffe_gpu_scal(count_, scale_factor, data);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
}

template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor) {
  NOT_IMPLEMENTED;
}

template <> void Blob<int>::scale_diff(int scale_factor) {
  NOT_IMPLEMENTED;
}

// Scale diff_ by a constant factor
template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
  Dtype* diff;
  if (!diff_) { return; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = mutable_cpu_diff();
    caffe_scal(count_, scale_factor, diff);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = mutable_gpu_diff();
    caffe_gpu_scal(count_, scale_factor, diff);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}

// Check whether this blob's shape matches the shape stored in a BlobProto
template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
  if (other.has_num() || other.has_channels() || other.has_height() || other.has_width()) {
    // Using deprecated 4D Blob dimensions --
    // shape is (num, channels, height, width).
    // Note: we do not use the normal Blob::num(), Blob::channels(), etc.
    // methods as these index from the beginning of the blob shape, where legacy
    // parameter blobs were indexed from the end of the blob shape (e.g., bias
    // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)).
    // If the proto uses the deprecated 4-D fields (num, channels, height, width), compare against the legacy shape; the && chain below short-circuits (lazy evaluation)
    return shape_.size() <= 4 &&
           LegacyShape(-4) == other.num() &&
           LegacyShape(-3) == other.channels() &&
           LegacyShape(-2) == other.height() &&
           LegacyShape(-1) == other.width();
  }
  // Otherwise build a shape vector from the proto and compare it directly
  vector<int> other_shape(other.shape().dim_size());
  for (int i = 0; i < other.shape().dim_size(); ++i) {
    other_shape[i] = other.shape().dim(i);
  }
  return shape_ == other_shape;
}

// Copy data (or optionally diff) from another Blob, reshaping if necessary
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (source.count() != count_ || source.shape() != shape_) {
    if (reshape) {
      ReshapeLike(source); // reshaping was requested, so do it
    } else { // otherwise copying between blobs of different shapes is an error
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  // Copy the data
  switch (Caffe::mode()) {
  case Caffe::GPU: // GPU mode
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(), static_cast<Dtype*>(diff_->mutable_gpu_data())); // diff - > diff
    } else {
      caffe_copy(count_, source.gpu_data(), static_cast<Dtype*>(data_->mutable_gpu_data())); // data -> data
    }
    break;
  case Caffe::CPU: // CPU mode
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(), static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(), static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

// Load a Blob from a BlobProto, e.g. one previously exported and saved to disk
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
  // Read the required dimension information from the BlobProto
  if (reshape) { // if the reshape flag is true
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() || // legacy dimensions: a fixed 4-D input, no longer recommended
        proto.has_height() || proto.has_width()) {
      // Using deprecated 4D Blob dimensions --
      // shape is (num, channels, height, width).
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape); // reshape the Blob according to the dimension information
  } else {
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // data_ and diff_ have storage of the same size here
  Dtype* data_vec = mutable_cpu_data();
  if (proto.double_data_size() > 0) { // if double data was saved; every repeated proto field gets a generated <field>_size() accessor
    CHECK_EQ(count_, proto.double_data_size()); // the Blob's element count must match the number of stored values
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.double_data(i); // load the double data
    }
  } else {
    CHECK_EQ(count_, proto.data_size());
    for (int i = 0; i < count_; ++i) { // otherwise load the float data
      data_vec[i] = proto.data(i);
    }
  }
  if (proto.double_diff_size() > 0) { // if double diff was saved
    CHECK_EQ(count_, proto.double_diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.double_diff(i); // load the double diff
    }
  } else if (proto.diff_size() > 0) {
    CHECK_EQ(count_, proto.diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) { // otherwise load the float diff
      diff_vec[i] = proto.diff(i);
    }
  }
}

// Export the Blob's data (and optionally diff) into a BlobProto, so it can be written to a file on disk
template <>
void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape(); // reset the proto's shape so that it matches this Blob
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_double_data(); // clear old data
  proto->clear_double_diff(); // clear old diff
  const double* data_vec = cpu_data(); // export data into the proto
  for (int i = 0; i < count_; ++i) {
    proto->add_double_data(data_vec[i]);
  }
  if (write_diff) { // if the diff should also be written
    const double* diff_vec = cpu_diff(); // export diff into the proto
    for (int i = 0; i < count_; ++i) {
      proto->add_double_diff(diff_vec[i]);
    }
  }
}

template <>
void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_data();
  proto->clear_diff();
  const float* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_data(data_vec[i]);
  }
  if (write_diff) {
    const float* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_diff(diff_vec[i]);
    }
  }
}

INSTANTIATE_CLASS(Blob);
template class Blob<int>;
template class Blob<unsigned int>;

}  // namespace caffe

/**

message BlobShape {
repeated int64 dim = 1 [packed = true];
}

message BlobProto{
optional BlobShape shape = 7; // written out via proto->mutable_shape()->add_dim(shape_[i])
repeated float data = 5[packed = true];
repeated float diff = 6[packed = true];
repeated double double_data = 8[packed = true];
repeated double double_diff = 9[packed = true];

// 4D dimensions -- deprecated.  Use "shape" instead.
optional int32 num = 1[default = 0];
optional int32 channels = 2[default = 0];
optional int32 height = 3[default = 0];
optional int32 width = 4[default = 0];}


message Datum {
optional int32 channels = 1;
optional int32 height = 2;
optional int32 width = 3;
// the actual image data, in bytes
optional bytes data = 4;
optional int32 label = 5;
// Optionally, the datum could also hold float data.
repeated float float_data = 6;
// If true data contains an encoded image that need to be decoded
optional bool encoded = 7 [default = false];
}

*/
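
To tie FromProto and ToProto together, here is a minimal round-trip sketch (the function name roundtrip_demo and all values are made up for illustration): a Blob is filled, serialized into a BlobProto, and restored into a fresh Blob.

#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"

using caffe::Blob;
using caffe::BlobProto;

void roundtrip_demo() {
  Blob<float> src(1, 2, 2, 2);                // 8 elements
  float* data = src.mutable_cpu_data();
  for (int i = 0; i < src.count(); ++i) {
    data[i] = static_cast<float>(i);          // fill with 0..7
  }

  BlobProto proto;
  src.ToProto(&proto, /*write_diff=*/false);  // serialize the shape and data values

  Blob<float> dst;
  dst.FromProto(proto, /*reshape=*/true);     // restore shape and data
  // dst.data_at(0, 1, 1, 1) == 7.0f
}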

Reposted from blog.csdn.net/hnshahao/article/details/81216043