Tracing the Caffe source -- Blob
2017-08-08 16:10
First, the header file: caffe/include/caffe/blob.hpp
#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"

const int kMaxBlobAxes = 32;  // a blob may have at most 32 axes

namespace caffe {

template <typename Dtype>  // Blob is a class template
class Blob {
 public:
  Blob() : data_(), diff_(), count_(0), capacity_(0) {}  // default constructor

  explicit Blob(const int num, const int channels, const int height,
      const int width);  // deprecated; prefer the vector<int> overload below
  explicit Blob(const vector<int>& shape);  // explicit suppresses the implicit
      // conversion from the parameter type to the class type that a
      // constructor callable with a single argument would otherwise define

  void Reshape(const int num, const int channels, const int height,
      const int width);  // deprecated; prefer the vector<int> overload below
  void Reshape(const vector<int>& shape);  // reshape the blob; allocates more
      // memory when needed, but excess memory is never freed
  void Reshape(const BlobShape& shape);
  void ReshapeLike(const Blob& other);

  inline string shape_string() const {  // render the shape as a string,
      // e.g. "1 3 1 1 (3)"
    ostringstream stream;
    for (int i = 0; i < shape_.size(); ++i) {
      stream << shape_[i] << " ";
    }
    stream << "(" << count_ << ")";
    return stream.str();
  }
  inline const vector<int>& shape() const { return shape_; }  // shape_ stores the axis sizes
  inline int shape(int index) const {  // the size of the given axis
    return shape_[CanonicalAxisIndex(index)];
  }
  inline int num_axes() const { return shape_.size(); }  // number of axes; 4 for typical image blobs
  inline int count() const { return count_; }  // total number of elements (stored in count_)
  inline int count(int start_axis, int end_axis) const {  // number of elements
      // spanned by the axes [start_axis, end_axis) -- start inclusive, end exclusive
    CHECK_LE(start_axis, end_axis);
    CHECK_GE(start_axis, 0);
    CHECK_GE(end_axis, 0);
    CHECK_LE(start_axis, num_axes());
    CHECK_LE(end_axis, num_axes());
    int count = 1;
    for (int i = start_axis; i < end_axis; ++i) {
      count *= shape(i);
    }
    return count;
  }
  inline int count(int start_axis) const {  // number of elements from start_axis to the last axis
    return count(start_axis, num_axes());
  }

  inline int CanonicalAxisIndex(int axis_index) const {  // canonicalize an axis
      // index: for 0 <= index < num_axes() return it unchanged; for
      // -num_axes() <= index <= -1 return index + num_axes(), so -1 is the
      // last axis, -2 the second-to-last, and so on
    CHECK_GE(axis_index, -num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    CHECK_LT(axis_index, num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    if (axis_index < 0) {
      return axis_index + num_axes();
    }
    return axis_index;
  }

  inline int num() const { return LegacyShape(0); }  // batch size; deprecated, use shape(0)
  inline int channels() const { return LegacyShape(1); }  // channels; deprecated, use shape(1)
  inline int height() const { return LegacyShape(2); }  // height; deprecated, use shape(2)
  inline int width() const { return LegacyShape(3); }  // width; deprecated, use shape(3)
  inline int LegacyShape(int index) const {
    CHECK_LE(num_axes(), 4)
        << "Cannot use legacy accessors on Blobs with > 4 axes.";
    CHECK_LT(index, 4);
    CHECK_GE(index, -4);
    if (index >= num_axes() || index < -num_axes()) {
      // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
      // indexing) -- this special case simulates the one-padding used to fill
      // extraneous axes of legacy blobs.
      return 1;
    }
    return shape(index);
  }

  inline int offset(const int n, const int c = 0, const int h = 0,
      const int w = 0) const {  // linear index of coordinate (n, c, h, w):
      // ((n * C + c) * H + h) * W + w
    CHECK_GE(n, 0);
    CHECK_LE(n, num());
    CHECK_GE(channels(), 0);
    CHECK_LE(c, channels());
    CHECK_GE(height(), 0);
    CHECK_LE(h, height());
    CHECK_GE(width(), 0);
    CHECK_LE(w, width());
    return ((n * channels() + c) * height() + h) * width() + w;
  }
  inline int offset(const vector<int>& indices) const {  // as above, but takes
      // a vector of per-axis indices
    CHECK_LE(indices.size(), num_axes());
    int offset = 0;
    for (int i = 0; i < num_axes(); ++i) {
      offset *= shape(i);
      if (indices.size() > i) {
        CHECK_GE(indices[i], 0);
        CHECK_LT(indices[i], shape(i));
        offset += indices[i];
      }
    }
    return offset;
  }
  /**
   * @brief Copy from a source Blob.
   *
   * @param source the Blob to copy from
   * @param copy_diff if false, copy the data; if true, copy the diff
   * @param reshape if false, require this Blob to be pre-shaped to the shape
   *        of other (and die otherwise); if true, Reshape this Blob to other's
   *        shape if necessary
   */
  void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
      bool reshape = false);

  inline Dtype data_at(const int n, const int c, const int h,
      const int w) const {  // element access: the data value at (n, c, h, w)
    return cpu_data()[offset(n, c, h, w)];
  }
  inline Dtype diff_at(const int n, const int c, const int h,
      const int w) const {  // the gradient value at (n, c, h, w)
    return cpu_diff()[offset(n, c, h, w)];
  }
  inline Dtype data_at(const vector<int>& index) const {  // same, via an index vector
    return cpu_data()[offset(index)];
  }
  inline Dtype diff_at(const vector<int>& index) const {  // same, via an index vector
    return cpu_diff()[offset(index)];
  }

  inline const shared_ptr<SyncedMemory>& data() const {  // the memory object holding the data
    CHECK(data_);
    return data_;
  }
  inline const shared_ptr<SyncedMemory>& diff() const {  // the memory object holding the gradient
    CHECK(diff_);
    return diff_;
  }

  const Dtype* cpu_data() const;   // read-only CPU data pointer
  void set_cpu_data(Dtype* data);  // set the CPU data
  const int* gpu_shape() const;    // read-only GPU shape pointer
  const Dtype* gpu_data() const;   // read-only GPU data pointer
  const Dtype* cpu_diff() const;   // read-only CPU diff pointer
  const Dtype* gpu_diff() const;   // read-only GPU diff pointer
  Dtype* mutable_cpu_data();       // writable CPU data pointer
  Dtype* mutable_gpu_data();       // writable GPU data pointer
  Dtype* mutable_cpu_diff();       // writable CPU diff pointer
  Dtype* mutable_gpu_diff();       // writable GPU diff pointer
  void Update();
  void FromProto(const BlobProto& proto, bool reshape = true);  // restore a blob from a proto
  void ToProto(BlobProto* proto, bool write_diff = false) const;  // serialize a blob into a proto

  Dtype asum_data() const;   // sum of absolute values of the data (L1 norm)
  Dtype asum_diff() const;   // sum of absolute values of the diff (L1 norm)
  Dtype sumsq_data() const;  // sum of squares of the data (squared L2 norm)
  Dtype sumsq_diff() const;  // sum of squares of the diff (squared L2 norm)

  void scale_data(Dtype scale_factor);  // data = scale_factor * data
  void scale_diff(Dtype scale_factor);  // diff = scale_factor * diff

  void ShareData(const Blob& other);  // share data: point this blob's data_ at
      // other's data; whatever this blob's data_ pointed to before is released
  void ShareDiff(const Blob& other);  // share diff: point this blob's diff_ at
      // other's diff; whatever this blob's diff_ pointed to before is released

  bool ShapeEquals(const BlobProto& other);  // do the shapes match?

 protected:
  shared_ptr<SyncedMemory> data_;
  shared_ptr<SyncedMemory> diff_;
  shared_ptr<SyncedMemory> shape_data_;
  vector<int> shape_;
  int count_;
  int capacity_;

  DISABLE_COPY_AND_ASSIGN(Blob);
};  // class Blob

}  // namespace caffe

#endif  // CAFFE_BLOB_HPP_
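Before moving on to the implementation, here is a small usage sketch of the interface above. This is my own toy program rather than Caffe code, and it assumes a working Caffe build to compile and link against. The point is the indexing: offset(n, c, h, w) linearizes a 4-D coordinate in row-major (N, C, H, W) order.

#include <iostream>
#include <vector>
#include "caffe/blob.hpp"

int main() {
  std::vector<int> shape(4);
  shape[0] = 2; shape[1] = 3; shape[2] = 4; shape[3] = 5;  // N=2, C=3, H=4, W=5
  caffe::Blob<float> blob(shape);

  // Fill the data through the writable CPU pointer.
  float* data = blob.mutable_cpu_data();
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = static_cast<float>(i);
  }

  // offset(n, c, h, w) = ((n*C + c)*H + h)*W + w,
  // so offset(1, 2, 3, 4) = ((1*3 + 2)*4 + 3)*5 + 4 = 119.
  std::cout << blob.offset(1, 2, 3, 4) << "\n";   // 119
  std::cout << blob.data_at(1, 2, 3, 4) << "\n";  // 119, the value we wrote there
  std::cout << blob.shape_string() << "\n";       // "2 3 4 5 (120)"
  return 0;
}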
Now for the implementation: caffe/src/caffe/blob.cpp
#include <climits>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
    const int width) {
  vector<int> shape(4);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = height;
  shape[3] = width;
  Reshape(shape);
}

template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
  // shape_data_ is a smart pointer to a SyncedMemory block
  CHECK_LE(shape.size(), kMaxBlobAxes);
  count_ = 1;
  shape_.resize(shape.size());  // resize shape_
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    // reallocate if shape_data_ has no memory yet or the existing block is too small
    shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
  }
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());  // type cast
  for (int i = 0; i < shape.size(); ++i) {
    CHECK_GE(shape[i], 0);
    if (count_ != 0) {
      CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
    }
    count_ *= shape[i];        // accumulate count_, the total number of elements
    shape_[i] = shape[i];      // fill shape_
    shape_data[i] = shape[i];  // fill shape_data
  }
  if (count_ > capacity_) {  // capacity_ is the allocated capacity; reallocate only when it is exceeded
    capacity_ = count_;
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
  }
}

template <typename Dtype>
void Blob<Dtype>::Reshape(const BlobShape& shape) {
  CHECK_LE(shape.dim_size(), kMaxBlobAxes);
  vector<int> shape_vec(shape.dim_size());
  for (int i = 0; i < shape.dim_size(); ++i) {
    shape_vec[i] = shape.dim(i);
  }
  Reshape(shape_vec);
}

template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.shape());
}

template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
    const int width)
  : capacity_(0) {  // allocate space for the blob; capacity_ must be zeroed before Reshape
  Reshape(num, channels, height, width);
}

template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)
  : capacity_(0) {  // allocate space for the blob
  Reshape(shape);
}

template <typename Dtype>
const int* Blob<Dtype>::gpu_shape() const {  // read-only pointer to the shape data on the GPU
  CHECK(shape_data_);
  return (const int*)shape_data_->gpu_data();
}

template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {  // read-only pointer to the CPU data
  CHECK(data_);
  return (const Dtype*)data_->cpu_data();
}

template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
  // point data_ at the given buffer, releasing the previous data;
  // see the SyncedMemory source for the details
  CHECK(data);
  data_->set_cpu_data(data);
}

template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {  // read-only pointer to the GPU data
  CHECK(data_);
  return (const Dtype*)data_->gpu_data();
}

template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_diff() const {  // read-only pointer to the CPU diff
  CHECK(diff_);
  return (const Dtype*)diff_->cpu_data();
}

template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_diff() const {  // read-only pointer to the GPU diff
  CHECK(diff_);
  return (const Dtype*)diff_->gpu_data();
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {  // writable pointer to the CPU data
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_cpu_data());
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {  // writable pointer to the GPU data
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_gpu_data());
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_diff() {  // writable pointer to the CPU diff
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_cpu_data());
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_diff() {  // writable pointer to the GPU diff
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_gpu_data());
}
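Reshape is worth a pause: because the backing memory only grows (when count_ > capacity_) and is never given back, shrinking a blob is free, while growing it reallocates both data_ and diff_. Here is a small sketch of that policy; it is my own test, assuming a working Caffe build, and comparing the SyncedMemory addresses returned by data() is just a way to observe whether a reallocation happened.

#include <iostream>
#include <vector>
#include "caffe/blob.hpp"

int main() {
  caffe::Blob<float> blob(1, 3, 224, 224);
  const void* before = blob.data().get();  // SyncedMemory currently backing data_

  std::vector<int> smaller(1, 100);        // shrink to a 1-D blob of 100 elements
  blob.Reshape(smaller);                   // count_ <= capacity_: no reallocation
  std::cout << (blob.data().get() == before) << "\n";  // 1: same memory block

  blob.Reshape(2, 3, 224, 224);            // count_ > capacity_: data_ and diff_ are reset
  std::cout << (blob.data().get() == before) << "\n";  // 0: freshly allocated block
  return 0;
}

Back to blob.cpp: the sharing functions, Update(), and the reductions.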
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {  // point data_ at other's data
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}

template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {  // point diff_ at other's diff
  CHECK_EQ(count_, other.count());
  diff_ = other.diff();
}

// explicit specializations of the member template: Update() is only
// meaningful for floating-point blobs
template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }      // likewise
template <> void Blob<bool>::Update() { NOT_IMPLEMENTED; }     // likewise

template <typename Dtype>
void Blob<Dtype>::Update() {
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:  // compute on the CPU
    caffe_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->cpu_data()),
        static_cast<Dtype*>(data_->mutable_cpu_data()));  // Y = Y - X, with X = diff and Y = data
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    // compute on the GPU
    caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->gpu_data()),
        static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
    NO_GPU;
#endif
    break;
  default:
    LOG(FATAL) << "Syncedmem not initialized.";
  }
}

template <> unsigned int Blob<unsigned int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> int Blob<int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> bool Blob<bool>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <typename Dtype>
Dtype Blob<Dtype>::asum_data() const {  // L1 norm of the data
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_data());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_data(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return 0;
}

template <> unsigned int Blob<unsigned int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> int Blob<int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> bool Blob<bool>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <typename Dtype>
Dtype Blob<Dtype>::asum_diff() const {  // L1 norm of the diff
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_diff());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_diff(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return 0;
}

template <> unsigned int Blob<unsigned int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> int Blob<int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> bool Blob<bool>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <typename Dtype>
Dtype Blob<Dtype>::sumsq_data() const {  // sum of squares (squared L2 norm) of the data
  Dtype sumsq;
  const Dtype* data;
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = cpu_data();
    sumsq = caffe_cpu_dot(count_, data, data);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = gpu_data();
    caffe_gpu_dot(count_, data, data, &sumsq);
#else
    NO_GPU;
#endif
    break;
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}
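Update() is the step the solvers rely on: it computes data = data - diff via axpy with alpha = -1, on whichever device currently holds the data, so after the solver has scaled the diff by the learning rate this is exactly the SGD update. A minimal CPU-mode sketch of that behavior (again my own example, assuming a working Caffe build):

#include <iostream>
#include <vector>
#include "caffe/blob.hpp"

int main() {
  std::vector<int> shape(1, 3);  // a 1-D blob with 3 elements
  caffe::Blob<float> w(shape);

  float* data = w.mutable_cpu_data();
  float* diff = w.mutable_cpu_diff();
  for (int i = 0; i < w.count(); ++i) {
    data[i] = 10.0f;
    diff[i] = 1.0f + i;  // stands in for lr * gradient
  }

  w.Update();  // caffe_axpy(count, -1, diff, data), i.e. data -= diff
  for (int i = 0; i < w.count(); ++i) {
    std::cout << w.cpu_data()[i] << " ";  // prints: 9 8 7
  }
  std::cout << "\n";
  return 0;
}

Continuing with the remaining reductions and the scaling helpers: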
template <> unsigned int Blob<unsigned int>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> int Blob<int>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> bool Blob<bool>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <typename Dtype>
Dtype Blob<Dtype>::sumsq_diff() const {  // sum of squares (squared L2 norm) of the diff
  Dtype sumsq;
  const Dtype* diff;
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = cpu_diff();
    sumsq = caffe_cpu_dot(count_, diff, diff);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = gpu_diff();
    caffe_gpu_dot(count_, diff, diff, &sumsq);
    break;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return sumsq;
}

template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor) {
  NOT_IMPLEMENTED;
}
template <> void Blob<int>::scale_data(int scale_factor) {
  NOT_IMPLEMENTED;
}
template <> void Blob<bool>::scale_data(bool scale_factor) {
  NOT_IMPLEMENTED;
}

template <typename Dtype>
void Blob<Dtype>::scale_data(Dtype scale_factor) {  // data = scale_factor * data
  Dtype* data;
  if (!data_) { return; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = mutable_cpu_data();
    caffe_scal(count_, scale_factor, data);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = mutable_gpu_data();
    caffe_gpu_scal(count_, scale_factor, data);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
}

template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor) {
  NOT_IMPLEMENTED;
}
template <> void Blob<int>::scale_diff(int scale_factor) {
  NOT_IMPLEMENTED;
}
template <> void Blob<bool>::scale_diff(bool scale_factor) {
  NOT_IMPLEMENTED;
}

template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {  // diff = scale_factor * diff
  Dtype* diff;
  if (!diff_) { return; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = mutable_cpu_diff();
    caffe_scal(count_, scale_factor, diff);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = mutable_gpu_diff();
    caffe_gpu_scal(count_, scale_factor, diff);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}

template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {  // does this blob's shape match the proto's?
  if (other.has_num() || other.has_channels() ||
      other.has_height() || other.has_width()) {
    // legacy 4-axis shape: compare against the one-padded legacy accessors
    return shape_.size() <= 4 &&
           LegacyShape(-4) == other.num() &&
           LegacyShape(-3) == other.channels() &&
           LegacyShape(-2) == other.height() &&
           LegacyShape(-1) == other.width();
  }
  vector<int> other_shape(other.shape().dim_size());
  for (int i = 0; i < other.shape().dim_size(); ++i) {
    other_shape[i] = other.shape().dim(i);
  }
  return shape_ == other_shape;
}
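The reductions and scalers are thin wrappers over the BLAS helpers in math_functions: caffe_cpu_asum is a sum of absolute values, caffe_cpu_dot of a vector with itself gives a sum of squares, and caffe_scal scales in place. A quick sketch of what they return, assuming a working Caffe build:

#include <iostream>
#include <vector>
#include "caffe/blob.hpp"

int main() {
  std::vector<int> shape(1, 4);
  caffe::Blob<float> blob(shape);

  float* data = blob.mutable_cpu_data();
  data[0] = -1.0f; data[1] = 2.0f; data[2] = -3.0f; data[3] = 4.0f;

  std::cout << blob.asum_data() << "\n";   // |-1|+|2|+|-3|+|4| = 10
  std::cout << blob.sumsq_data() << "\n";  // 1+4+9+16 = 30

  blob.scale_data(0.5f);                   // every element halved in place
  std::cout << blob.asum_data() << "\n";   // 5
  return 0;
}

The rest of the file handles copying and (de)serialization: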
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  // If the two blobs differ in shape, reshape this one when reshape is true
  // and die otherwise. copy_diff selects what is copied: the diff when true,
  // the data when false.
  if (source.count() != count_ || source.shape() != shape_) {
    if (reshape) {
      ReshapeLike(source);
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {  // read a blob back from a protocol buffer
  if (reshape) {
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() ||
        proto.has_height() || proto.has_width()) {
      // legacy 4-axis shape
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape);
  } else {
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // copy the payload
  Dtype* data_vec = mutable_cpu_data();
  if (proto.double_data_size() > 0) {
    CHECK_EQ(count_, proto.double_data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.double_data(i);
    }
  } else {
    CHECK_EQ(count_, proto.data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.data(i);
    }
  }
  if (proto.double_diff_size() > 0) {
    CHECK_EQ(count_, proto.double_diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.double_diff(i);
    }
  } else if (proto.diff_size() > 0) {
    CHECK_EQ(count_, proto.diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}

template <>
void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_double_data();
  proto->clear_double_diff();
  const double* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_double_data(data_vec[i]);
  }
  if (write_diff) {
    const double* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_double_diff(diff_vec[i]);
    }
  }
}

template <>
void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {  // same as above, for float
  proto->clear_shape();
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_data();
  proto->clear_diff();
  const float* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_data(data_vec[i]);
  }
  if (write_diff) {
    const float* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_diff(diff_vec[i]);
    }
  }
}

INSTANTIATE_CLASS(Blob);
// instantiate the Blob class for the integer types as well
template class Blob<bool>;
template class Blob<int>;
template class Blob<unsigned int>;

}  // namespace caffe
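To close, here is a round trip through BlobProto, which is the mechanism behind reading and writing .caffemodel files: ToProto records the shape and data (and optionally the diff), and FromProto restores them, reshaping by default. A minimal sketch, assuming a working Caffe build:

#include <iostream>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"

int main() {
  std::vector<int> shape(2, 2);  // a 2x2 blob
  caffe::Blob<float> src(shape);
  float* data = src.mutable_cpu_data();
  for (int i = 0; i < src.count(); ++i) {
    data[i] = 0.5f * i;
  }

  caffe::BlobProto proto;
  src.ToProto(&proto, /*write_diff=*/false);  // serialize shape + data, skip the diff

  caffe::Blob<float> dst;
  dst.FromProto(proto);                       // reshape defaults to true
  std::cout << dst.shape_string() << "\n";    // "2 2 (4)"
  std::cout << dst.cpu_data()[3] << "\n";     // 1.5, same as src
  return 0;
}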