Caffe Learning Series (17): Blob
2016-12-05 08:56
Let's start with the blob.h header file.
First the member variables. Six protected members are defined: the forward-pass data (data_), the backward-pass gradients (diff_), the blob's shape stored twice (shape_ as a plain vector and shape_data_ in synced memory), the element count (count_), and the allocated capacity (capacity_).
Then the member functions: the constructors (including the legacy one taking the four dimensions num, channels, height, width), Reshape (changes the blob's shape), and many inline accessors.
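As a quick map of that description, here is a minimal outline of the class skeleton, abbreviated from caffe/blob.hpp; this is a sketch, not the full header, and the method list is far from exhaustive:

```cpp
// Outline of the Blob declaration, abbreviated from caffe/blob.hpp.
// Only the six protected members and a few representative functions
// are shown; the real header declares many more inline accessors.
template <typename Dtype>
class Blob {
 public:
  Blob() : data_(), diff_(), count_(0), capacity_(0) {}
  explicit Blob(const int num, const int channels,
                const int height, const int width);  // legacy 4-axis form
  explicit Blob(const vector<int>& shape);

  void Reshape(const vector<int>& shape);  // change the blob's shape
  // ... num(), channels(), height(), width(), count(), offset(),
  // cpu_data(), mutable_cpu_data(), cpu_diff(), Update(), ...

 protected:
  shared_ptr<SyncedMemory> data_;        // forward-pass values
  shared_ptr<SyncedMemory> diff_;        // backward-pass gradients
  shared_ptr<SyncedMemory> shape_data_;  // shape in synced memory (GPU-readable)
  vector<int> shape_;                    // shape as a plain vector
  int count_;                            // elements currently in use
  int capacity_;                         // elements allocated
};
```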
A Blob has 4 dimensions: n x c x h x w.
bottom[0], bottom[1], ... are the layer's input blobs; their number tells how many inputs the layer takes.
bottom[0]->count(): the total number of elements in the input.
bottom[0]->num(): the number of blocks in the input; this is the batch dimension, i.e. how many images are fed in at once (batch_size).
c is the number of filters (convolution kernels); each filter produces one output channel. At the input layer, c is simply the number of image channels.
There is also dim: the number of elements per block, i.e. count / num.
Visually:
| xxxxx | xxxxx | xxxxx | xxxxx | xxxxx | xxxxx | xxxxx | xxxxx |
In the diagram above, num = 8, dim = 5, count = 8 * 5 = 40.
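To make the arithmetic concrete, here is a small sketch (the 8 x 1 x 1 x 5 shape is chosen only to reproduce the diagram; any shape works the same way):

```cpp
#include <cassert>
#include "caffe/blob.hpp"

// An 8 x 1 x 1 x 5 blob reproducing the diagram: 8 blocks of 5 elements.
void NumDimCountExample() {
  caffe::Blob<float> b(8, 1, 1, 5);
  assert(b.num() == 8);                 // 8 blocks = batch_size
  assert(b.count() == 40);              // 8 * 1 * 1 * 5 elements in total
  const int dim = b.count() / b.num();  // elements per block
  assert(dim == 5);                     // matches dim = 5 in the diagram
}
```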
References:
http://blog.csdn.net/qq_14975217/article/details/51524042
http://blog.csdn.net/xizero00/article/details/50886829
http://www.cnblogs.com/louyihang-loves-baiyan/p/5149628.html
For the common BLAS functions used below, see http://www.cnblogs.com/huashiyiqike/p/3886670.html (a minimal sketch of the axpy routine follows after this list).
For protobuf, see https://www.ibm.com/developerworks/cn/linux/l-cn-gpb/
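One BLAS routine is worth knowing before reading the code: Blob::Update() below reduces to a single axpy call (y = alpha * x + y, here with alpha = -1, x = diff, y = data). A plain-loop sketch of that semantics, independent of any BLAS library:

```cpp
// Loop-level equivalent of caffe_axpy(n, alpha, x, y): y[i] += alpha * x[i].
// Blob<Dtype>::Update() calls it with alpha = -1, x = diff, y = data,
// i.e. the parameter update data = data - diff.
template <typename T>
void axpy_sketch(int n, T alpha, const T* x, T* y) {
  for (int i = 0; i < n; ++i) {
    y[i] += alpha * x[i];
  }
}
```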
Now the implementation file, blob.cpp, with the annotations inline:
```cpp
#include <climits>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// the legacy 4-axis method delegates to the vector version
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
    const int width) {
  vector<int> shape(4);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = height;
  shape[3] = width;
  Reshape(shape);
}

template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
  CHECK_LE(shape.size(), kMaxBlobAxes);  // must not exceed the maximum number of axes (kMaxBlobAxes = 32)
  count_ = 1;
  shape_.resize(shape.size());  // resize the old shape vector to the new number of axes
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    // shape_data_ differs from shape_ in that it owns SyncedMemory,
    // so the shape is also readable on the GPU (see gpu_shape() below)
    shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
  }
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
  for (int i = 0; i < shape.size(); ++i) {
    CHECK_GE(shape[i], 0);  // sanity-check each dimension
    if (count_ != 0) {
      CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
    }
    count_ *= shape[i];  // total number of elements
    shape_[i] = shape[i];  // copy the shape into both shape_ and shape_data_
    shape_data[i] = shape[i];
  }
  if (count_ > capacity_) {  // reallocate only when the new count exceeds the capacity
    capacity_ = count_;
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
  }
}

template <typename Dtype>
void Blob<Dtype>::Reshape(const BlobShape& shape) {
  CHECK_LE(shape.dim_size(), kMaxBlobAxes);
  vector<int> shape_vec(shape.dim_size());
  for (int i = 0; i < shape.dim_size(); ++i) {
    shape_vec[i] = shape.dim(i);
  }
  Reshape(shape_vec);
}

template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.shape());
}

template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
    const int width)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(num, channels, height, width);
}

template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(shape);
}

template <typename Dtype>
const int* Blob<Dtype>::gpu_shape() const {
  CHECK(shape_data_);
  return (const int*)shape_data_->gpu_data();
}

// read-only access to data
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->cpu_data();
}

// set data
template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
  CHECK(data);
  // Make sure CPU and GPU sizes remain equal
  size_t size = count_ * sizeof(Dtype);
  if (data_->size() != size) {
    data_.reset(new SyncedMemory(size));
    diff_.reset(new SyncedMemory(size));
  }
  data_->set_cpu_data(data);
}

template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->gpu_data();
}

template <typename Dtype>
void Blob<Dtype>::set_gpu_data(Dtype* data) {
  CHECK(data);
  // Make sure CPU and GPU sizes remain equal
  size_t size = count_ * sizeof(Dtype);
  if (data_->size() != size) {
    data_.reset(new SyncedMemory(size));
    diff_.reset(new SyncedMemory(size));
  }
  data_->set_gpu_data(data);
}

template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_diff() const {
  CHECK(diff_);
  return (const Dtype*)diff_->cpu_data();
}

template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_diff() const {
  CHECK(diff_);
  return (const Dtype*)diff_->gpu_data();
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_cpu_data());
}
// (aside on the C++ keyword `mutable`: a member it qualifies can be
// modified even inside a const member function)

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_gpu_data());
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_diff() {
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_cpu_data());
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_diff() {
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_gpu_data());
}

// share (not copy) another blob's buffers
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}

template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {
  CHECK_EQ(count_, other.count());
  diff_ = other.diff();
}

// The "update" method is used for parameter blobs in a Net, which are stored
// as Blob<float> or Blob<double> -- hence we do not define it for
// Blob<int> or Blob<unsigned int>.
template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }

// Update dispatches on data_->head(); the result is data = data + (-1) * diff
template <typename Dtype>
void Blob<Dtype>::Update() {
  // We will perform update based on where the data is located.
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    // perform computation on CPU
    caffe_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->cpu_data()),
        static_cast<Dtype*>(data_->mutable_cpu_data()));
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    // perform computation on GPU
    caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->gpu_data()),
        static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
    NO_GPU;
#endif
    break;
  default:
    LOG(FATAL) << "Syncedmem not initialized.";
  }
}

template <> unsigned int Blob<unsigned int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

// L1 norm of the data
template <typename Dtype>
Dtype Blob<Dtype>::asum_data() const {
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_data());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_data(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return 0;
}

template <> unsigned int Blob<unsigned int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

// L1 norm of the diff
template <typename Dtype>
Dtype Blob<Dtype>::asum_diff() const {
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_diff());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_diff(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return 0;
}

template <> unsigned int Blob<unsigned int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

// sum of squares (squared L2 norm) of the data
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_data() const {
  Dtype sumsq;
  const Dtype* data;
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = cpu_data();
    sumsq = caffe_cpu_dot(count_, data, data);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = gpu_data();
    caffe_gpu_dot(count_, data, data, &sumsq);
#else
    NO_GPU;
#endif
    break;
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}

template <> unsigned int Blob<unsigned int>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <typename Dtype>
Dtype Blob<Dtype>::sumsq_diff() const {
  Dtype sumsq;
  const Dtype* diff;
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = cpu_diff();
    sumsq = caffe_cpu_dot(count_, diff, diff);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = gpu_diff();
    caffe_gpu_dot(count_, diff, diff, &sumsq);
    break;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}

template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor) {
  NOT_IMPLEMENTED;
}

template <> void Blob<int>::scale_data(int scale_factor) {
  NOT_IMPLEMENTED;
}

// scale the data by a constant factor
template <typename Dtype>
void Blob<Dtype>::scale_data(Dtype scale_factor) {
  Dtype* data;
  if (!data_) { return; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = mutable_cpu_data();
    caffe_scal(count_, scale_factor, data);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = mutable_gpu_data();
    caffe_gpu_scal(count_, scale_factor, data);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
}

template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor) {
  NOT_IMPLEMENTED;
}

template <> void Blob<int>::scale_diff(int scale_factor) {
  NOT_IMPLEMENTED;
}

template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
  Dtype* diff;
  if (!diff_) { return; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = mutable_cpu_diff();
    caffe_scal(count_, scale_factor, diff);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = mutable_gpu_diff();
    caffe_gpu_scal(count_, scale_factor, diff);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}

// whether this blob's shape matches the one stored in a BlobProto
template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
  if (other.has_num() || other.has_channels() ||
      other.has_height() || other.has_width()) {
    // a legacy proto sets num/channels/height/width instead of shape,
    // so the presence of those fields identifies the old format
    // Using deprecated 4D Blob dimensions --
    // shape is (num, channels, height, width).
    // Note: we do not use the normal Blob::num(), Blob::channels(), etc.
    // methods as these index from the beginning of the blob shape, where legacy
    // parameter blobs were indexed from the end of the blob shape (e.g., bias
    // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)).
    return shape_.size() <= 4 &&
           LegacyShape(-4) == other.num() &&
           LegacyShape(-3) == other.channels() &&
           LegacyShape(-2) == other.height() &&
           LegacyShape(-1) == other.width();
  }
  // otherwise compare against the proto's shape dimensions
  vector<int> other_shape(other.shape().dim_size());
  for (int i = 0; i < other.shape().dim_size(); ++i) {
    other_shape[i] = other.shape().dim(i);
  }
  return shape_ == other_shape;
}

// copy data (and optionally diff) from another blob
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (source.count() != count_ || source.shape() != shape_) {
    if (reshape) {
      ReshapeLike(source);
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
  if (reshape) {
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() ||
        proto.has_height() || proto.has_width()) {
      // a legacy proto's 4D dimensions are converted directly to a shape vector
      // Using deprecated 4D Blob dimensions --
      // shape is (num, channels, height, width).
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape);
  } else {
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // copy data and diff from the proto
  Dtype* data_vec = mutable_cpu_data();  // mutable (writable) CPU pointer to the data
  if (proto.double_data_size() > 0) {
    CHECK_EQ(count_, proto.double_data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.double_data(i);
    }
  } else {
    CHECK_EQ(count_, proto.data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.data(i);
    }
  }
  if (proto.double_diff_size() > 0) {
    CHECK_EQ(count_, proto.double_diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.double_diff(i);
    }
  } else if (proto.diff_size() > 0) {
    CHECK_EQ(count_, proto.diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}

template <>
void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_double_data();
  proto->clear_double_diff();
  const double* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_double_data(data_vec[i]);
  }
  if (write_diff) {
    const double* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_double_diff(diff_vec[i]);
    }
  }
}

template <>
void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_data();
  proto->clear_diff();
  const float* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_data(data_vec[i]);
  }
  if (write_diff) {
    const float* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_diff(diff_vec[i]);
    }
  }
}

INSTANTIATE_CLASS(Blob);
template class Blob<int>;
template class Blob<unsigned int>;

}  // namespace caffe
```
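To close, a small usage sketch exercising the functions walked through above; this is a hypothetical standalone program, assuming Caffe is built and linked and running in CPU mode:

```cpp
#include <iostream>
#include <vector>
#include "caffe/blob.hpp"

int main() {
  // A 2 x 3 x 4 x 5 blob via the legacy 4-axis constructor.
  caffe::Blob<float> blob(2, 3, 4, 5);
  std::cout << "count = " << blob.count() << std::endl;  // 120

  // Fill data with 1s and diff with 0.5s through the mutable accessors.
  float* data = blob.mutable_cpu_data();
  float* diff = blob.mutable_cpu_diff();
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = 1.0f;
    diff[i] = 0.5f;
  }

  blob.Update();  // data = data - diff: every element becomes 0.5
  std::cout << "asum_data = " << blob.asum_data() << std::endl;  // 60

  // Reshape to 4 x 30: same count (120), so no reallocation occurs.
  std::vector<int> new_shape(2);
  new_shape[0] = 4;
  new_shape[1] = 30;
  blob.Reshape(new_shape);
  std::cout << "num_axes = " << blob.num_axes() << std::endl;  // 2
  return 0;
}
```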