Opencv3.1 MultiBandBlender源码分析
2017-01-05 11:56
127 查看
// Prepares the blender for a panorama whose final region of interest is dst_roi:
// chooses the effective number of pyramid bands, pads the ROI so it is evenly
// divisible by (1 << num_bands_), and allocates zeroed Laplacian / band-weight
// pyramids of num_bands_ + 1 levels each.
void MultiBandBlender::prepare(Rect dst_roi)
{
    dst_roi_final_ = dst_roi;

    // Crop unnecessary bands: never use more bands than log2 of the longest side,
    // otherwise the coarsest pyramid level would shrink below one pixel.
    const double longest_side = static_cast<double>(std::max(dst_roi.width, dst_roi.height));
    num_bands_ = std::min(actual_num_bands_,
                          static_cast<int>(ceil(std::log(longest_side) / std::log(2.0))));

    // Pad the final image so both dimensions are multiples of (1 << num_bands_);
    // this keeps the scale between consecutive pyramid levels exactly 2.
    const int align = 1 << num_bands_;
    dst_roi.width  += (align - dst_roi.width  % align) % align;
    dst_roi.height += (align - dst_roi.height % align) % align;

    // Base-class prepare() creates dst_ / dst_mask_ and stores dst_roi_.
    Blender::prepare(dst_roi);

    // Level 0 of the destination Laplacian pyramid aliases dst_ itself.
    dst_pyr_laplace_.resize(num_bands_ + 1);
    dst_pyr_laplace_[0] = dst_;

    // Accumulated per-band weights; level 0 covers the padded ROI and starts at zero.
    dst_band_weights_.resize(num_bands_ + 1);
    dst_band_weights_[0].create(dst_roi.size(), weight_type_);
    dst_band_weights_[0].setTo(0);

    // Each higher level is half the size of the one below (rounded up), zero-filled.
    for (int lvl = 1; lvl <= num_bands_; ++lvl)
    {
        dst_pyr_laplace_[lvl].create((dst_pyr_laplace_[lvl - 1].rows + 1) / 2,
                                     (dst_pyr_laplace_[lvl - 1].cols + 1) / 2, CV_16SC3);
        dst_band_weights_[lvl].create((dst_band_weights_[lvl - 1].rows + 1) / 2,
                                      (dst_band_weights_[lvl - 1].cols + 1) / 2, weight_type_);
        dst_pyr_laplace_[lvl].setTo(Scalar::all(0));
        dst_band_weights_[lvl].setTo(0);
    }
}
1. 确定最终ROI区域的大小
2. 确定最终的要用多少个band
3. 对感兴趣区域的宽和高进行调整,确保能被 1 << num_bands_ 整除
4. 调用Blender类下的prepare方法,创建dst_、dst_mask_,并把dst_roi赋值给Blender下的成员变量dst_roi_
5. 将ROI区域赋值给dst_pyr_laplace_的第0层,拉普拉斯金字塔总共num_bands_+1 层,相应的band权重的金字塔也为 num_bands_+1 层,band权重金字塔第0层的清零
6. 金字塔第1层的宽和高是第0层的一半,同理类推,第2层是1层的一半,第3层是2层的一半… ,最终权重金字塔的每一层都清零
分析
void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
// Feeds one source image (CV_16SC3 or CV_8UC3) with its CV_8U mask into the blender.
// tl is the image's top-left corner in panorama coordinates. The image is expanded
// with a small aligned border, decomposed into a Laplacian pyramid, weighted by the
// Gaussian pyramid of the mask, and accumulated into the destination pyramids that
// prepare() allocated.
void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl)
{
#ifdef ENABLE_LOG
    int64 t = getTickCount();
#endif

    UMat img = _img.getUMat();
    CV_Assert(img.type() == CV_16SC3 || img.type() == CV_8UC3);
    CV_Assert(mask.type() == CV_8U);

    // Keep source image in memory with small border
    // (gap pixels on every side, clamped to the destination ROI).
    int gap = 3 * (1 << num_bands_);
    Point tl_new(std::max(dst_roi_.x, tl.x - gap),
                 std::max(dst_roi_.y, tl.y - gap));
    Point br_new(std::min(dst_roi_.br().x, tl.x + img.cols + gap),
                 std::min(dst_roi_.br().y, tl.y + img.rows + gap));

    // Ensure coordinates of top-left, bottom-right corners are divided by (1 << num_bands_).
    // After that scale between layers is exactly 2.
    //
    // We do it to avoid interpolation problems when keeping sub-images only. There is no such problem when
    // image is bordered to have size equal to the final image size, but this is too memory hungry approach.
    tl_new.x = dst_roi_.x + (((tl_new.x - dst_roi_.x) >> num_bands_) << num_bands_);
    tl_new.y = dst_roi_.y + (((tl_new.y - dst_roi_.y) >> num_bands_) << num_bands_);
    int width = br_new.x - tl_new.x;
    int height = br_new.y - tl_new.y;
    // Round width/height up to the next multiple of (1 << num_bands_).
    width += ((1 << num_bands_) - width % (1 << num_bands_)) % (1 << num_bands_);
    height += ((1 << num_bands_) - height % (1 << num_bands_)) % (1 << num_bands_);
    br_new.x = tl_new.x + width;
    br_new.y = tl_new.y + height;
    // If the rounded rectangle overshoots the destination ROI, shift it back inside.
    int dy = std::max(br_new.y - dst_roi_.br().y, 0);
    int dx = std::max(br_new.x - dst_roi_.br().x, 0);
    tl_new.x -= dx;
    br_new.x -= dx;
    tl_new.y -= dy;
    br_new.y -= dy;

    // Border widths that take the original image rect (tl, img.size()) to (tl_new, br_new).
    int top = tl.y - tl_new.y;
    int left = tl.x - tl_new.x;
    int bottom = br_new.y - tl.y - img.rows;
    int right = br_new.x - tl.x - img.cols;

    // Create the source image Laplacian pyramid
    // (reflected border avoids edge artifacts in the pyramid filters).
    UMat img_with_border;
    copyMakeBorder(_img, img_with_border, top, bottom, left, right, BORDER_REFLECT);
    LOGLN(" Add border to the source image, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#ifdef ENABLE_LOG
    t = getTickCount();
#endif

    std::vector<UMat> src_pyr_laplace;
    // GPU path requires CV_16S input depth; otherwise fall back to the CPU builder.
    if (can_use_gpu_ && img_with_border.depth() == CV_16S)
        createLaplacePyrGpu(img_with_border, num_bands_, src_pyr_laplace);
    else
        createLaplacePyr(img_with_border, num_bands_, src_pyr_laplace);
    LOGLN(" Create the source image Laplacian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#ifdef ENABLE_LOG
    t = getTickCount();
#endif

    // Create the weight map Gaussian pyramid
    UMat weight_map;
    std::vector<UMat> weight_pyr_gauss(num_bands_ + 1);

    if(weight_type_ == CV_32F)
    {
        // Float weights: normalize mask from [0,255] to [0,1].
        mask.getUMat().convertTo(weight_map, CV_32F, 1./255.);
    }
    else // weight_type_ == CV_16S
    {
        // Fixed-point weights: keep mask values as-is and add 1 wherever the mask
        // is non-zero, so that any covered pixel has a strictly positive weight.
        mask.getUMat().convertTo(weight_map, CV_16S);
        UMat add_mask;
        compare(mask, 0, add_mask, CMP_NE);
        add(weight_map, Scalar::all(1), weight_map, add_mask);
    }

    // Border the weight map the same way as the image, but with zeros:
    // the reflected image border must contribute nothing.
    copyMakeBorder(weight_map, weight_pyr_gauss[0], top, bottom, left, right, BORDER_CONSTANT);

    for (int i = 0; i < num_bands_; ++i)
        pyrDown(weight_pyr_gauss[i], weight_pyr_gauss[i + 1]);
    LOGLN(" Create the weight map Gaussian pyramid, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
#ifdef ENABLE_LOG
    t = getTickCount();
#endif

    // Sub-image rectangle in destination-pyramid coordinates (level 0);
    // halved after each iteration to follow the pyramid downscaling.
    int y_tl = tl_new.y - dst_roi_.y;
    int y_br = br_new.y - dst_roi_.y;
    int x_tl = tl_new.x - dst_roi_.x;
    int x_br = br_new.x - dst_roi_.x;

    // Add weighted layer of the source image to the final Laplacian pyramid layer
    for (int i = 0; i <= num_bands_; ++i)
    {
        Rect rc(x_tl, y_tl, x_br - x_tl, y_br - y_tl);
#ifdef HAVE_OPENCL
        // Try the OpenCL kernel first; fall through to the CPU loops if it is
        // unavailable or fails.
        if ( !cv::ocl::useOpenCL() ||
             !ocl_MultiBandBlender_feed(src_pyr_laplace[i], weight_pyr_gauss[i],
                                        dst_pyr_laplace_[i](rc), dst_band_weights_[i](rc)) )
#endif
        {
            Mat _src_pyr_laplace = src_pyr_laplace[i].getMat(ACCESS_READ);
            Mat _dst_pyr_laplace = dst_pyr_laplace_[i](rc).getMat(ACCESS_RW);
            Mat _weight_pyr_gauss = weight_pyr_gauss[i].getMat(ACCESS_READ);
            Mat _dst_band_weights = dst_band_weights_[i](rc).getMat(ACCESS_RW);
            if(weight_type_ == CV_32F)
            {
                // dst += src * w per channel; accumulate w into the band weights.
                for (int y = 0; y < rc.height; ++y)
                {
                    const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
                    Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
                    const float* weight_row = _weight_pyr_gauss.ptr<float>(y);
                    float* dst_weight_row = _dst_band_weights.ptr<float>(y);

                    for (int x = 0; x < rc.width; ++x)
                    {
                        dst_row[x].x += static_cast<short>(src_row[x].x * weight_row[x]);
                        dst_row[x].y += static_cast<short>(src_row[x].y * weight_row[x]);
                        dst_row[x].z += static_cast<short>(src_row[x].z * weight_row[x]);
                        dst_weight_row[x] += weight_row[x];
                    }
                }
            }
            else // weight_type_ == CV_16S
            {
                // Fixed-point variant: weights carry 8 fractional bits, hence >> 8.
                // (y_br - y_tl and x_br - x_tl equal rc.height and rc.width here.)
                for (int y = 0; y < y_br - y_tl; ++y)
                {
                    const Point3_<short>* src_row = _src_pyr_laplace.ptr<Point3_<short> >(y);
                    Point3_<short>* dst_row = _dst_pyr_laplace.ptr<Point3_<short> >(y);
                    const short* weight_row = _weight_pyr_gauss.ptr<short>(y);
                    short* dst_weight_row = _dst_band_weights.ptr<short>(y);

                    for (int x = 0; x < x_br - x_tl; ++x)
                    {
                        dst_row[x].x += short((src_row[x].x * weight_row[x]) >> 8);
                        dst_row[x].y += short((src_row[x].y * weight_row[x]) >> 8);
                        dst_row[x].z += short((src_row[x].z * weight_row[x]) >> 8);
                        dst_weight_row[x] += weight_row[x];
                    }
                }
            }
        }
#ifdef HAVE_OPENCL
        else
        {
            CV_IMPL_ADD(CV_IMPL_OCL);
        }
#endif

        // Descend one pyramid level: the rectangle halves in both dimensions.
        x_tl /= 2; y_tl /= 2;
        x_br /= 2; y_br /= 2;
    }
    LOGLN(" Add weighted layer of the source image to the final Laplacian pyramid layer, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
1. 创建拉普拉斯金字塔源码分析
void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
// Builds a Laplacian pyramid of num_levels + 1 images in pyr: levels 0..num_levels-1
// hold the band-pass differences (level - upsampled next level), and the last level
// holds the coarsest Gaussian residual. 8-bit input is converted to CV_16S so the
// differences (which can be negative) fit; other depths keep the input depth.
void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
    cv::Mat imgMat = img.getMat();
    if(tegra::useTegra() && tegra::createLaplacePyr(imgMat, num_levels, pyr))
        return;
#endif

    // The pyramid holds num_levels + 1 images.
    pyr.resize(num_levels + 1);

    if(img.depth() == CV_8U)
    {
        if(num_levels == 0)
        {
            // Degenerate pyramid: a single level, just depth-converted.
            img.getUMat().convertTo(pyr[0], CV_16S);
            return;
        }

        // Rolling pair of Gaussian levels: `current` is level i-1, `downNext` level i.
        UMat downNext;
        UMat current = img.getUMat();
        pyrDown(img, downNext);

        for(int i = 1; i < num_levels; ++i)
        {
            UMat lvl_up;
            UMat lvl_down;

            // Laplacian level i-1 = current - upsample(downNext), widened to CV_16S.
            pyrDown(downNext, lvl_down);
            pyrUp(downNext, lvl_up, current.size());
            subtract(current, lvl_up, pyr[i-1], noArray(), CV_16S);

            // Slide the window one level down the Gaussian pyramid.
            current = downNext;
            downNext = lvl_down;
        }

        {
            // Last band-pass level plus the coarsest residual.
            UMat lvl_up;
            pyrUp(downNext, lvl_up, current.size());
            subtract(current, lvl_up, pyr[num_levels-1], noArray(), CV_16S);

            downNext.convertTo(pyr[num_levels], CV_16S);
        }
    }
    else
    {
        // Non-8U path: build the full Gaussian pyramid in place, then convert
        // all but the last level to band-pass differences.
        // NOTE(review): pyr[0] aliases the input UMat, so the in-place subtract
        // below also modifies the caller's image data — verify callers expect this.
        pyr[0] = img.getUMat();
        for (int i = 0; i < num_levels; ++i)
            pyrDown(pyr[i], pyr[i + 1]);
        UMat tmp;
        for (int i = 0; i < num_levels; ++i)
        {
            pyrUp(pyr[i + 1], tmp, pyr[i].size());
            subtract(pyr[i], tmp, pyr[i]);
        }
    }
}
相关文章推荐
- opencv3.1
- vs2015+opencv3.1+pcl1.4环境配置
- 搭建opencv3.1
- Ubuntu 16.04+CUDA8.0+Caffe+OpenCV3.1
- win10+Opencv3.1+VS2015
- OpenCV3.1与VS2013配置教程记录(64位win7旗舰版)
- Ubuntu16环境下安装yolo2关于opencv3.1出现的错误记录
- opencv3.1中的opencv_traincascade人脸检测训练代码分析
- 基于ubuntu16.04配置opencv3.1+python3.5
- 20140404 OpencvGPU模块 参考文献交叉引用:引用->题注 加入3.1,3.2,3.2编号
- 爆详细Ubuntu16.04,CUDA9.1,OpenCV3.1,Tensorflow完全配置指南
- ubuntu16.04 opencv3.1安装
- OpenCV 3.1 imwrite()函数写入异常问题解决方法
- ubuntu16.04 安装 openCV3.1 问题
- Opencv3.1基于Vibe去除前景
- Win10 下Cmake编译配置 Opencv3.1 + Cuda7.5 + VS2013
- opencv3.1 距离变换
- vs配置各种库(以opencv3.1为例)
- opencv3.1+vs2015配置及图像加载、修改、保存
- 在Ubuntu16.04安装OpenCV3.1并实现USB摄像头图像采集