opencv之运动目标动作分割
2013-04-29 15:25
204 查看
本文所需要了解的库函数有:
如果 arr2 为空(NULL)
norm = ||arr1||C = maxI abs(arr1(I)), 如果 normType = CV_C
norm = ||arr1||L1 = sumI abs(arr1(I)), 如果 normType = CV_L1
norm = ||arr1||L2 = sqrt( sumI arr1(I)^2 ), 如果 normType = CV_L2
/* types of array norm */
#define CV_C 1
#define CV_L1 2
#define CV_L2 4
#define CV_NORM_MASK 7
#define CV_RELATIVE 8
#define CV_DIFF 16
#define CV_MINMAX 32
#define CV_DIFF_C (CV_DIFF | CV_C)
#define CV_DIFF_L1 (CV_DIFF | CV_L1)
#define CV_DIFF_L2 (CV_DIFF | CV_L2)
#define CV_RELATIVE_C (CV_RELATIVE | CV_C)
#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1)
#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2)
更多参考:http://www.opencv.org.cn/index.php/Cxcore%E6%95%B0%E7%BB%84%E6%93%8D%E4%BD%9C
对于上式,有个数学概念要明确:(范数)
当p取1,2,∞的时候分别是以下几种最简单的情形:
1-范数:║x║1=│x1│+│x2│+…+│xn│
2-范数:║x║2=(│x1│^2+│x2│^2+…+│xn│^2)^1/2
∞-范数:║x║∞=max(│x1│,│x2│,…,│xn│)
其中2-范数就是通常意义下的距离。
void cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst );//计算两个数组差的绝对值
void cvUpdateMotionHistory( const CvArr* silhouette, CvArr* mhi, double timestamp, double duration );利用运动轮廓图像(silhouette)更新运动历史图像(MHI)
void cvCalcMotionGradient( const CvArr* mhi, CvArr* mask, CvArr* orientation, double delta1, double delta2, int aperture_size=3 );计算运动历史图像的梯度方向
CvSeq* cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask, CvMemStorage* storage, double timestamp, double seg_thresh );将整个运动分割为独立的运动部分,返回独立的CvConnectedComp运动部件
double cvCalcGlobalOrientation( const CvArr* orientation, const CvArr* mask, const CvArr* mhi, double timestamp, double duration );计算某些选择区域的全局运动方向
double cvNorm( const CvArr* arr1, const CvArr* arr2=NULL, int norm_type=CV_L2, const CvArr* mask=NULL );//计算范数
如果 arr2 为空(NULL)
norm = ||arr1||C = maxI abs(arr1(I)), 如果 normType = CV_C
norm = ||arr1||L1 = sumI abs(arr1(I)), 如果 normType = CV_L1
norm = ||arr1||L2 = sqrt( sumI arr1(I)^2 ), 如果 normType = CV_L2
/* types of array norm */
#define CV_C 1
#define CV_L1 2
#define CV_L2 4
#define CV_NORM_MASK 7
#define CV_RELATIVE 8
#define CV_DIFF 16
#define CV_MINMAX 32
#define CV_DIFF_C (CV_DIFF | CV_C)
#define CV_DIFF_L1 (CV_DIFF | CV_L1)
#define CV_DIFF_L2 (CV_DIFF | CV_L2)
#define CV_RELATIVE_C (CV_RELATIVE | CV_C)
#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1)
#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2)
更多参考:http://www.opencv.org.cn/index.php/Cxcore%E6%95%B0%E7%BB%84%E6%93%8D%E4%BD%9C
对于上式,有个数学概念要明确:(范数)
当p取1,2,∞的时候分别是以下几种最简单的情形:
1-范数:║x║1=│x1│+│x2│+…+│xn│
2-范数:║x║2=(│x1│^2+│x2│^2+…+│xn│^2)^1/2
∞-范数:║x║∞=max(│x1│,│x2│,…,│xn│)
其中2-范数就是通常意义下的距离。
#pragma comment(lib,"cv.lib") #pragma comment(lib,"cvaux.lib") #pragma comment(lib,"highgui.lib") #pragma comment(lib,"cxcore.lib") #include<cv.h> #include<highgui.h> #include<stdio.h> #include<time.h> #include"Camer.h" IplImage* frame;//原始图像 IplImage* dist;//最终的显示图像 IplImage* pre=NULL;//前一张图像 IplImage* gray;//灰度图 IplImage* diff;//差值图 IplImage *mhi = 0; // 运动历史图 IplImage *orient = 0; // 方向图 IplImage *mask = 0; //掩码图 IplImage *segmask = 0; CvMemStorage* storage = 0; // 内存仓 const char* windowname= "演示"; void Process( IplImage* img) { double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds CvSize size = cvSize(img->width,img->height); // get current frame size cvCvtColor(img,gray,CV_BGR2GRAY);//灰度转换 cvAbsDiff(gray,pre,diff);//计算差值 cvCopy( gray,pre);//拷贝数据 便于下次使用 cvThreshold( diff, diff, 30, 1, CV_THRESH_BINARY ); // 二值化 cvUpdateMotionHistory( diff, mhi, timestamp, 1 ); // 运动历史图 // 转换MHI到8U cvCvtScale( mhi, mask, 255,(1 - timestamp)*255 ); cvZero( dist ); cvCvtPlaneToPix( mask, 0, 0, 0, dist ); // 计算运动的方向图 cvCalcMotionGradient( mhi, mask, orient, 0.5, 0.05, 3 ); //分割运动到独立的部分 cvClearMemStorage(storage); CvSeq *seq = cvSegmentMotion( mhi, segmask, storage, timestamp, 1 ); //获取所有的运动组件 for(int i = -1; i < seq->total; i++ ) { CvRect comp_rect; CvScalar color; double magnitude; if( i < 0 ) { // 整张图片的运动 comp_rect = cvRect( 0, 0, size.width, size.height ); color = CV_RGB(255,255,255); magnitude = 100; } else { //单独的运动部件 comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect; if( comp_rect.width + comp_rect.height < 100 ) // reject very small components continue; color = CV_RGB(255,0,0); magnitude = 30; } //设置ROI cvSetImageROI( diff, comp_rect ); cvSetImageROI( mhi, comp_rect ); cvSetImageROI( orient, comp_rect ); cvSetImageROI( mask, comp_rect ); // 计算方向 double angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, 1); angle = 360.0 - angle; // adjust for images with top-left origin double count = cvNorm( diff, 0, CV_L1, 0 ); // 
calculate number of points within silhouette ROI //重设ROI cvResetImageROI( mhi ); cvResetImageROI( orient ); cvResetImageROI( mask ); cvResetImageROI( diff ); // check for the case of little motion if( count < comp_rect.width*comp_rect.height * 0.05 ) continue; // draw a clock with arrow indicating the direction CvPoint center = cvPoint( (comp_rect.x + comp_rect.width/2), (comp_rect.y + comp_rect.height/2) ); cvCircle( dist, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 ); cvLine( dist, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)), cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 ); } } int main() { CCameraDS camer; camer.OpenCamera(0,false); //打开摄像头 frame = camer.QueryFrame(); if(frame==NULL) { return 1; } //创建相应的变量 pre= cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1); //创建前一张显示图像的存储空间 dist = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3); //创建显示图像 gray=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);//灰度图像 diff=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);//差值图像 mhi = cvCreateImage( cvGetSize(frame), IPL_DEPTH_32F, 1 ); orient = cvCreateImage( cvGetSize(frame), IPL_DEPTH_32F, 1 ); segmask = cvCreateImage( cvGetSize(frame), IPL_DEPTH_32F, 1 ); mask = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 ); storage = cvCreateMemStorage(0); cvNamedWindow(windowname,1); while(true) { frame = camer.QueryFrame(); if(!frame) break; Process(frame);//处理图像 cvShowImage(windowname,dist); if(cvWaitKey(10)>=0) break; } cvReleaseImage(&dist); cvReleaseImage(&pre); cvReleaseImage(&gray); cvReleaseImage(&diff); cvReleaseImage(&mhi); cvReleaseImage(&orient); cvReleaseImage(&segmask); cvReleaseImage(&mask); cvReleaseMemStorage(&storage); cvDestroyWindow(windowname); return 0; }
相关文章推荐
- 视频运动目标跟踪,基于opencv , vc++
- 基于像素自适应分割的运动目标检测算法
- 光流法-运动目标的检测二(opencv学习)
- OpenCV运动目标检测——帧间差,混合高斯模型方法
- 运动目标分割--区域生长
- 利用MOG2背景模型提取运动目标的OpenCV代码
- OpenCV实现静止背景下运动目标的检测
- OpenCV_基于混合高斯模型GMM的运动目标检测
- 基于像素自适应分割的运动目标检测算法
- 《OpenCV 3计算机视觉:Python语言实现》学习笔记——目标跟踪中基本运动检测的思考
- OpenCV_基于混合高斯模型GMM的运动目标检测
- OpenCV学习笔记(二十八)——光流法对运动目标跟踪Video
- 运动目标的背景建模-混合高斯背景建模和KNN模型建模的OpenCV代码实现
- OpenCV实现静止背景下运动目标的检测
- opencv 运动目标检测
- 【OpenCV】OpenCV3的第二天——光流法(Optial Flow)运动目标检测
- 【Python+OpenCV】目标跟踪-实现基本的运动检测
- OpenCV实现静止背景下运动目标的检测
- OpenCV_基于混合高斯模型GMM的运动目标检测
- 基于opencv的L-K光流法跟踪运动目标