
Object Tracking on iOS with OpenCV

2014-11-11 21:14
Download the iOS build of opencv2.framework from the OpenCV website and add it to your Xcode project.

Below is Objective-C code I adapted from the camshift sample source.

Since we will analyze the video frame by frame, link the video-related frameworks and add the following headers. Note that tracking.hpp and objdetect.hpp are C++ headers, so the file that imports them must be compiled as Objective-C++ (give it a .mm extension).

#import <opencv2/imgproc/imgproc_c.h>

#import <opencv2/video/tracking.hpp>

#import <opencv2/objdetect/objdetect.hpp>



Listen to the video stream

Make the class that receives camera frames conform to <AVCaptureVideoDataOutputSampleBufferDelegate>, the delegate protocol that delivers each captured frame.
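For context, here is a minimal sketch of what the controller interface might look like. The class name TrackingViewController is an assumption, not from the original post; the imageView ivar is assumed to exist because the tracking code below references it.

@interface TrackingViewController : UIViewController
        <AVCaptureVideoDataOutputSampleBufferDelegate>
{
    UIImageView *imageView;   // canvas that CamShiftDetect: draws against
}
- (void)setupCaptureSession;
- (void)CamShiftDetect:(UIImage *)capPic;
@end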

The following helpers convert between UIImage and OpenCV image data; they are called later in the code.

#pragma mark -

#pragma mark OpenCV Support Methods

// NOTE: the caller must release the returned image with cvReleaseImage() when done.
- (IplImage *)CreateIplImageFromUIImage:(UIImage *)image {
    CGImageRef imageRef = image.CGImage;

    // Draw the UIImage into a 4-channel RGBA IplImage via a bitmap context
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    IplImage *iplimage = cvCreateImage(cvSize(image.size.width, image.size.height),
                                       IPL_DEPTH_8U, 4);
    CGContextRef contextRef = CGBitmapContextCreate(iplimage->imageData,
                                                    iplimage->width, iplimage->height,
                                                    iplimage->depth, iplimage->widthStep,
                                                    colorSpace,
                                                    kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
    CGContextDrawImage(contextRef, CGRectMake(0, 0, image.size.width, image.size.height), imageRef);
    CGContextRelease(contextRef);
    CGColorSpaceRelease(colorSpace);

    // Drop the alpha channel: convert RGBA to the 3-channel BGR layout OpenCV expects
    IplImage *ret = cvCreateImage(cvGetSize(iplimage), IPL_DEPTH_8U, 3);
    cvCvtColor(iplimage, ret, CV_RGBA2BGR);
    cvReleaseImage(&iplimage);

    return ret;
}

// NOTE: convert the image to RGB before passing it to this function.
- (UIImage *)UIImageFromIplImage:(IplImage *)image {
    NSLog(@"IplImage (%d, %d) %d bits by %d channels, %d bytes/row %s",
          image->width, image->height, image->depth, image->nChannels,
          image->widthStep, image->channelSeq);

    // Wrap the raw pixel buffer in a CGImage, preserving OpenCV's row layout
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    NSData *data = [NSData dataWithBytes:image->imageData length:image->imageSize];
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)data);
    CGImageRef imageRef = CGImageCreate(image->width, image->height,
                                        image->depth, image->depth * image->nChannels,
                                        image->widthStep,
                                        colorSpace, kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                        provider, NULL, false, kCGRenderingIntentDefault);
    UIImage *ret = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return ret;
}
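For illustration, a hypothetical round trip through the two helpers; the image name is made up.

UIImage *input = [UIImage imageNamed:@"sample"];          // hypothetical bundled image
IplImage *bgr = [self CreateIplImageFromUIImage:input];   // 3-channel BGR

IplImage *rgb = cvCreateImage(cvGetSize(bgr), IPL_DEPTH_8U, 3);
cvCvtColor(bgr, rgb, CV_BGR2RGB);                         // helper above expects RGB
UIImage *output = [self UIImageFromIplImage:rgb];

cvReleaseImage(&bgr);                                     // caller owns both IplImages
cvReleaseImage(&rgb);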

Below is the detection routine I adapted from the camshift demo:

- (void)CamShiftDetect:(UIImage *)capPic {
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

    IplImage *image = 0, *hsv = 0, *hue = 0,
             *mask = 0, *backproject = 0, *histimg = 0;
    CvHistogram *hist = 0;
    int backproject_mode = 0;
    int track_object = 0;
    int select_object = 0;
    CvConnectedComp track_comp;
    CvRect selection;
    CvRect track_window;
    CvBox2D track_box;

    int hdims = 16;                      // number of hue histogram bins
    float hranges_arr[] = {0, 180};      // hue range in OpenCV's 8-bit HSV
    float *hranges = hranges_arr;
    int vmin = 90, vmax = 256, smin = 90;

    cvSetErrMode(CV_ErrModeParent);

    /* allocate all the buffers */
    IplImage *frame = [self CreateIplImageFromUIImage:capPic];

    image = cvCreateImage(cvGetSize(frame), 8, 3);
    image->origin = frame->origin;
    hsv = cvCreateImage(cvGetSize(frame), 8, 3);
    hue = cvCreateImage(cvGetSize(frame), 8, 1);
    mask = cvCreateImage(cvGetSize(frame), 8, 1);
    backproject = cvCreateImage(cvGetSize(frame), 8, 1);
    hist = cvCreateHist(1, &hdims, CV_HIST_ARRAY, &hranges, 1);
    histimg = cvCreateImage(cvSize(360, 480), 8, 3);
    cvZero(histimg);

    // Build the hue histogram from the target image bundled with the app
    NSString *path = [[NSBundle mainBundle] pathForResource:@"target12" ofType:@"jpg"];
    IplImage *tempimage = [self CreateIplImageFromUIImage:[UIImage imageWithContentsOfFile:path]];

    cvCvtColor(tempimage, hsv, CV_BGR2HSV);
    int _vmin = vmin, _vmax = vmax;

    // Mask out pixels whose saturation/value fall outside the useful range
    cvInRangeS(hsv, cvScalar(0, smin, MIN(_vmin, _vmax), 0),
               cvScalar(180, 256, MAX(_vmin, _vmax), 0), mask);
    cvSplit(hsv, hue, 0, 0, 0);

    selection.x = 1;
    selection.y = 1;
    selection.width = 360 - 1;
    selection.height = 480 - 1;

    cvSetImageROI(hue, selection);
    cvSetImageROI(mask, selection);
    cvCalcHist(&hue, hist, 0, mask);

    // Normalize the histogram to the 0..255 range
    float max_val = 0.f;
    cvGetMinMaxHistValue(hist, 0, &max_val, 0, 0);
    cvConvertScale(hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0);
    cvResetImageROI(hue);
    cvResetImageROI(mask);
    track_window = selection;
    track_object = 1;

    // Render the histogram for debugging
    cvZero(histimg);
    int bin_w = histimg->width / hdims;
    for (int i = 0; i < hdims; i++) {
        int val = cvRound(cvGetReal1D(hist->bins, i) * histimg->height / 255);
        CvScalar color = hsv2rgb(i * 180.f / hdims);
        cvRectangle(histimg, cvPoint(i * bin_w, histimg->height),
                    cvPoint((i + 1) * bin_w, histimg->height - val),
                    color, -1, 8, 0);
    }

    cvReleaseImage(&tempimage);
    cvCopy(frame, image, 0);
    cvCvtColor(image, hsv, CV_BGR2HSV);

    if (track_object) {
        int _vmin = vmin, _vmax = vmax;

        cvInRangeS(hsv, cvScalar(0, smin, MIN(_vmin, _vmax), 0),
                   cvScalar(180, 256, MAX(_vmin, _vmax), 0), mask);
        cvSplit(hsv, hue, 0, 0, 0);

        // Back-project the target histogram onto the current frame
        cvCalcBackProject(&hue, backproject, hist);
        cvAnd(backproject, mask, backproject, 0);

        // Run CamShift on the back projection to locate the target
        cvCamShift(backproject, track_window,
                   cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
                   &track_comp, &track_box);
        track_window = track_comp.rect;

        if (backproject_mode)
            cvCvtColor(backproject, image, CV_GRAY2BGR);
        if (image->origin)
            track_box.angle = -track_box.angle;
        cvEllipseBox(image, track_box, CV_RGB(255, 0, 0), 3, CV_AA, 0);

        // Create a canvas to show the results
        CGImageRef imageRef = imageView.image.CGImage;
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef contextRef = CGBitmapContextCreate(NULL, imageView.image.size.width, imageView.image.size.height,
                                                        8, imageView.image.size.width * 4,
                                                        colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);
        CGContextDrawImage(contextRef, CGRectMake(0, 0, imageView.image.size.width, imageView.image.size.height),
                           imageRef);

        CGContextSetLineWidth(contextRef, 4);
        CGContextSetRGBStrokeColor(contextRef, 0.0, 0.0, 1.0, 0.5);

        // Log the tracked window and hand the box off for drawing on the image
        NSLog(@"%d\n%d\n%d\n%d", track_window.x, track_window.y, track_window.width, track_window.height);
        NSLog(@"box %@", NSStringFromCGRect(CGRectMake(track_box.center.x, track_box.center.y,
                                                       track_box.size.width, track_box.size.height)));
        [self performSelectorInBackground:@selector(draw1:)
                               withObject:NSStringFromCGRect(CGRectMake(360 - track_box.center.y, track_box.center.x,
                                                                        track_box.size.width, track_box.size.height))];

        CGContextRelease(contextRef);
        CGColorSpaceRelease(colorSpace);
    }

    if (select_object && selection.width > 0 && selection.height > 0) {
        cvSetImageROI(image, selection);
        cvXorS(image, cvScalarAll(255), image, 0);
        cvResetImageROI(image);
    }

    // Release the per-frame buffers so each frame does not leak memory
    cvReleaseImage(&frame);
    cvReleaseImage(&image);
    cvReleaseImage(&hsv);
    cvReleaseImage(&hue);
    cvReleaseImage(&mask);
    cvReleaseImage(&backproject);
    cvReleaseImage(&histimg);
    cvReleaseHist(&hist);

    [self hideProgressIndicator];
    [pool release];
}
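The method above calls hsv2rgb, which the post never defines; it comes straight from OpenCV's camshiftdemo.c sample and maps a histogram bin's hue to a display color:

CvScalar hsv2rgb(float hue)
{
    int rgb[3], p, sector;
    static const int sector_data[][3] =
        {{0,2,1}, {1,2,0}, {1,0,2}, {2,0,1}, {2,1,0}, {0,1,2}};
    hue *= 0.033333333333333333333333333333333f;   // map [0,180) hue into 6 sectors
    sector = cvFloor(hue);
    p = cvRound(255 * (hue - sector));
    p ^= sector & 1 ? 255 : 0;

    rgb[sector_data[sector][0]] = 255;
    rgb[sector_data[sector][1]] = 0;
    rgb[sector_data[sector][2]] = p;

    return cvScalar(rgb[2], rgb[1], rgb[0], 0);    // return as BGR scalar
}

The tracked box is also handed to a draw1: method the post does not show. A plausible sketch, assuming a hypothetical overlayView ivar that marks the target on screen:

- (void)draw1:(NSString *)rectString {
    CGRect box = CGRectFromString(rectString);
    // UIKit must be touched on the main thread, even though the
    // selector was fired on a background thread.
    dispatch_async(dispatch_get_main_queue(), ^{
        overlayView.frame = box;   // assumed UIView ivar highlighting the target
    });
}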

Call the detection method from the capture delegate:

- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    [self CamShiftDetect:[self imageFromSampleBuffer:sampleBuffer]];
}
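imageFromSampleBuffer: is also not shown in the post. A sketch adapted from Apple's AVFoundation sample code, assuming the 32BGRA pixel format configured below:

- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer {
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // 32BGRA maps to a little-endian premultiplied-first bitmap context
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                 bytesPerRow, colorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);

    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    UIImage *image = [UIImage imageWithCGImage:quartzImage];
    CGImageRelease(quartzImage);
    return image;
}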

Finally, set up and start the capture session:

- (void)setupCaptureSession
{
    NSError *error = nil;

    // Create the session
    AVCaptureSession *session = [[[AVCaptureSession alloc] init] autorelease];

    // Configure the session to produce lower resolution video frames, if your
    // processing algorithm can cope. We'll specify medium quality for the
    // chosen device.
    session.sessionPreset = AVCaptureSessionPresetMedium;

    // Find a suitable AVCaptureDevice. This defaults to the back camera;
    // you can switch to the front camera instead.
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];

    // Create a device input with the device and add it to the session.
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device
                                                                        error:&error];
    if (!input) {
        // Handle the error appropriately.
    }
    [session addInput:input];

    // Create a VideoDataOutput and add it to the session
    AVCaptureVideoDataOutput *output = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
    [session addOutput:output];

    // Deliver sample buffers to our delegate on a dedicated serial queue
    dispatch_queue_t queue = dispatch_queue_create("myQueue", NULL);
    [output setSampleBufferDelegate:self queue:queue];
    dispatch_release(queue);

    // Specify the pixel format and frame size
    output.videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                            [NSNumber numberWithInt:kCVPixelFormatType_32BGRA], (id)kCVPixelBufferPixelFormatTypeKey,
                            [NSNumber numberWithInt:360], (id)kCVPixelBufferWidthKey,
                            [NSNumber numberWithInt:480], (id)kCVPixelBufferHeightKey,
                            nil];

    // Show the live camera feed behind the tracking overlay
    AVCaptureVideoPreviewLayer *preLayer = [AVCaptureVideoPreviewLayer layerWithSession:session];
    preLayer.frame = CGRectMake(0, 0, 360, 480);
    preLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
    [self.view.layer addSublayer:preLayer];

    // Cap the frame rate by setting minFrameDuration. CMTimeMake(1, 1)
    // limits delivery to one frame per second; use CMTimeMake(1, 15) for 15 fps.
    output.minFrameDuration = CMTimeMake(1, 1);

    // Start the session running to start the flow of data
    [session startRunning];

    // Assign session to an ivar if you need to stop it later.
    //[self setSession:session];
}
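To wire everything together, call setupCaptureSession once the view is ready; a minimal sketch:

- (void)viewDidLoad {
    [super viewDidLoad];
    [self setupCaptureSession];   // starts the camera and frame delivery
}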

The code has not been cleaned up much. To use it, crop a small patch of the object you want to track and bundle it as the target image; when the app launches it will automatically track that object and draw a circle at its location. Tracking is fairly slow, probably because processing video on the phone is expensive.
Tags: iOS, CamShift, object tracking