
Visual Measurement and 3D Reconstruction (II)

2015-03-10 10:33
// Generate a chessboard image.
#include "cv.h"
#include "highgui.h"
#define numx 9
#define numy 9
#define size 200
#define filename "chessboard.jpg"
int main()
{
    IplImage* image = cvCreateImage(cvSize(numx*size, numy*size), 8, 1);
    // First pass: draw vertical black/white stripes.
    for(int i = 0; i < image->height; i++)
    {
        uchar* data = (uchar*)image->imageData + image->widthStep*i;
        for(int j = 0; j < image->width; j++)
        {
            if((j/size) % 2 == 1)
                *data = 255;
            else
                *data = 0;
            data++;
        }
    }
    // Second pass: invert every other row of squares to form the chessboard.
    for(int i = 0; i < image->height; i++)
    {
        if((i/size) % 2 == 1)
        {
            uchar* data = (uchar*)image->imageData + image->widthStep*i;
            for(int j = 0; j < image->width; j++)
            {
                *data = 255 - *data;
                data++;
            }
        }
    }
    cvSaveImage(filename, image);
    cvReleaseImage(&image);
    return 0;
}
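For reference, the same board can be generated with the newer C++ API. The following is only a minimal sketch of an equivalent program, not the code I actually used; it assumes the OpenCV 2.x C++ headers and mirrors the numx/numy/size defines above.

// Sketch: chessboard generation with the C++ API (equivalent to the C code above).
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    const int numx = 9, numy = 9, size = 200;   // same layout as the defines above
    cv::Mat board(numy * size, numx * size, CV_8UC1);
    for (int i = 0; i < board.rows; i++)
        for (int j = 0; j < board.cols; j++)
            // a pixel is white when the parities of its row and column square indices differ
            board.at<uchar>(i, j) = ((i / size + j / size) % 2 == 1) ? 255 : 0;
    cv::imwrite("chessboard.jpg", board);
    return 0;
}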

The OpenCV samples directory contains a number of example programs, including camera calibration, stereo calibration, stereo matching and so on. I have been working through them recently and post them here in the hope that they will be useful to beginners.

In the previous post some readers discussed what hardware to use; for now I am simply using USB webcams.

My setup is rather rudimentary. I have only just started learning this, and my calibration chessboard is printed on paper and glued onto a wooden board, so its accuracy is a bit low, but it is good enough to get started; I suspect most beginners start with a self-printed chessboard as well.

// Monocular (single-camera) calibration
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <string.h>
#include <time.h>
// example command line (for copy-n-paste):
// calibration -w 6 -h 8 -s 2 -n 10 -o camera.yml -op -oe [<list_of_views.txt>]
/* The list of views may look as following (discard the starting and ending ------ separators):
-------------------
view000.png
view001.png
#view002.png
view003.png
view010.png
one_extra_view.jpg
-------------------
that is, the file will contain 6 lines, view002.png will not be used for calibration,
other ones will be (those, in which the chessboard pattern will be found)
*/
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
double compute_reprojection_error( const CvMat* object_points,
const CvMat* rot_vects, const CvMat* trans_vects,
const CvMat* camera_matrix, const CvMat* dist_coeffs,
const CvMat* image_points, const CvMat* point_counts,
CvMat* per_view_errors )
{
CvMat* image_points2 = cvCreateMat( image_points->rows,
image_points->cols, image_points->type );
int i, image_count = rot_vects->rows, points_so_far = 0;
double total_err = 0, err;

for( i = 0; i < image_count; i++ )
{
CvMat object_points_i, image_points_i, image_points2_i;
int point_count = point_counts->data.i[i];
CvMat rot_vect, trans_vect;
cvGetCols( object_points, &object_points_i,
points_so_far, points_so_far + point_count );
cvGetCols( image_points, &image_points_i,
points_so_far, points_so_far + point_count );
cvGetCols( image_points2, &image_points2_i,
points_so_far, points_so_far + point_count );
points_so_far += point_count;
cvGetRow( rot_vects, &rot_vect, i );
cvGetRow( trans_vects, &trans_vect, i );
cvProjectPoints2( &object_points_i, &rot_vect, &trans_vect,
camera_matrix, dist_coeffs, &image_points2_i,
0, 0, 0, 0, 0 );
err = cvNorm( &image_points_i, &image_points2_i, CV_L1 );
if( per_view_errors )
per_view_errors->data.db[i] = err/point_count;
total_err += err;
}

cvReleaseMat( &image_points2 );
return total_err/points_so_far;
}
int run_calibration( CvSeq* image_points_seq, CvSize img_size, CvSize board_size,
float square_size, float aspect_ratio, int flags,
CvMat* camera_matrix, CvMat* dist_coeffs, CvMat** extr_params,
CvMat** reproj_errs, double* avg_reproj_err )
{
int code;
int image_count = image_points_seq->total;
int point_count = board_size.width*board_size.height;
CvMat* image_points = cvCreateMat( 1, image_count*point_count, CV_32FC2 );
CvMat* object_points = cvCreateMat( 1, image_count*point_count, CV_32FC3 );
CvMat* point_counts = cvCreateMat( 1, image_count, CV_32SC1 );
CvMat rot_vects, trans_vects;
int i, j, k;
CvSeqReader reader;
cvStartReadSeq( image_points_seq, &reader );
// initialize arrays of points
for( i = 0; i < image_count; i++ )
{
CvPoint2D32f* src_img_pt = (CvPoint2D32f*)reader.ptr;
CvPoint2D32f* dst_img_pt = ((CvPoint2D32f*)image_points->data.fl) + i*point_count;
CvPoint3D32f* obj_pt = ((CvPoint3D32f*)object_points->data.fl) + i*point_count;
for( j = 0; j < board_size.height; j++ )
for( k = 0; k < board_size.width; k++ )
{
*obj_pt++ = cvPoint3D32f(j*square_size, k*square_size, 0);
*dst_img_pt++ = *src_img_pt++;
}
CV_NEXT_SEQ_ELEM( image_points_seq->elem_size, reader );
}
cvSet( point_counts, cvScalar(point_count) );
*extr_params = cvCreateMat( image_count, 6, CV_32FC1 );
cvGetCols( *extr_params, &rot_vects, 0, 3 );
cvGetCols( *extr_params, &trans_vects, 3, 6 );
cvZero( camera_matrix );
cvZero( dist_coeffs );
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
{
camera_matrix->data.db[0] = aspect_ratio;
camera_matrix->data.db[4] = 1.;
}
cvCalibrateCamera2( object_points, image_points, point_counts,
img_size, camera_matrix, dist_coeffs,
&rot_vects, &trans_vects, flags );
code = cvCheckArr( camera_matrix, CV_CHECK_QUIET ) &&
cvCheckArr( dist_coeffs, CV_CHECK_QUIET ) &&
cvCheckArr( *extr_params, CV_CHECK_QUIET );
*reproj_errs = cvCreateMat( 1, image_count, CV_64FC1 );
*avg_reproj_err =
compute_reprojection_error( object_points, &rot_vects, &trans_vects,
camera_matrix, dist_coeffs, image_points, point_counts, *reproj_errs );
cvReleaseMat( &object_points );
cvReleaseMat( &image_points );
cvReleaseMat( &point_counts );
return code;
}
void save_camera_params( const char* out_filename, int image_count, CvSize img_size,
CvSize board_size, float square_size,
float aspect_ratio, int flags,
const CvMat* camera_matrix, CvMat* dist_coeffs,
const CvMat* extr_params, const CvSeq* image_points_seq,
const CvMat* reproj_errs, double avg_reproj_err )
{
CvFileStorage* fs = cvOpenFileStorage( out_filename, 0, CV_STORAGE_WRITE );

time_t t;
time( &t );
struct tm *t2 = localtime( &t );
char buf[1024];
strftime( buf, sizeof(buf)-1, "%c", t2 );
cvWriteString( fs, "calibration_time", buf );

cvWriteInt( fs, "image_count", image_count );
cvWriteInt( fs, "image_width", img_size.width );
cvWriteInt( fs, "image_height", img_size.height );
cvWriteInt( fs, "board_width", board_size.width );
cvWriteInt( fs, "board_height", board_size.height );
cvWriteReal( fs, "square_size", square_size );

if( flags & CV_CALIB_FIX_ASPECT_RATIO )
cvWriteReal( fs, "aspect_ratio", aspect_ratio );
if( flags != 0 )
{
sprintf( buf, "flags: %s%s%s%s",
flags & CV_CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
flags & CV_CALIB_FIX_ASPECT_RATIO ? "+fix_aspect_ratio" : "",
flags & CV_CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
cvWriteComment( fs, buf, 0 );
}

cvWriteInt( fs, "flags", flags );
cvWrite( fs, "camera_matrix", camera_matrix );
cvWrite( fs, "distortion_coefficients", dist_coeffs );
cvWriteReal( fs, "avg_reprojection_error", avg_reproj_err );
if( reproj_errs )
cvWrite( fs, "per_view_reprojection_errors", reproj_errs );
if( extr_params )
{
cvWriteComment( fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
cvWrite( fs, "extrinsic_parameters", extr_params );
}
if( image_points_seq )
{
cvWriteComment( fs, "the array of board corners projections used for calibration", 0 );
assert( image_points_seq->total == image_count );
CvMat* image_points = cvCreateMat( 1, image_count*board_size.width*board_size.height, CV_32FC2 );
cvCvtSeqToArray( image_points_seq, image_points->data.fl );
cvWrite( fs, "image_points", image_points );
cvReleaseMat( &image_points );
}
cvReleaseFileStorage( &fs );
}
int main( int argc, char** argv )
{
CvSize board_size = {0,0};
float square_size = 1.f, aspect_ratio = 1.f;
const char* out_filename = "out_camera_data.yml";
const char* input_filename = 0;
int i, image_count = 10;
int write_extrinsics = 0, write_points = 0;
int flags = 0;
CvCapture* capture = 0;
FILE* f = 0;
char imagename[1024];
CvMemStorage* storage;
CvSeq* image_points_seq = 0;
int elem_size, flip_vertical = 0;
int delay = 1000;
clock_t prev_timestamp = 0;
CvPoint2D32f* image_points_buf = 0;
CvFont font = cvFont( 1, 1 );
double _camera[9], _dist_coeffs[4];
CvMat camera = cvMat( 3, 3, CV_64F, _camera );
CvMat dist_coeffs = cvMat( 1, 4, CV_64F, _dist_coeffs );
CvMat *extr_params = 0, *reproj_errs = 0;
double avg_reproj_err = 0;
int mode = DETECTION;
int undistort_image = 0;
CvSize img_size = {0,0};
const char* live_capture_help =
"When the live video from camera is used as input, the following hot-keys may be used:\n"
"  <ESC>, 'q' - quit the program\n"
"  'g' - start capturing images\n"
"  'u' - switch undistortion on/off\n";
if( argc < 2 )
{
printf( "This is a camera calibration sample.\n"
"Usage: calibration\n"
"     -w <board_width>         # the number of inner corners per one of board dimension\n"
"     -h <board_height>        # the number of inner corners per another board dimension\n"
"     [-n <number_of_frames>]  # the number of frames to use for calibration\n"
"                              # (if not specified, it will be set to the number\n"
"                              #  of board views actually available)\n"
"     [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view\n"
"                              # (used only for video capturing)\n"
"     [-s <square_size>]       # square size in some user-defined units (1 by default)\n"
"     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
"     [-op]                    # write detected feature points\n"
"     [-oe]                    # write extrinsic parameters\n"
"     [-zt]                    # assume zero tangential distortion\n"
"     [-a <aspect_ratio>]      # fix aspect ratio (fx/fy)\n"
"     [-p]                     # fix the principal point at the center\n"
"     [-v]                     # flip the captured images around the horizontal axis\n"
"     [input_data]             # input data, one of the following:\n"
"                              #  - text file with a list of the images of the board\n"
"                              #  - name of video file with a video of the board\n"
"                              # if input_data not specified, a live view from the camera is used\n"
"\n" );
printf( "%s", live_capture_help );
return 0;
}
for( i = 1; i < argc; i++ )
{
const char* s = argv[i];
if( strcmp( s, "-w" ) == 0 )
{
if( sscanf( argv[++i], "%u", &board_size.width ) != 1 || board_size.width <= 0 )
return fprintf( stderr, "Invalid board width\n" ), -1;
}
else if( strcmp( s, "-h" ) == 0 )
{
if( sscanf( argv[++i], "%u", &board_size.height ) != 1 || board_size.height <= 0 )
return fprintf( stderr, "Invalid board height\n" ), -1;
}
else if( strcmp( s, "-s" ) == 0 )
{
if( sscanf( argv[++i], "%f", &square_size ) != 1 || square_size <= 0 )
return fprintf( stderr, "Invalid board square width\n" ), -1;
}
else if( strcmp( s, "-n" ) == 0 )
{
if( sscanf( argv[++i], "%u", &image_count ) != 1 || image_count <= 3 )
return printf("Invalid number of images\n" ), -1;
}
else if( strcmp( s, "-a" ) == 0 )
{
if( sscanf( argv[++i], "%f", &aspect_ratio ) != 1 || aspect_ratio <= 0 )
return printf("Invalid aspect ratio\n" ), -1;
}
else if( strcmp( s, "-d" ) == 0 )
{
if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
return printf("Invalid delay\n" ), -1;
}
else if( strcmp( s, "-op" ) == 0 )
{
write_points = 1;
}
else if( strcmp( s, "-oe" ) == 0 )
{
write_extrinsics = 1;
}
else if( strcmp( s, "-zt" ) == 0 )
{
flags |= CV_CALIB_ZERO_TANGENT_DIST;
}
else if( strcmp( s, "-p" ) == 0 )
{
flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
}
else if( strcmp( s, "-v" ) == 0 )
{
flip_vertical = 1;
}
else if( strcmp( s, "-o" ) == 0 )
{
out_filename = argv[++i];
}
else if( s[0] != '-' )
input_filename = s;
else
return fprintf( stderr, "Unknown option %s", s ), -1;
}
if( input_filename )
{
capture = cvCreateFileCapture( input_filename );
if( !capture )
{
f = fopen( input_filename, "rt" );
if( !f )
return fprintf( stderr, "The input file could not be opened\n" ), -1;
image_count = -1;
}
mode = CAPTURING;
}
else
capture = cvCreateCameraCapture(0);
if( !capture && !f )
return fprintf( stderr, "Could not initialize video capture\n" ), -2;
if( capture )
printf( "%s", live_capture_help );
elem_size = board_size.width*board_size.height*sizeof(image_points_buf[0]);
storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 ));
image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size );
image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
cvNamedWindow( "Image View", 1 );
for(;;)
{
IplImage *view = 0, *view_gray = 0;
int count = 0, found, blink = 0;
CvPoint text_origin;
CvSize text_size = {0,0};
int base_line = 0;
char s[100];
int key;

if( f && fgets( imagename, sizeof(imagename)-2, f ))
{
int l = strlen(imagename);
if( l > 0 && imagename[l-1] == '\n' )
imagename[--l] = '\0';
if( l > 0 )
{
if( imagename[0] == '#' )
continue;
view = cvLoadImage( imagename, 1 );
}
}
else if( capture )
{
IplImage* view0 = cvQueryFrame( capture );
if( view0 )
{
view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels );
if( view0->origin == IPL_ORIGIN_BL )
cvFlip( view0, view, 0 );
else
cvCopy( view0, view );
}
}
if( !view )
{
if( image_points_seq->total > 0 )
{
image_count = image_points_seq->total;
goto calibrate;
}
break;
}
if( flip_vertical )
cvFlip( view, view, 0 );
img_size = cvGetSize(view);
found = cvFindChessboardCorners( view, board_size,
image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );
#if 1
// improve the found corners' coordinate accuracy
view_gray = cvCreateImage( cvGetSize(view), 8, 1 );
cvCvtColor( view, view_gray, CV_BGR2GRAY );
cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
cvReleaseImage( &view_gray );
#endif
if( mode == CAPTURING && found && (f || clock() - prev_timestamp > delay*1e-3*CLOCKS_PER_SEC) )
{
cvSeqPush( image_points_seq, image_points_buf );
prev_timestamp = clock();
blink = !f;
#if 1
if( capture )
{
sprintf( imagename, "view%03d.png", image_points_seq->total - 1 );
cvSaveImage( imagename, view );
}
#endif
}
cvDrawChessboardCorners( view, board_size, image_points_buf, count, found );
cvGetTextSize( "100/100", &font, &text_size, &base_line );
text_origin.x = view->width - text_size.width - 10;
text_origin.y = view->height - base_line - 10;
if( mode == CAPTURING )
{
if( image_count > 0 )
sprintf( s, "%d/%d", image_points_seq ? image_points_seq->total : 0, image_count );
else
sprintf( s, "%d/?", image_points_seq ? image_points_seq->total : 0 );
}
else if( mode == CALIBRATED )
sprintf( s, "Calibrated" );
else
sprintf( s, "Press 'g' to start" );
cvPutText( view, s, text_origin, &font, mode != CALIBRATED ?
CV_RGB(255,0,0) : CV_RGB(0,255,0));
if( blink )
cvNot( view, view );
if( mode == CALIBRATED && undistort_image )
{
IplImage* t = cvCloneImage( view );
cvUndistort2( t, view, &camera, &dist_coeffs );
cvReleaseImage( &t );
}
cvShowImage( "Image View", view );
key = cvWaitKey(capture ? 50 : 500);
if( key == 27 )
break;

if( key == 'u' && mode == CALIBRATED )
undistort_image = !undistort_image;
if( capture && key == 'g' )
{
mode = CAPTURING;
cvClearMemStorage( storage );
image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
}
if( mode == CAPTURING && (unsigned)image_points_seq->total >= (unsigned)image_count )
{
calibrate:
cvReleaseMat( &extr_params );
cvReleaseMat( &reproj_errs );
int code = run_calibration( image_points_seq, img_size, board_size,
square_size, aspect_ratio, flags, &camera, &dist_coeffs, &extr_params,
&reproj_errs, &avg_reproj_err );
// save camera parameters in any case, to catch Inf's/NaN's
save_camera_params( out_filename, image_count, img_size,
board_size, square_size, aspect_ratio, flags,
&camera, &dist_coeffs, write_extrinsics ? extr_params : 0,
write_points ? image_points_seq : 0, reproj_errs, avg_reproj_err );
if( code )
mode = CALIBRATED;
else
mode = DETECTION;
}
if( !view )
break;
cvReleaseImage( &view );
}
if( capture )
cvReleaseCapture( &capture );
if( storage )
cvReleaseMemStorage( &storage );
return 0;
}
If you use the chessboard generated by the code above, the program can be invoked like this: calibration -w 8 -h 8 -s 2 -n 10 -o camera.yml -op -oe -p

When debugging in Visual Studio, add -w 8 -h 8 -s 2 -n 10 -o camera.yml -op -oe -p under Property -> Configuration Properties -> Debugging -> Command Arguments.
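Once camera.yml has been written, the intrinsics can be read back and used to undistort new images. Below is a minimal sketch with the C++ API; it relies only on the camera_matrix and distortion_coefficients keys written by save_camera_params above, and the image name test.jpg is just a placeholder.

// Sketch: load the calibration result and undistort one image.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdio>

int main()
{
    cv::FileStorage fs("camera.yml", cv::FileStorage::READ);
    if (!fs.isOpened()) { std::printf("could not open camera.yml\n"); return -1; }
    cv::Mat cameraMatrix, distCoeffs;
    fs["camera_matrix"] >> cameraMatrix;          // 3x3 intrinsic matrix
    fs["distortion_coefficients"] >> distCoeffs;  // k1 k2 p1 p2
    cv::Mat img = cv::imread("test.jpg");         // placeholder file name
    if (img.empty()) { std::printf("could not load test.jpg\n"); return -1; }
    cv::Mat undistorted;
    cv::undistort(img, undistorted, cameraMatrix, distCoeffs);
    cv::imwrite("test_undistorted.jpg", undistorted);
    return 0;
}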

// The third piece of code does the stereo calibration. I modified it slightly so that it grabs the images directly from two cameras and processes them.
//! E-mail: yang3kui@gmail.com
#include "cv.h"
#include "highgui.h"
#include<iostream>
using namespace std;
/* This is sample from the OpenCV book. The copyright notice is below */
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warranty, support or any guarantee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT: http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134    ISBN-13: 978-0596516130
OTHER OPENCV SITES:
* The source code is on sourceforge at: http://sourceforge.net/projects/opencvlibrary/
* The OpenCV wiki page (As of Oct 1, 2008 this is down for changing over servers, but should come back): http://opencvlibrary.sourceforge.net/
* An active user group is at: http://tech.groups.yahoo.com/group/OpenCV/
* The minutes of weekly OpenCV development meetings are at: http://pr.willowgarage.com/wiki/OpenCV
************************************************** */
#undef _GLIBCXX_DEBUG
#include "cv.h"
#include "cxmisc.h"
#include "highgui.h"
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>
#include <ctype.h>
using namespace std;
//
// Given a list of chessboard images, the number of corners (nx, ny)
// on the chessboards, and a flag: useCalibrated for calibrated (0) or
// uncalibrated (1: use cvStereoCalibrate(), 2: compute fundamental
// matrix separately) stereo. Calibrate the cameras and display the
// rectified results along with the computed disparity images.
//
static void
StereoCalib(const char* imageList, int useUncalibrated)
{
CvRect roi1, roi2;
int nx = 0, ny = 0;
int displayCorners = 1;
int showUndistorted = 1;
bool isVerticalStereo = false;//OpenCV can handle left-right
//or up-down camera arrangements
const int maxScale = 1;
const float squareSize = 1.f; //Set this to your actual square size
FILE* f = fopen(imageList, "rt");
int i, j, lr, nframes = 0, n, N = 0;
vector<string> imageNames[2];
vector<CvPoint3D32f> objectPoints;
vector<CvPoint2D32f> points[2];
vector<CvPoint2D32f> temp_points[2];
vector<int> npoints;
//    vector<uchar> active[2];
int is_found[2] = {0, 0};
vector<CvPoint2D32f> temp;
CvSize imageSize = {0,0};
// ARRAY AND VECTOR STORAGE:
double M1[3][3], M2[3][3], D1[5], D2[5];
double R[3][3], T[3], E[3][3], F[3][3];
double Q[4][4];
CvMat _M1 = cvMat(3, 3, CV_64F, M1 );
CvMat _M2 = cvMat(3, 3, CV_64F, M2 );
CvMat _D1 = cvMat(1, 5, CV_64F, D1 );
CvMat _D2 = cvMat(1, 5, CV_64F, D2 );
CvMat matR = cvMat(3, 3, CV_64F, R );
CvMat matT = cvMat(3, 1, CV_64F, T );
CvMat matE = cvMat(3, 3, CV_64F, E );
CvMat matF = cvMat(3, 3, CV_64F, F );
CvMat matQ = cvMat(4, 4, CV_64FC1, Q);
char buf[1024];
if( displayCorners )
cvNamedWindow( "corners", 1 );
// READ IN THE LIST OF CHESSBOARDS:
if( !f )
{
fprintf(stderr, "can not open file %s\n", imageList );
return;
}
if( !fgets(buf, sizeof(buf)-3, f) || sscanf(buf, "%d%d", &nx, &ny) != 2 )
return;
n = nx*ny;
temp.resize(n);
temp_points[0].resize(n);
temp_points[1].resize(n);
for(i=0;;i++)
{
int count = 0, result=0;
lr = i % 2;
vector<CvPoint2D32f>& pts = temp_points[lr];//points[lr];
if( !fgets( buf, sizeof(buf)-3, f ))
break;
size_t len = strlen(buf);
while( len > 0 && isspace(buf[len-1]))
buf[--len] = '\0';
if( buf[0] == '#')
continue;
IplImage* img = cvLoadImage( buf, 0 );
if( !img )
break;
imageSize = cvGetSize(img);
imageNames[lr].push_back(buf);
//FIND CHESSBOARDS AND CORNERS THEREIN:
for( int s = 1; s <= maxScale; s++ )
{
IplImage* timg = img;
if( s > 1 )
{
timg = cvCreateImage(cvSize(img->width*s,img->height*s),
img->depth, img->nChannels );
cvResize( img, timg, CV_INTER_CUBIC );
}
result = cvFindChessboardCorners( timg, cvSize(nx, ny),
&temp[0], &count,
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_NORMALIZE_IMAGE);
if( timg != img )
cvReleaseImage( &timg );
if( result || s == maxScale )
for( j = 0; j < count; j++ )
{
temp[j].x /= s;
temp[j].y /= s;
}
if( result )
break;
}
if( displayCorners )
{
printf("%s\n", buf);
IplImage* cimg = cvCreateImage( imageSize, 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
cvDrawChessboardCorners( cimg, cvSize(nx, ny), &temp[0],
count, result );
IplImage* cimg1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
cvResize(cimg, cimg1);
cvShowImage( "corners", cimg1 );
cvReleaseImage( &cimg );
cvReleaseImage( &cimg1 );
int c = cvWaitKey(1000);
if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
//N = pts.size();
//pts.resize(N + n, cvPoint2D32f(0,0));
//active[lr].push_back((uchar)result);
is_found[lr] = result > 0 ? 1 : 0;
//assert( result != 0 );
if( result )
{
//Calibration will suffer without subpixel interpolation
cvFindCornerSubPix( img, &temp[0], count,
cvSize(11, 11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
30, 0.01) );
copy( temp.begin(), temp.end(), pts.begin() );
}
cvReleaseImage( &img );
if(lr)
{
if(is_found[0] == 1 && is_found[1] == 1)
{
assert(temp_points[0].size() == temp_points[1].size());
int current_size = points[0].size();
points[0].resize(current_size + temp_points[0].size(), cvPoint2D32f(0.0, 0.0));
points[1].resize(current_size + temp_points[1].size(), cvPoint2D32f(0.0, 0.0));
copy(temp_points[0].begin(), temp_points[0].end(), points[0].begin() + current_size);
copy(temp_points[1].begin(), temp_points[1].end(), points[1].begin() + current_size);
nframes++;
printf("Pair successfully detected...\n");
}
is_found[0] = 0;
is_found[1] = 0;
}
}
fclose(f);
printf("\n");
// HARVEST CHESSBOARD 3D OBJECT POINT LIST:
objectPoints.resize(nframes*n);
for( i = 0; i < ny; i++ )
for( j = 0; j < nx; j++ )
objectPoints[i*nx + j] =
cvPoint3D32f(i*squareSize, j*squareSize, 0);
for( i = 1; i < nframes; i++ )
copy( objectPoints.begin(), objectPoints.begin() + n,
objectPoints.begin() + i*n );
npoints.resize(nframes,n);
N = nframes*n;
CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
cvSetIdentity(&_M1);
cvSetIdentity(&_M2);
cvZero(&_D1);
cvZero(&_D2);
// CALIBRATE THE STEREO CAMERAS
printf("Running stereo calibration ...");
fflush(stdout);
cvStereoCalibrate( &_objectPoints, &_imagePoints1,
&_imagePoints2, &_npoints,
&_M1, &_D1, &_M2, &_D2,
imageSize, &matR, &matT, &matE, &matF,
cvTermCriteria(CV_TERMCRIT_ITER+
CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH +
CV_CALIB_FIX_K3);
printf(" done\n");
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
vector<CvPoint3D32f> lines[2];
points[0].resize(N);
points[1].resize(N);
_imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
_imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
lines[0].resize(N);
lines[1].resize(N);
CvMat _L1 = cvMat(1, N, CV_32FC3, &lines[0][0]);
CvMat _L2 = cvMat(1, N, CV_32FC3, &lines[1][0]);
//Always work in undistorted space
cvUndistortPoints( &_imagePoints1, &_imagePoints1,
&_M1, &_D1, 0, &_M1 );
cvUndistortPoints( &_imagePoints2, &_imagePoints2,
&_M2, &_D2, 0, &_M2 );
cvComputeCorrespondEpilines( &_imagePoints1, 1, &matF, &_L1 );
cvComputeCorrespondEpilines( &_imagePoints2, 2, &matF, &_L2 );
double avgErr = 0;
for( i = 0; i < N; i++ )
{
double err = fabs(points[0][i].x*lines[1][i].x +
points[0][i].y*lines[1][i].y + lines[1][i].z)
+ fabs(points[1][i].x*lines[0][i].x +
points[1][i].y*lines[0][i].y + lines[0][i].z);
avgErr += err;
}
printf( "avg err = %g\n", avgErr/(nframes*n) );
// save intrinsic parameters
CvFileStorage* fstorage = cvOpenFileStorage("intrinsics.yml", NULL, CV_STORAGE_WRITE);
cvWrite(fstorage, "M1", &_M1);
cvWrite(fstorage, "D1", &_D1);
cvWrite(fstorage, "M2", &_M2);
cvWrite(fstorage, "D2", &_D2);
cvReleaseFileStorage(&fstorage);
//COMPUTE AND DISPLAY RECTIFICATION
if( showUndistorted )
{
CvMat* mx1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* mx2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* img1r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* img2r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* disp = cvCreateMat( imageSize.height,
imageSize.width, CV_16S );
double R1[3][3], R2[3][3], P1[3][4], P2[3][4];
CvMat _R1 = cvMat(3, 3, CV_64F, R1);
CvMat _R2 = cvMat(3, 3, CV_64F, R2);
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useUncalibrated == 0 )
{
CvMat _P1 = cvMat(3, 4, CV_64F, P1);
CvMat _P2 = cvMat(3, 4, CV_64F, P2);
cvStereoRectify( &_M1, &_M2, &_D1, &_D2, imageSize,
&matR, &matT,
&_R1, &_R2, &_P1, &_P2, &matQ,
CV_CALIB_ZERO_DISPARITY,
1, imageSize, &roi1, &roi2);
CvFileStorage* file = cvOpenFileStorage("extrinsics.yml", NULL, CV_STORAGE_WRITE);
cvWrite(file, "R", &matR);
cvWrite(file, "T", &matT);
cvWrite(file, "R1", &_R1);
cvWrite(file, "R2", &_R2);
cvWrite(file, "P1", &_P1);
cvWrite(file, "P2", &_P2);
cvWrite(file, "Q", &matQ);
cvReleaseFileStorage(&file);
isVerticalStereo = fabs(P2[1][3]) > fabs(P2[0][3]);
if(!isVerticalStereo)
roi2.x += imageSize.width;
else
roi2.y += imageSize.height;
//Precompute maps for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_P1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_P2,mx2,my2);
}
//OR ELSE HARTLEY'S METHOD
else if( useUncalibrated == 1 || useUncalibrated == 2 )
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
double H1[3][3], H2[3][3], iM[3][3];
CvMat _H1 = cvMat(3, 3, CV_64F, H1);
CvMat _H2 = cvMat(3, 3, CV_64F, H2);
CvMat _iM = cvMat(3, 3, CV_64F, iM);
//Just to show you could have independently used F
if( useUncalibrated == 2 )
cvFindFundamentalMat( &_imagePoints1,
&_imagePoints2, &matF);
cvStereoRectifyUncalibrated( &_imagePoints1,
&_imagePoints2, &matF,
imageSize,
&_H1, &_H2, 3);
cvInvert(&_M1, &_iM);
cvMatMul(&_H1, &_M1, &_R1);
cvMatMul(&_iM, &_R1, &_R1);
cvInvert(&_M2, &_iM);
cvMatMul(&_H2, &_M2, &_R2);
cvMatMul(&_iM, &_R2, &_R2);
//Precompute map for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_M2,mx2,my2);
}
else
assert(0);
cvReleaseMat( &mx1 );
cvReleaseMat( &my1 );
cvReleaseMat( &mx2 );
cvReleaseMat( &my2 );
cvReleaseMat( &img1r );
cvReleaseMat( &img2r );
cvReleaseMat( &disp );
}
}
void error(int level, const char* msg)
{
switch(level)
{
case 1:
/// fatal error
cout<<msg<<endl;
cout<<"This is a fatal error, the program will terminate.\n"
<<"Press any key to exit."
<<endl;
cin.get();
break;
case 2:
// warning
cout<<"Warning: "<<msg<<endl;
break;
case 3:
/// information
cout<<msg<<endl;
break;
default:
cout<<msg<<endl;
cout<<"This is a message of unknown severity level"<<endl;
break;
}
}
int main()
{
cout<<"This is the stereo (two-camera) calibration program"<<endl;
IplImage *leftimg, *rightimg, *image;
CvCapture *leftcap=NULL;
CvCapture *rightcap=NULL;
leftcap=cvCreateCameraCapture(1);
rightcap=cvCreateCameraCapture(0);
if(leftcap==NULL||rightcap==NULL)
{
error(1,"One of the cameras could not be opened");
return 1;
}
image=cvQueryFrame(leftcap);
///allocate the left and right image buffers
leftimg=cvCloneImage(image);
rightimg=cvCloneImage(image);
cvNamedWindow("left",1);
cvNamedWindow("right",1);
int index=0;///index of the current image pair
int total=10;///total number of image pairs to capture
const char* basename1="jpgleft";
const char* basename2="jpgright";
char filename[100];
FILE* file=fopen("filename.txt","wt");
const char* buf="8 8\n";//board size: 8x8 inner corners
fwrite(buf,1,strlen(buf),file);

while(1)
{
image=cvQueryFrame(leftcap);
cvCopy(image,leftimg);
image=cvQueryFrame(rightcap);
cvCopy(image,rightimg);
cvShowImage("left",leftimg);
cvShowImage("right",rightimg);
char ch=cvWaitKey(27);//wait about 27 ms for a key press
if(ch=='e')
{
//normal exit
break;
}
else if(ch==' ')
{
//capture a pair of images and record their filenames
cout<<"Capturing image pair "<<index+1<<" of "<<total<<"."<<endl;
const char* temp="\n";
sprintf_s(filename,100,"%s%d.jpg",basename1,index);
cvSaveImage(filename,leftimg);
fwrite(filename,1,strlen(filename),file);
fwrite(temp,1,strlen(temp),file);
sprintf_s(filename,100,"%s%d.jpg",basename2,index);
cvSaveImage(filename,rightimg);
fwrite(filename,1,strlen(filename),file);
fwrite(temp,1,strlen(temp),file);
index++;
}
if(index==total)
break;
}
fclose(file);
cout<<"Starting stereo calibration"<<endl;
StereoCalib("filename.txt",0);
cout<<"Calibration finished. The intrinsic parameters are saved in intrinsics.yml and the extrinsic parameters in extrinsics.yml.\nPress Enter to exit."<<endl;
cin.get();
return 0;
}
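For reference, after all ten pairs have been captured, filename.txt should look roughly like this: the first line is the number of inner corners per row and column, followed by alternating left/right image names, which is exactly the order StereoCalib reads them in.

8 8
jpgleft0.jpg
jpgright0.jpg
jpgleft1.jpg
jpgright1.jpg
...
jpgleft9.jpg
jpgright9.jpg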


Stereo matching.

/*
*  stereo_match.cpp
*  calibration
*
*  Created by Victor  Eruhimov on 1/18/10.
*  Copyright 2010 Argus Corp. All rights reserved.
*
*/
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
using namespace cv;
void saveXYZ(const char* filename, const Mat& mat)
{
const double max_z = 1.0e4;
FILE* fp = fopen(filename, "wt");
fprintf(fp, "\"X\"        \"Y\"        \"Z\"\n");
for(int y = 0; y < mat.rows; y++)
{
for(int x = 0; x < mat.cols; x++)
{
Vec3f point = mat.at<Vec3f>(y, x);
if(fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;
fprintf(fp, "%f\t%f\t%f\n", point[0]*1000, point[1]*1000, point[2]*1000);
}
}
fclose(fp);
}
void print_help()
{
printf("Usage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh] [--blocksize=<block_size>]\n"
"[--max-disparity=<max_disparity>] [-i <intrinsic_filename>] [-e <extrinsic_filename>]\n"
"[--no-display] [-o <disparity_image>] [-p <point_cloud_file>]\n");
}
int main(int argc, char** argv)
{
const char* algorithm_opt = "--algorithm=";
const char* maxdisp_opt = "--max-disparity=";
const char* blocksize_opt = "--blocksize=";
const char* nodisplay_opt = "--no-display";
//print_help();
if(argc < 3)
{
print_help();
return 0;
}
const char* img1_filename = 0;
const char* img2_filename = 0;
const char* intrinsic_filename = 0;
const char* extrinsic_filename = 0;
const char* disparity_filename = 0;
const char* point_cloud_filename = 0;
enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2 };
int alg = STEREO_SGBM;
int SADWindowSize = 0, numberOfDisparities = 0;
bool no_display = false;
StereoBM bm;
StereoSGBM sgbm;
for( int i = 1; i < argc; i++ )
{
if( argv[i][0] != '-' )
{
if( !img1_filename )
img1_filename = argv[i];
else
img2_filename = argv[i];
}
else if( strncmp(argv[i], algorithm_opt, strlen(algorithm_opt)) == 0 )
{
char* _alg = argv[i] + strlen(algorithm_opt);
alg = strcmp(_alg, "bm") == 0 ? STEREO_BM :
strcmp(_alg, "sgbm") == 0 ? STEREO_SGBM :
strcmp(_alg, "hh") == 0 ? STEREO_HH : -1;
if( alg < 0 )
{
printf("Command-line parameter error: Unknown stereo algorithm\n\n");
print_help();
return -1;
}
}
else if( strncmp(argv[i], maxdisp_opt, strlen(maxdisp_opt)) == 0 )
{
if( sscanf( argv[i] + strlen(maxdisp_opt), "%d", &numberOfDisparities ) != 1 ||
numberOfDisparities < 1 || numberOfDisparities % 16 != 0 )
{
printf("Command-line parameter error: The max disparity (--maxdisparity=<...>) must be a positive integer divisible by 16\n");
print_help();
return -1;
}
}
else if( strncmp(argv[i], blocksize_opt, strlen(blocksize_opt)) == 0 )
{
if( sscanf( argv[i] + strlen(blocksize_opt), "%d", &SADWindowSize ) != 1 ||
SADWindowSize < 1 || SADWindowSize % 2 != 1 )
{
printf("Command-line parameter error: The block size (--blocksize=<...>) must be a positive odd number\n");
return -1;
}
}
else if( strcmp(argv[i], nodisplay_opt) == 0 )
no_display = true;
else if( strcmp(argv[i], "-i" ) == 0 )
intrinsic_filename = argv[++i];
else if( strcmp(argv[i], "-e" ) == 0 )
extrinsic_filename = argv[++i];
else if( strcmp(argv[i], "-o" ) == 0 )
disparity_filename = argv[++i];
else if( strcmp(argv[i], "-p" ) == 0 )
point_cloud_filename = argv[++i];
else
{
printf("Command-line parameter error: unknown option %s\n", argv[i]);
return -1;
}
}
if( !img1_filename || !img2_filename )
{
printf("Command-line parameter error: both left and right images must be specified\n");
return -1;
}
if( (intrinsic_filename != 0) ^ (extrinsic_filename != 0) )
{
printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already rectified)\n");
return -1;
}
if( extrinsic_filename == 0 && point_cloud_filename )
{
printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");
return -1;
}
int color_mode = alg == STEREO_BM ? 0 : -1;
Mat img1 = imread(img1_filename, color_mode);
Mat img2 = imread(img2_filename, color_mode);
if( img1.empty() || img2.empty() )
{
printf("Could not load one of the input images\n");
return -1;
}
Size img_size = img1.size();
Rect roi1, roi2;
Mat Q;
if( intrinsic_filename )
{
// reading intrinsic parameters
FileStorage fs(intrinsic_filename,CV_STORAGE_READ);
if(!fs.isOpened())
{
printf("Failed to open file %s\n", intrinsic_filename);
return -1;
}
Mat M1, D1, M2, D2;
fs["M1"] >> M1;
fs["D1"] >> D1;
fs["M2"] >> M2;
fs["D2"] >> D2;
fs.open(extrinsic_filename, CV_STORAGE_READ);
if(!fs.isOpened())
{
printf("Failed to open file %s\n", extrinsic_filename);
return -1;
}
Mat R, T, R1, P1, R2, P2;
fs["R"] >> R;
fs["T"] >> T;
stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );
Mat map11, map12, map21, map22;
initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
Mat img1r, img2r;
remap(img1, img1r, map11, map12, INTER_LINEAR);
remap(img2, img2r, map21, map22, INTER_LINEAR);
img1 = img1r;
img2 = img2r;
}
numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : img_size.width/8;
bm.state->roi1 = roi1;
bm.state->roi2 = roi2;
bm.state->preFilterCap = 31;
bm.state->SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 9;
bm.state->minDisparity = 0;
bm.state->numberOfDisparities = numberOfDisparities;
bm.state->textureThreshold = 10;
bm.state->uniquenessRatio = 15;
bm.state->speckleWindowSize = 100;
bm.state->speckleRange = 32;
bm.state->disp12MaxDiff = 1;
sgbm.preFilterCap = 63;
sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;
int cn = img1.channels();
sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.minDisparity = 0;
sgbm.numberOfDisparities = numberOfDisparities;
sgbm.uniquenessRatio = 10;
sgbm.speckleWindowSize = bm.state->speckleWindowSize;
sgbm.speckleRange = bm.state->speckleRange;
sgbm.disp12MaxDiff = 1;
sgbm.fullDP = alg == STEREO_HH;
Mat disp, disp8;
//Mat img1p, img2p, dispp;
//copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
//copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
int64 t = getTickCount();
if( alg == STEREO_BM)
bm(img1, img2, disp);
else
sgbm(img1, img2, disp);
t = getTickCount() - t;
printf("Time elapsed: %fms\n", t*1000/getTickFrequency());
//disp = dispp.colRange(numberOfDisparities, img1p.cols);
disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
if( !no_display )
{
namedWindow("left", 1);
imshow("left", img1);
namedWindow("right", 1);
imshow("right", img2);
namedWindow("disparity", 0);
imshow("disparity", disp8);
printf("press any key to continue...");
fflush(stdout);
waitKey();
printf("\n");
}
if(disparity_filename)
imwrite(disparity_filename, disp8);
if(point_cloud_filename)
{
printf("storing the point cloud...");
fflush(stdout);
Mat xyz;
reprojectImageTo3D(disp, xyz, Q, true);
saveXYZ(point_cloud_filename, xyz);
printf("\n");
}
return 0;
}
//// Note: you must link against the ---d.lib (debug) versions of the OpenCV libraries here, otherwise FileStorage cannot open the files; I am not sure where the problem comes from. The full list of libraries I link is at the end of this post.
To output the disparity map and the point cloud, set the command-line arguments when debugging under Property -> Configuration Properties -> Debugging -> Command Arguments.

I set them like this: jpgleft0.jpg jpgright0.jpg --algorithm=bm --blocksize=7 --max-disparity=96 -i intrinsics.yml -e extrinsics.yml -o depth.jpg -p point.xyz
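As a side note, the disparity map relates to depth through the Q matrix produced by stereoRectify; reprojectImageTo3D in the code above applies this to the whole image. The sketch below shows the same computation for a single pixel; the function name depthAt is hypothetical, and the result comes out in the units of the square size used during calibration.

// Sketch: depth of one pixel from the CV_16S disparity map (BM/SGBM output)
// and the 4x4 reprojection matrix Q from stereoRectify.
#include <opencv2/core/core.hpp>
using namespace cv;

static double depthAt(const Mat& disp, const Mat& Q, int px, int py)
{
    double d = disp.at<short>(py, px) / 16.0;        // BM/SGBM store disparity*16
    Mat v = (Mat_<double>(4, 1) << px, py, d, 1.0);
    Mat XYZW = Q * v;                                // [X Y Z W]^T = Q * [x y d 1]^T
    return XYZW.at<double>(2) / XYZW.at<double>(3);  // perspective divide gives depth Z
}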

The stereo calibration and stereo matching programs are in fact a single piece of code from the book Learning OpenCV; in the samples it has been split into two parts.

Besides fiddling with this code, I have also spent the past while studying the theory behind stereo vision; if I find the time I will write it up in another post. My personal feeling is that, for measurement with stereo vision, the software component that affects accuracy the most is the matching.

I have set up a QQ group and welcome everyone working in computer vision to join. Whether you are still in school or already working, a newcomer or an expert, doing technical work or having moved into another role, you are sincerely welcome; only by exchanging ideas can we improve together.

Group number: 130541036 (this group is full; please join 194369242 instead).

opencv_contrib247d.lib
opencv_core247d.lib
opencv_features2d247d.lib
opencv_flann247d.lib
opencv_gpu247d.lib
opencv_highgui247d.lib
opencv_imgproc247d.lib
opencv_legacy247d.lib
opencv_ml247d.lib
opencv_objdetect247d.lib
opencv_ts247d.lib
opencv_video247d.lib
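If you prefer not to enter these in the project's linker settings, a minimal alternative (MSVC-specific, assuming the same 2.4.7 debug builds listed above) is to pull them in with pragma directives; opencv_calib3d247d.lib is also added here, since the calibration and rectification functions live in that module.

// Sketch: link the OpenCV 2.4.7 debug libraries directly from source (MSVC only).
#ifdef _DEBUG
#pragma comment(lib, "opencv_core247d.lib")
#pragma comment(lib, "opencv_imgproc247d.lib")
#pragma comment(lib, "opencv_highgui247d.lib")
#pragma comment(lib, "opencv_calib3d247d.lib")   // calibration / rectification
#pragma comment(lib, "opencv_features2d247d.lib")
#pragma comment(lib, "opencv_flann247d.lib")
#pragma comment(lib, "opencv_video247d.lib")
#pragma comment(lib, "opencv_objdetect247d.lib")
#pragma comment(lib, "opencv_ml247d.lib")
#pragma comment(lib, "opencv_legacy247d.lib")
#pragma comment(lib, "opencv_contrib247d.lib")
#pragma comment(lib, "opencv_gpu247d.lib")
#pragma comment(lib, "opencv_ts247d.lib")
#endif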