利用标定板做视频流的视觉定位 (Visual localization of a video stream using a calibration chessboard)
2015-11-11 16:48
288 查看 (288 views)
事先测出摄像机的内参数 (The camera's intrinsic parameters are measured in advance.)
#include<cv.h>
#include<highgui.h>
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#include"calculator.h"
using namespace cv;
using namespace std;
//extern void PrintMat(CvMat *A);
//extern int calcRtfromHomo(CvMat *H);
//extern void Matrix3together(CvMat *r1, CvMat *r2, CvMat *r3,CvMat *Tran);
extern int calibration();
/*
 * Chessboard-based visual localization of a live video stream
 * (legacy OpenCV 1.x C API).
 *
 * Loads camera intrinsics measured ahead of time, undistorts each frame,
 * detects an 8x6 inner-corner chessboard every `featrues_dt` frames, solves
 * the board pose [R|t] and a board<->image homography, then switches to a
 * good-features-to-track loop until ESC is pressed.
 *
 * Returns 0 on success, 1 if the calibration files cannot be loaded.
 */
int main()
{
    /* Load the intrinsic matrix and distortion coefficients produced by a
     * prior calibration run (see calibration() in calculator.h). */
    CvMat *intrinsics = (CvMat*)cvLoad("Intrinsics.xml");
    CvMat *distortion = (CvMat*)cvLoad("Distortion.xml");
    if (!intrinsics || !distortion)
    {
        /* Original code dereferenced these unchecked and crashed in
         * cvInitUndistortMap when the XML files were missing. */
        fprintf(stderr, "Cannot load Intrinsics.xml / Distortion.xml\n");
        return 1;
    }
    PrintMat(intrinsics);
    PrintMat(distortion);

    const int featrues_dt = 20;  // process every 20th frame, leaving time to move the board
    const int featrues_w  = 8;   // inner corners per chessboard row
    const int featrues_h  = 6;   // inner corners per chessboard column
    const int featrues_n  = featrues_w * featrues_h;  // total corners per view (8*6)
    printf("%d\n", featrues_n);
    CvSize featrues_sz = cvSize(featrues_w, featrues_h);

    CvCapture *capturenew = cvCreateCameraCapture(0);  // open the default camera
    assert(capturenew);
    cvNamedWindow("thefirst", CV_WINDOW_AUTOSIZE);

    /* Per-view correspondence buffers: 2D image points and 3D board points (Z = 0). */
    CvMat* Timage_points       = cvCreateMat(featrues_n, 2, CV_32FC1);
    CvMat* Tobject_points      = cvCreateMat(featrues_n, 3, CV_32FC1);
    CvMat* homography          = cvCreateMat(3, 3, CV_32FC1);
    CvMat* rotation_vector     = cvCreateMat(3, 1, CV_32FC1);  // Rodrigues vector
    CvMat* translation_vector  = cvCreateMat(3, 1, CV_32FC1);  // in board-square units
    CvMat* translation_vector1 = cvCreateMat(3, 1, CV_32FC1);  // in millimetres
    CvPoint2D32f *cornersnew = new CvPoint2D32f[featrues_n];
    CvPoint2D32f *corners2   = new CvPoint2D32f[featrues_n];
    int corner_cnt = 0;
    int frames = 0;

    /* Work buffers sized for 640x480 frames — assumes the camera delivers
     * that size (TODO confirm against the actual capture resolution). */
    IplImage *gray_image1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U,  1);
    IplImage *eigimage1   = cvCreateImage(cvSize(640, 480), IPL_DEPTH_32F, 1);
    IplImage *temp_img    = cvCreateImage(cvSize(640, 480), IPL_DEPTH_32F, 1);
    IplImage *mapx        = cvCreateImage(cvSize(640, 480), IPL_DEPTH_32F, 1);
    IplImage *mapy        = cvCreateImage(cvSize(640, 480), IPL_DEPTH_32F, 1);
    cvInitUndistortMap(intrinsics, distortion, mapx, mapy);  // build remap tables once
    cvNamedWindow("Undistort1");

    /* Pose-estimation loop: detect the board and solve [R|t] every featrues_dt frames. */
    IplImage *image1 = NULL;
    while (capturenew)
    {
        image1 = cvQueryFrame(capturenew);
        if (!image1)   // camera unplugged / end of stream (original crashed here)
            break;
        cvShowImage("thefirst", image1);

        /* Undistort in place: remap a clone of the frame back into the frame buffer. */
        IplImage *t = cvCloneImage(image1);
        cvRemap(t, image1, mapx, mapy);
        cvReleaseImage(&t);
        picturebetter(image1);
        cvShowImage("Undistort1", image1);

        if (frames++ % featrues_dt == 0)  // every 20 frames, leaving time to move the board
        {
            int found1 = cvFindChessboardCorners(
                image1, featrues_sz, cornersnew, &corner_cnt,
                CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
            cvCvtColor(image1, gray_image1, CV_BGR2GRAY);
            if (corner_cnt > 0)  // subpixel refinement is meaningless on zero corners
                cvFindCornerSubPix(gray_image1, cornersnew, corner_cnt,
                                   cvSize(11, 11), cvSize(-1, -1),
                                   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
            /* Draw the detected corners for visual feedback. */
            cvDrawChessboardCorners(image1, featrues_sz, cornersnew, corner_cnt, found1);

            /* Only solve pose on a complete, confirmed detection (original
             * ignored found1 and could use an unordered partial pattern). */
            if (found1 && corner_cnt == featrues_n)
            {
                /* Board coordinates in square units: (row, col, 0). */
                for (int j = 0; j < featrues_n; j++)
                {
                    CV_MAT_ELEM(*Tobject_points, float, j, 0) = (float)(j / featrues_w);
                    CV_MAT_ELEM(*Tobject_points, float, j, 1) = (float)(j % featrues_w);
                    CV_MAT_ELEM(*Tobject_points, float, j, 2) = 0.0f;
                    CV_MAT_ELEM(*Timage_points,  float, j, 0) = cornersnew[j].x;
                    CV_MAT_ELEM(*Timage_points,  float, j, 1) = cornersnew[j].y;
                }
                /* Board pose relative to the camera (rotation as Rodrigues vector). */
                cvFindExtrinsicCameraParams2(Tobject_points, Timage_points,
                                             intrinsics, distortion,
                                             rotation_vector, translation_vector);
                PrintMat(rotation_vector);

                /* Scale translation by the real corner spacing (32 mm per square). */
                for (int i = 0; i < 3; i++)
                    CV_MAT_ELEM(*translation_vector1, float, i, 0) =
                        CV_MAT_ELEM(*translation_vector, float, i, 0) * 32;
                PrintMat(translation_vector1);

                cvFindHomography(Timage_points, Tobject_points, homography, 0);
                cvSave("homography.xml", homography);  // persist the H matrix
                calcRtfromHomo(homography);
            }
        }

        int c = cvWaitKey(50);
        if (c == 'p')  // 'p' pauses; press 'p' again to resume or ESC to quit
        {
            c = 0;
            while (c != 'p' && c != 27)
                c = cvWaitKey(250);
        }
        if (c == 27)   // ESC leaves the pose loop
            break;
    }

    /* Feature-tracking loop: re-detect strong corners each frame until ESC.
     * The original loop had no exit condition and never reset corner_cont. */
    while (capturenew)
    {
        image1 = cvQueryFrame(capturenew);
        if (!image1)
            break;
        cvCvtColor(image1, gray_image1, CV_BGR2GRAY);
        int corner_cont = featrues_n;  // in: capacity; out: number of corners found
        cvGoodFeaturesToTrack(gray_image1, eigimage1, temp_img,
                              corners2, &corner_cont,
                              0.01,   // quality level
                              3,      // min distance between corners
                              NULL,   // no mask
                              3,      // block size
                              0,      // use_harris = false
                              0.4);   // k (ignored when use_harris == 0)
        if (cvWaitKey(10) == 27)
            break;
    }

    /* Release everything we created — the original leaked the capture, the
     * images, half of the matrices, and both new[] arrays. */
    delete[] cornersnew;
    delete[] corners2;
    cvReleaseMat(&Timage_points);
    cvReleaseMat(&Tobject_points);
    cvReleaseMat(&homography);
    cvReleaseMat(&rotation_vector);
    cvReleaseMat(&translation_vector);
    cvReleaseMat(&translation_vector1);
    cvReleaseMat(&intrinsics);
    cvReleaseMat(&distortion);
    cvReleaseImage(&gray_image1);
    cvReleaseImage(&eigimage1);
    cvReleaseImage(&temp_img);
    cvReleaseImage(&mapx);
    cvReleaseImage(&mapy);
    cvReleaseCapture(&capturenew);
    cvDestroyAllWindows();
    return 0;
}
#include<cv.h>
#include<highgui.h>
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#include"calculator.h"
using namespace cv;
using namespace std;
//extern void PrintMat(CvMat *A);
//extern int calcRtfromHomo(CvMat *H);
//extern void Matrix3together(CvMat *r1, CvMat *r2, CvMat *r3,CvMat *Tran);
extern int calibration();
// Chessboard-based visual localization of a live video stream (legacy OpenCV 1.x C API).
// NOTE(review): this is a byte-for-byte duplicate of the main() defined earlier in this
// file (blog-scrape copy-paste) — two definitions of main will not link; one copy must go.
int main()
{ /* Load the intrinsic matrix and distortion coefficients saved by a prior calibration run. */
CvMat *intrinsics = (CvMat*)cvLoad("Intrinsics.xml");
CvMat *distortion = (CvMat*)cvLoad("Distortion.xml");
// NOTE(review): cvLoad returns NULL if the XML is missing — unchecked here; verify.
PrintMat(intrinsics);
PrintMat(distortion);
//calibration();
//const int n_featrues = 30;
const int featrues_dt=20; // process every 20th frame, leaving time to move the board
const int featrues_w=8; // inner corners per chessboard row
const int featrues_h=6; // inner corners per chessboard column
int featrues_n = featrues_w*featrues_h; // total corners per view: 8*6
printf("%d\n",featrues_n);
CvSize featrues_sz = cvSize(featrues_w,featrues_h);
CvCapture *capturenew = cvCreateCameraCapture(0); // open the default camera
assert(capturenew);
cvNamedWindow("thefirst",CV_WINDOW_AUTOSIZE);
// Per-view correspondence buffers: 2D image points and 3D board points (Z = 0).
CvMat* Timage_points = cvCreateMat(featrues_n, 2, CV_32FC1);
CvMat* Tobject_points = cvCreateMat(featrues_n, 3, CV_32FC1);
CvMat* homography = cvCreateMat(3,3,CV_32FC1);
CvMat* rotation_vector = cvCreateMat(3, 1, CV_32FC1);
CvMat* translation_vector = cvCreateMat(3, 1, CV_32FC1);
CvMat* translation_vector1 = cvCreateMat(3, 1, CV_32FC1);
CvPoint2D32f *cornersnew = new CvPoint2D32f[featrues_n];
int corner_cont = featrues_n; // in/out capacity for cvGoodFeaturesToTrack below
CvPoint2D32f *corners2 = new CvPoint2D32f[featrues_n];
int corner_cnt=0;
int suc = 0; // NOTE(review): unused
int frames = 0;
IplImage *image1 = cvQueryFrame(capturenew); // primes the capture; frame re-queried in the loop
IplImage *image2 =NULL;
// Work buffers sized for 640x480 frames — assumes the camera delivers that size; TODO confirm.
IplImage *gray_image1 = cvCreateImage(cvSize(640,480),IPL_DEPTH_8U,1);
IplImage *eigimage1 = cvCreateImage(cvSize(640,480),IPL_DEPTH_32F,1);
IplImage *temp_img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_32F, 1);
IplImage* mapx = cvCreateImage(cvSize(640,480),IPL_DEPTH_32F,1);
IplImage* mapy = cvCreateImage(cvSize(640,480),IPL_DEPTH_32F,1);
cvInitUndistortMap(
intrinsics, // build the undistortion remap tables once
distortion,
mapx,
mapy
);
cvNamedWindow("Undistort1");
//cvNamedWindow("Undistort2");
int frameNum = 0; // NOTE(review): unused
// Pose-estimation loop: detect the board and solve [R|t] every featrues_dt frames.
while(capturenew)
{
image1 =cvQueryFrame(capturenew);
cvShowImage("thefirst", image1);
// Undistort in place: remap a clone of the frame back into the frame buffer.
IplImage *t = cvCloneImage(image1);
cvRemap(t,image1,mapx,mapy);
cvReleaseImage(&t);
picturebetter(image1);
/*frames++;
if (frames == 1)
{
image2 = cvCreateImage(cvGetSize(image1), 8, 3);
cvCopyImage(image1, image2);
}
else
{
cvCopyImage(image1, image2);
}*/
cvShowImage("Undistort1", image1);
//cvShowImage("Undistort2", image2);
if (frames++ % featrues_dt == 0) // every 20 frames, leaving time to move the board
{
int found1 = cvFindChessboardCorners(
image1, featrues_sz, cornersnew, &corner_cnt, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
cvCvtColor(image1, gray_image1, CV_BGR2GRAY);
cvFindCornerSubPix(gray_image1, cornersnew, corner_cnt, cvSize(11, 11), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
// Draw the detected corners for visual feedback
cvDrawChessboardCorners(image1, featrues_sz, cornersnew, corner_cnt, found1);
// NOTE(review): found1 is not checked here — a partial/unconfirmed pattern with the
// right count would still be used; confirm intent.
if (corner_cnt == featrues_n)
{
// Board coordinates in square units: (row, col, 0); image coordinates from the detector.
for (int j = 0; j < featrues_n; j++)
{
CV_MAT_ELEM(*Tobject_points, float, j, 0) = j / featrues_w;
CV_MAT_ELEM(*Tobject_points, float, j, 1) = j%featrues_w;
CV_MAT_ELEM(*Tobject_points, float, j, 2) = 0.0f;
CV_MAT_ELEM(*Timage_points, float, j, 0) = cornersnew[j].x;
CV_MAT_ELEM(*Timage_points, float, j, 1) = cornersnew[j].y;
}
// Solve the board pose relative to the camera (rotation as a Rodrigues vector).
cvFindExtrinsicCameraParams2(
Tobject_points,
Timage_points,
intrinsics,
distortion,
rotation_vector,
translation_vector
);
PrintMat(rotation_vector);
/* Scale translation by the real spacing between corners (32 mm per square). */
for (int i = 0; i < 3; i++)
{
CV_MAT_ELEM(*translation_vector1, float, i, 0) = CV_MAT_ELEM(*translation_vector, float, i, 0) * 32;
}
PrintMat(translation_vector1);
cvFindHomography(Timage_points, Tobject_points, homography, 0);
cvSave("homography.xml", homography); // save the resulting H matrix
calcRtfromHomo(homography);
}
}
int c=cvWaitKey(50);
if(c == 'p') // 'p' pauses; press 'p' again to resume or ESC to quit
{
c = 0;
while(c!='p'&&c!=27)
{
c =cvWaitKey(250);
}
}
if(c==27) // ESC leaves the pose loop
break;
//image1 = cvQueryFrame(capturenew);
}
cvReleaseMat(&Timage_points);
cvReleaseMat(&Tobject_points);
cvReleaseMat(&homography);
// Feature-tracking loop: re-detect strong corners each frame.
// NOTE(review): no exit condition or cvWaitKey — this loop never terminates; also
// corner_cont is overwritten with the found count and never reset to capacity,
// so the allowed maximum shrinks after the first frame. Confirm intent.
while (capturenew){
image1 = cvQueryFrame(capturenew);
cvCvtColor(image1, gray_image1, CV_BGR2GRAY);
cvGoodFeaturesToTrack(
gray_image1,
eigimage1,
temp_img,
corners2,
&corner_cont, // in: max corners; out: corners found
0.01, // quality level
3, NULL, // min distance; no mask
3, // block size
0, // use_harris = false
0.4 // k (ignored when use_harris == 0)
);
}
return 0;
}
相关文章推荐
- 小记
- 寻找无向连通图的割点
- touch详解
- java日志组件介绍(common-logging,log4j,slf4j,logback )
- 破了我的设计模式的处——痛苦并快乐着
- [Android] Otto源码简析
- 网络访问层
- Centos 6.3下升级subversion版本到1.7.x
- decorator模式
- iOS常用第三方库
- php的基本语法与字符串与增删改查
- JS设置cookie,读取cookie,删除cookie
- C++编译错误:multiple types in one declaration
- Perfect Squares
- 006 - ZigZag Conversion
- 第五章 处理器拦截器详解——跟着开涛学SpringMVC
- android监听应用安装,如判断是新安装的应用,还是覆盖安装的应用
- JAVA字符串格式化-String.format()的使用
- BAT解密:互联网技术发展之路(7)- 网络层技术剖析
- BAT解密:互联网技术发展之路(6)- 服务层技术剖析