双目立体视觉对比试验
2018-01-12 09:00
204 查看
%%
% Binocular stereo vision comparison experiment.
% Reconstructs a 3-D point cloud from a stereo image pair via feature
% tracking, fundamental-matrix estimation and triangulation.
%%
% Reset the workspace.
clc;
clear;
close all;
%%
% Load the stereo image pair.
I1 = imread('pattern_cam1_im1.png');
I2 = imread('pattern_cam2_im1.png');
figure
imshowpair(I1, I2, 'montage');
title('Original Images');
% Load the calibrated camera parameters; the MAT-file is expected to
% provide camera1Params and camera2Params (both are used below).
load cameraParams.mat
%%
% Remove lens distortion from each image using the parameters of the
% camera that captured it. (Bug fix: I2 was previously undistorted with
% camera1Params; the second camera's image must use camera2Params.)
I1 = undistortImage(I1, camera1Params);
I2 = undistortImage(I2, camera2Params);
figure
imshowpair(I1, I2, 'montage');
title('Undistorted Images');
%%
% Detect corner features in the first image (minimum-eigenvalue method);
% MinQuality 0.1 keeps only fairly strong corners for pose estimation.
imagePoints1 = detectMinEigenFeatures(rgb2gray(I1), 'MinQuality', 0.1);
%%
% Visualize the strongest detected corners.
figure
imshow(I1, 'InitialMagnification', 50);
title('150 Strongest Corners from the First Image');
hold on
plot(selectStrongest(imagePoints1, 150));
%%
% Create the point tracker (KLT). MaxBidirectionalError 1 discards
% points whose forward/backward track disagrees by more than 1 pixel.
tracker = vision.PointTracker('MaxBidirectionalError', 1, 'NumPyramidLevels', 5);
% Extract raw [x y] locations and seed the tracker on image 1.
imagePoints1 = imagePoints1.Location;
initialize(tracker, imagePoints1, I1);
% Track the points into image 2; keep only reliably tracked points.
[imagePoints2, validIdx] = step(tracker, I2);
matchedPoints1 = imagePoints1(validIdx, :);
matchedPoints2 = imagePoints2(validIdx, :);
%%
% Display the matched (tracked) feature pairs.
figure
showMatchedFeatures(I1, I2, matchedPoints1, matchedPoints2);
title('Tracked Features');
%%
% Estimate the fundamental matrix with MSAC and flag epipolar inliers.
[fMatrix, epipolarInliers] = estimateFundamentalMatrix(...
matchedPoints1, matchedPoints2, 'Method', 'MSAC', 'NumTrials', 10000);
% Keep only epipolar-consistent correspondences.
inlierPoints1 = matchedPoints1(epipolarInliers, :);
inlierPoints2 = matchedPoints2(epipolarInliers, :);
% Show the inliers.
figure
showMatchedFeatures(I1, I2, inlierPoints1, inlierPoints2);
title('Epipolar Inliers');
%%
% Hard-coded relative rotation R and translation t of camera 2 with
% respect to camera 1 (a RANSAC-based pose recovery could be used
% instead). NOTE(review): these constants are specific to this rig;
% verify against the calibration before reusing.
R = [0.9455,-0.0096,0.3253;
0.0120,0.9999,-0.0053;
-0.3252,0.0090,0.9456];
t = [98.4069,0.1741,18.9018];
%%
% Re-detect features with a much lower quality threshold (0.001) to get
% a dense point set for reconstruction.
imagePoints1 = detectMinEigenFeatures(rgb2gray(I1), 'MinQuality', 0.001);
%%
% Create the point tracker for the dense points.
tracker = vision.PointTracker('MaxBidirectionalError', 1, 'NumPyramidLevels', 5);
% Initialize the point tracker on image 1.
imagePoints1 = imagePoints1.Location;
initialize(tracker, imagePoints1, I1);
% Track the points into image 2; keep reliable tracks only.
[imagePoints2, validIdx] = step(tracker, I2);
matchedPoints1 = imagePoints1(validIdx, :);
matchedPoints2 = imagePoints2(validIdx, :);
%%
% Camera projection matrices: camera 1 at the origin, camera 2 posed by
% the relative [R, t] (cameraMatrix takes the transposed rotation R'
% and the translation -t*R' per the MATLAB convention).
camMatrix1 = cameraMatrix(camera1Params, eye(3), [0 0 0]);
camMatrix2 = cameraMatrix(camera2Params, R', -t*R');
% Triangulate the matched pairs into 3-D world coordinates.
points3D = triangulate(matchedPoints1, matchedPoints2, camMatrix1, camMatrix2);
% Look up the color of each reconstructed point from image 1 by
% converting its (rounded) pixel coordinates to a linear index.
numPixels = size(I1, 1) * size(I1, 2);
allColors = reshape(I1, [numPixels, 3]);
colorIdx = sub2ind([size(I1, 1), size(I1, 2)], round(matchedPoints1(:,2)), ...
round(matchedPoints1(:, 1)));
color = allColors(colorIdx, :);
% Build the colored point cloud.
ptCloud = pointCloud(points3D, 'Color', color);
%%
% Visualize both camera poses and the reconstructed point cloud.
cameraSize = 0.3;
figure
plotCamera('Size', cameraSize, 'Color', 'r', 'Label', '1', 'Opacity', 0);
hold on
grid on
% Camera 2 drawn at its relative pose [R, t] with respect to camera 1.
plotCamera('Location', t, 'Orientation', R, 'Size', cameraSize, ...
'Color', 'b', 'Label', '2', 'Opacity', 0);
% Render the point cloud (y axis pointing down, image convention).
pcshow(ptCloud, 'VerticalAxis', 'y', 'VerticalAxisDir', 'down', ...
'MarkerSize', 45);
% Rotate and zoom the plot for a better view.
camorbit(0, -30);
camzoom(1.5);
% Label the axes.
xlabel('x-axis');
ylabel('y-axis');
zlabel('z-axis')
title('Up to Scale Reconstruction of the Scene');
% Binocular stereo vision
% comparison experiment (duplicate listing of the same script).
%%
% Reset the workspace.
clc;
clear;
close all;
%%
% Load the stereo image pair.
I1 = imread('pattern_cam1_im1.png');
I2 = imread('pattern_cam2_im1.png');
figure
imshowpair(I1, I2, 'montage');
title('Original Images');
% Load the calibrated camera parameters; the MAT-file is expected to
% provide camera1Params and camera2Params (both are used below).
load cameraParams.mat
%%
% Remove lens distortion from each image using the parameters of the
% camera that captured it. (Bug fix: I2 was previously undistorted with
% camera1Params; the second camera's image must use camera2Params.)
I1 = undistortImage(I1, camera1Params);
I2 = undistortImage(I2, camera2Params);
figure
imshowpair(I1, I2, 'montage');
title('Undistorted Images');
%%
% Detect corner features in the first image (minimum-eigenvalue method);
% MinQuality 0.1 keeps only fairly strong corners for pose estimation.
imagePoints1 = detectMinEigenFeatures(rgb2gray(I1), 'MinQuality', 0.1);
%%
% Visualize the strongest detected corners.
figure
imshow(I1, 'InitialMagnification', 50);
title('150 Strongest Corners from the First Image');
hold on
plot(selectStrongest(imagePoints1, 150));
%%
% Create the point tracker (KLT). MaxBidirectionalError 1 discards
% points whose forward/backward track disagrees by more than 1 pixel.
tracker = vision.PointTracker('MaxBidirectionalError', 1, 'NumPyramidLevels', 5);
% Extract raw [x y] locations and seed the tracker on image 1.
imagePoints1 = imagePoints1.Location;
initialize(tracker, imagePoints1, I1);
% Track the points into image 2; keep only reliably tracked points.
[imagePoints2, validIdx] = step(tracker, I2);
matchedPoints1 = imagePoints1(validIdx, :);
matchedPoints2 = imagePoints2(validIdx, :);
%%
% Display the matched (tracked) feature pairs.
figure
showMatchedFeatures(I1, I2, matchedPoints1, matchedPoints2);
title('Tracked Features');
%%
% Estimate the fundamental matrix with MSAC and flag epipolar inliers.
[fMatrix, epipolarInliers] = estimateFundamentalMatrix(...
matchedPoints1, matchedPoints2, 'Method', 'MSAC', 'NumTrials', 10000);
% Keep only epipolar-consistent correspondences.
inlierPoints1 = matchedPoints1(epipolarInliers, :);
inlierPoints2 = matchedPoints2(epipolarInliers, :);
% Show the inliers.
figure
showMatchedFeatures(I1, I2, inlierPoints1, inlierPoints2);
title('Epipolar Inliers');
%%
% Hard-coded relative rotation R and translation t of camera 2 with
% respect to camera 1 (a RANSAC-based pose recovery could be used
% instead). NOTE(review): these constants are specific to this rig;
% verify against the calibration before reusing.
R = [0.9455,-0.0096,0.3253;
0.0120,0.9999,-0.0053;
-0.3252,0.0090,0.9456];
t = [98.4069,0.1741,18.9018];
%%
% Re-detect features with a much lower quality threshold (0.001) to get
% a dense point set for reconstruction.
imagePoints1 = detectMinEigenFeatures(rgb2gray(I1), 'MinQuality', 0.001);
%%
% Create the point tracker for the dense points.
tracker = vision.PointTracker('MaxBidirectionalError', 1, 'NumPyramidLevels', 5);
% Initialize the point tracker on image 1.
imagePoints1 = imagePoints1.Location;
initialize(tracker, imagePoints1, I1);
% Track the points into image 2; keep reliable tracks only.
[imagePoints2, validIdx] = step(tracker, I2);
matchedPoints1 = imagePoints1(validIdx, :);
matchedPoints2 = imagePoints2(validIdx, :);
%%
% Camera projection matrices: camera 1 at the origin, camera 2 posed by
% the relative [R, t] (cameraMatrix takes the transposed rotation R'
% and the translation -t*R' per the MATLAB convention).
camMatrix1 = cameraMatrix(camera1Params, eye(3), [0 0 0]);
camMatrix2 = cameraMatrix(camera2Params, R', -t*R');
% Triangulate the matched pairs into 3-D world coordinates.
points3D = triangulate(matchedPoints1, matchedPoints2, camMatrix1, camMatrix2);
% Look up the color of each reconstructed point from image 1 by
% converting its (rounded) pixel coordinates to a linear index.
numPixels = size(I1, 1) * size(I1, 2);
allColors = reshape(I1, [numPixels, 3]);
colorIdx = sub2ind([size(I1, 1), size(I1, 2)], round(matchedPoints1(:,2)), ...
round(matchedPoints1(:, 1)));
color = allColors(colorIdx, :);
% Build the colored point cloud.
ptCloud = pointCloud(points3D, 'Color', color);
%%
% Visualize both camera poses and the reconstructed point cloud.
cameraSize = 0.3;
figure
plotCamera('Size', cameraSize, 'Color', 'r', 'Label', '1', 'Opacity', 0);
hold on
grid on
% Camera 2 drawn at its relative pose [R, t] with respect to camera 1.
plotCamera('Location', t, 'Orientation', R, 'Size', cameraSize, ...
'Color', 'b', 'Label', '2', 'Opacity', 0);
% Render the point cloud (y axis pointing down, image convention).
pcshow(ptCloud, 'VerticalAxis', 'y', 'VerticalAxisDir', 'down', ...
'MarkerSize', 45);
% Rotate and zoom the plot for a better view.
camorbit(0, -30);
camzoom(1.5);
% Label the axes.
xlabel('x-axis');
ylabel('y-axis');
zlabel('z-axis')
title('Up to Scale Reconstruction of the Scene');
相关文章推荐
- 双目立体视觉系统精度分析
- 双目立体视觉SAD匹配算法
- 【视觉-三维视觉技术-理论篇】三维视觉技术----双目立体视觉,结构光等-战略总结0
- 双目立体视觉的数学原理
- 第六十三篇:S32V234汽车电子应用功能挖掘的相关文章:双目立体、雷达视觉融合、ADAS集成
- S32V234汽车电子应用功能挖掘的相关文章:双目立体、雷达视觉融合、ADAS集成
- 【双目视觉探索路3】分析整理Learning OpenCV3书中立体标定、校正以及对应代码(1)之总体
- 双目视觉学习总结(3)——立体匹配
- 【立体视觉】双目立体标定与立体校正
- 利用华硕e6308_P8P67_PRO主板开发双目立体视觉问题小结
- [立体视觉学习笔记]--(1)常见双目相机
- (七)立体标定与立体校正 【计算机视觉学习笔记--双目视觉几何框架系列】
- 双目立体视觉技术涉及到的基本概念
- 立体视觉–stereo correspondence(双目立体匹配)
- 立体视觉--stereo correspondence(双目立体匹配)
- 双目立体视觉
- 【视觉-立体视觉】双目测距与三维重建的OpenCV实现问题集锦
- 双目立体视觉及正交偏振 3D 显示
- 三维重建学习(5):简单地从数学原理层面理解双目立体视觉
- 双目视觉——立体匹配基本理论