A Summary of 《视觉SLAM十四讲》 (14 Lectures on Visual SLAM), 6.2: VO — Triangulating (XYZ) from 2D-2D Matches

I. Principle

In the previous step, the epipolar constraint was used to estimate the camera motion, giving R and t.

The next step is to use this motion to estimate the spatial position of each feature point.

Triangulation: by observing the same point from two positions, the angle between the two lines of sight determines the point's distance (depth).

The depths are solved by least squares.

Let x1 and x2 be the normalized camera coordinates of a matched pair of feature points, with depths s1 and s2. They satisfy

s1 * x1 = s2 * R * x2 + t

With R and t already known, we want to solve for s1 and s2.

Left-multiplying both sides by x1^ (the skew-symmetric matrix of x1) gives

s1 * x1^ * x1 = 0 = s2 * x1^ * R * x2 + x1^ * t
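
Written out explicitly (a small derivation added here for clarity, using the same symbols as above; $\hat{x}_1$ is the skew-symmetric matrix of $x_1$, so $\hat{x}_1 x_1 = 0$), the second relation is three scalar equations in the single unknown $s_2$, and its least-squares solution is

$$
s_2\,(\hat{x}_1 R x_2) = -\hat{x}_1 t
\quad\Longrightarrow\quad
s_2 = -\frac{(\hat{x}_1 R x_2)^{\top}(\hat{x}_1 t)}{\lVert \hat{x}_1 R x_2 \rVert^{2}} .
$$

Substituting $s_2$ back into the first equation then gives $s_1$.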

II. Code Walkthrough

1. Main framework

int main(int argc, char** argv)
{
	Mat img_1 = imread("1.png");
	Mat img_2 = imread("2.png");

	vector<KeyPoint> keypoints_1, keypoints_2;
	vector<DMatch> matches;
	//1. Detect feature points (keypoints) and match descriptors (matches)
	find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
	cout << "found " << matches.size() << " matched point pairs in total" << endl;

	//2. Estimate the camera motion (R, t) between the two images
	Mat R, t;
	pose_estimation_2d2d(keypoints_1, keypoints_2, matches, R, t);

	//3. Triangulate to obtain the 3D coordinates points (XYZ)
	vector<Point3d> points;
	triangulation(keypoints_1, keypoints_2, matches, R, t, points);

	//4. Verify the reprojection relationship between the triangulated points and the feature points
	Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
	for (int i = 0; i < (int)matches.size(); i++)
	{
		Point2d pt1_cam = pixel2cam(keypoints_1[matches[i].queryIdx].pt, K); // normalized camera coordinates (x, y) of the feature in image 1
		Point2d pt1_cam_3d(
			points[i].x / points[i].z,
			points[i].y / points[i].z
			);

		cout << "point in the first camera frame: " << pt1_cam << endl;
		cout << "point projected from 3D " << pt1_cam_3d << ", d=" << points[i].z << endl;

		// Second image
		Point2f pt2_cam = pixel2cam(keypoints_2[matches[i].trainIdx].pt, K);
		Mat pt2_trans = R*(Mat_<double>(3, 1) << points[i].x, points[i].y, points[i].z) + t;//R*x1+t
		pt2_trans /= pt2_trans.at<double>(2, 0); // divide by depth z to get normalized coordinates on the z = 1 plane
		cout << "point in the second camera frame: " << pt2_cam << endl;
		cout << "point reprojected from second frame: " << pt2_trans.t() << endl; // .t() transposes for printing as a row vector
		cout << endl;
	}

	return 0;
}
  • The triangulated points (XYZ) are in the world frame (here, the first camera's frame); projecting onto the normalized image plane gives x/z and y/z.
  • Each feature point is converted from pixel coordinates p to normalized camera coordinates x: Point2d pt1_cam = pixel2cam(keypoints_1[matches[i].queryIdx].pt, K); (a minimal sketch of pixel2cam is given after this list).
  • For the second image, the triangulated point from the first frame is transformed into the second camera frame and then projected: x2 = R*x1 + t.
  • Normalization:
    pt2_trans /= pt2_trans.at<double>(2, 0);
    dividing by the depth (z component) puts the point on the z = 1 normalized plane.
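
pixel2cam is used throughout but its definition is not listed in this post. A minimal sketch under the usual pinhole model, x = (u - cx)/fx and y = (v - cy)/fy, would look like this:

Point2d pixel2cam(const Point2d& p, const Mat& K)
{
	// Normalized camera coordinates: subtract the principal point, divide by the focal length
	return Point2d(
		(p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
		(p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
		);
}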

2. The triangulation function

Call:

triangulation(keypoints_1, keypoints_2, matches, R, t, points);

Definition:
void triangulation(
	const vector< KeyPoint >& keypoint_1,
	const vector< KeyPoint >& keypoint_2,
	const std::vector< DMatch >& matches,
	const Mat& R, const Mat& t,
	vector< Point3d >& points)
{
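	// Projection matrices in normalized camera coordinates:
	// T1 = [I | 0] takes the first camera frame as the reference (world) frame;
	// T2 = [R | t] is the pose of the second camera relative to the first.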
	Mat T1 = (Mat_<float>(3, 4) <<
		1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 0);
	Mat T2 = (Mat_<float>(3, 4) <<
		R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
		R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
		R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0)
		);

	Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
	vector<Point2f> pts_1, pts_2;
	// C++11 range-based for loop
	for (DMatch m : matches)
	{
		// Convert pixel coordinates to normalized camera coordinates
		pts_1.push_back(pixel2cam(keypoint_1[m.queryIdx].pt, K));
		pts_2.push_back(pixel2cam(keypoint_2[m.trainIdx].pt, K));
	}

	Mat pts_4d;
	cv::triangulatePoints(T1, T2, pts_1, pts_2, pts_4d);

	// Convert to non-homogeneous (Euclidean) coordinates
	for (int i = 0; i<pts_4d.cols; i++)
	{
		Mat x = pts_4d.col(i);
		x /= x.at<float>(3, 0); // divide by the homogeneous coordinate w
		Point3d p(
			x.at<float>(0, 0),
			x.at<float>(1, 0),
			x.at<float>(2, 0)
			);
		points.push_back(p);
	}
}
  • Range-based for loop (a C++11 feature):
// C++11 style
   for (DMatch m : matches)
	{
		// Convert pixel coordinates to normalized camera coordinates
		pts_1.push_back(pixel2cam(keypoint_1[m.queryIdx].pt, K));
		pts_2.push_back(pixel2cam(keypoint_2[m.trainIdx].pt, K));
	}
// Conventional index-based style

	for (int i = 0; i < (int)matches.size(); i++)
	{
		points1.push_back(keypoints_1[matches[i].queryIdx].pt); // queryIdx: index of the matched keypoint in the first image
		points2.push_back(keypoints_2[matches[i].trainIdx].pt); // trainIdx: index of the matched keypoint in the second image
	}
  • Usage of cv::triangulatePoints:
void cv::triangulatePoints(InputArray projMatr1, InputArray projMatr2,
                           InputArray projPoints1, InputArray projPoints2,
                           OutputArray points4D);
projMatr1 (T1): 3x4 projection matrix of the first camera.
projMatr2 (T2): 3x4 projection matrix of the second camera.
projPoints1 (pts_1): 2xN array of feature points in the first image (here given in normalized camera coordinates, since K is not folded into T1/T2).
projPoints2 (pts_2): 2xN array of the corresponding points in the second image.
points4D (pts_4d): 4xN array of reconstructed points in homogeneous coordinates.
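
Because pts_1 and pts_2 are already normalized camera coordinates, the projection matrices passed here are simply [I | 0] and [R | t]. Equivalently, one could triangulate directly from pixel coordinates by folding K into the projection matrices. A sketch of that variant (not part of the original code; points1_px and points2_px stand for hypothetical vector<Point2f> containers holding the matched pixel coordinates):

	Mat K_f = (Mat_<float>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
	Mat P1 = K_f * T1;   // 3x4 pixel-space projection: K * [I | 0]
	Mat P2 = K_f * T2;   // 3x4 pixel-space projection: K * [R | t]
	Mat pts_4d_px;
	cv::triangulatePoints(P1, P2, points1_px, points2_px, pts_4d_px);
	// Up to numerical differences in the linear solve, the recovered points
	// should match those from the normalized-coordinate call above.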

III. Summary

Triangulation cannot be used under pure rotation, because the epipolar constraint is always satisfied in that case: with t = 0, the relation s1*x1 = s2*R*x2 fixes only the ratio of the depths, so translation is required.

The larger the translation, the more accurate the triangulated depth.

Reprinted from blog.csdn.net/try_again_later/article/details/81704154