OpenCV: Estimating the Projective Relations Between Images


The general camera model (the thin-lens formula):
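The standard thin-lens equation relates the object distance, the image distance, and the focal length:

$$\frac{1}{d_o} + \frac{1}{d_i} = \frac{1}{f}$$

where $d_o$ is the distance from the lens to the object, $d_i$ the distance from the lens to the image plane, and $f$ the focal length of the lens.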


A simplified model is called the pinhole camera model. By similar triangles, the basic projection equation is:
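$$x = f\,\frac{X}{Z}, \qquad y = f\,\frac{Y}{Z}$$

where $(X, Y, Z)$ is a scene point in camera coordinates, $f$ is the focal length, and $(x, y)$ is the point's projection on the image plane.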


*Camera calibration

Camera calibration is the process of determining the camera's parameters: the camera photographs a known pattern, the resulting images are analyzed, and the best parameter values are then determined in an optimization step.

OpenCV recommends using a chessboard pattern to generate the set of 3D scene points used for calibration. Because the pattern is planar, we can assume the board lies at Z=0, with the X and Y axes aligned with the grid.

Original image:


Detect and draw the corners with cv::findChessboardCorners() and cv::drawChessboardCorners():


The order of the lines connecting the corners is the order in which the corners are stored in the vector.

The principle of camera calibration:
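Calibration recovers the intrinsic parameters (focal lengths $f_x, f_y$ and principal point $(c_x, c_y)$) and, for each view, the rotation $R$ and translation $\mathbf{t}$ that project a 3D point onto the image. Distortion aside, this is the model that cv::calibrateCamera estimates:

$$s\begin{bmatrix}u\\ v\\ 1\end{bmatrix} = \begin{bmatrix}f_x & 0 & c_x\\ 0 & f_y & c_y\\ 0 & 0 & 1\end{bmatrix}\begin{bmatrix}R & \mathbf{t}\end{bmatrix}\begin{bmatrix}X\\ Y\\ Z\\ 1\end{bmatrix}$$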


Code:

The CameraCalibrator class:

#include <opencv2/opencv.hpp>
#include <string>
#include <vector>

class CameraCalibrator
{
private:
	// points in world coordinates
	std::vector<std::vector<cv::Point3f>> objectPoints;
	// points in image (pixel) coordinates
	std::vector<std::vector<cv::Point2f>> imagePoints;
	// output matrices
	cv::Mat cameraMatrix;
	cv::Mat distCoeffs;
	// flag specifying how calibration is done
	int flag = 0;
	// undistortion maps
	cv::Mat map1, map2;
	bool mustInitUndistort = true;
public:
	int addChessboardPoints(const std::vector<std::string>& filelist, cv::Size& boardSize);
	void addPoints(std::vector<cv::Point2f>& imageCorners, std::vector<cv::Point3f>& objectCorners);
	double calibrate(cv::Size& imageSize);
	cv::Mat remap(const cv::Mat& image)
	{
		cv::Mat undistorted;
		if (mustInitUndistort)
		{
			cv::initUndistortRectifyMap(
				cameraMatrix,  // computed camera matrix
				distCoeffs,    // computed distortion coefficients
				cv::Mat(),     // optional rectification transform (none here)
				cv::Mat(),     // camera matrix of the undistorted image (same as input)
				image.size(),  // size of the undistorted image
				CV_32FC1,      // type of the output maps
				map1, map2);   // the x and y mapping functions
			mustInitUndistort = false;
		}
		cv::remap(image, undistorted, map1, map2, cv::INTER_LINEAR);
		return undistorted;
	}
};
int CameraCalibrator::addChessboardPoints(const std::vector<std::string>& filelist, cv::Size& boardSize)
{
	// the detected corners in the image
	std::vector<cv::Point2f> imageCorners;
	// the corresponding 3D points in the scene: the board lies at Z=0,
	// with one unit per square
	std::vector<cv::Point3f> objectCorners;
	for (int i = 0; i < boardSize.height; i++)
	{
		for (int j = 0; j < boardSize.width; j++)
		{
			objectCorners.push_back(cv::Point3f(i, j, 0.0f));
		}
	}
	cv::Mat image;
	int success = 0;
	for (std::size_t i = 0; i < filelist.size(); i++)
	{
		// read the image in grayscale
		image = cv::imread(filelist[i], 0);
		bool found = cv::findChessboardCorners(image, boardSize, imageCorners);
		if (!found)
			continue; // cornerSubPix needs an initial set of corners
		// refine the corner locations to subpixel accuracy
		cv::cornerSubPix(image, imageCorners,
			cv::Size(5, 5),
			cv::Size(-1, -1),
			cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 30, 0.01));
		// if the whole board was found, add this view to the calibration data
		if (imageCorners.size() == boardSize.area())
		{
			addPoints(imageCorners, objectCorners);
			success++;
		}
	}
	return success;
}
void CameraCalibrator::addPoints(std::vector<cv::Point2f>& imageCorners, std::vector<cv::Point3f>& objectCorners)
{
	imagePoints.push_back(imageCorners);
	objectPoints.push_back(objectCorners);
}
double CameraCalibrator::calibrate(cv::Size& imageSize)
{
	// the undistortion maps must be recomputed after a new calibration
	mustInitUndistort = true;
	// output rotations and translations, one per view
	std::vector<cv::Mat> rvecs, tvecs;
	return cv::calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flag);
}
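For reference, here is a minimal sketch of how the class could be driven end to end; the chessboard file names are hypothetical:

// hypothetical driver: collect the views, calibrate, then undistort
std::vector<std::string> filelist;
for (int i = 1; i <= 20; i++)
	filelist.push_back("chessboard" + std::to_string(i) + ".jpg");

CameraCalibrator calibrator;
cv::Size boardSize(9, 6); // inner corners per row and per column
calibrator.addChessboardPoints(filelist, boardSize);

cv::Mat sample = cv::imread(filelist[0], 0);
double rms = calibrator.calibrate(sample.size()); // returns the reprojection error
std::cout << "reprojection error: " << rms << std::endl;
cv::Mat undistorted = calibrator.remap(sample);   // apply the computed undistortion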

The main function:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
	cv::Mat image = cv::imread("chessboard.jpg");
	cv::imshow("original image", image);
	std::vector<cv::Point2f> imageCorners;
	cv::Size boardSize(9, 6);
	// detect the chessboard corners
	bool found = cv::findChessboardCorners(image, boardSize, imageCorners);
	if (found)
	{
		std::cout << "Chessboard corners detected" << std::endl;
	}
	else
	{
		std::cout << "No chessboard corners detected" << std::endl;
	}
	// draw the corners
	cv::drawChessboardCorners(image, boardSize, imageCorners, found);
	cv::imshow("chessboard corners", image);
	cv::waitKey();
	return 0;
}
*Computing the fundamental matrix of an image pair

When two cameras photograph the same scene and are separated by a rigid baseline, we speak of stereo vision.

To find the point in the second image that corresponds to a point in the first, you must search along the projection, onto the second image plane, of the line joining that scene point and the focal point. This projected line is called an epipolar line.

Two corresponding points must satisfy the following conditions:

① for a point in one view, its matching point in the other view must lie on that point's epipolar line;

② the exact orientation of the epipolar line depends on the relative positions of the two cameras.

All epipolar lines pass through a single point. This special point is the projection of one camera's center onto the other camera's image plane, and it is called the epipole.
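Algebraically, this geometry is encoded by the 3×3 fundamental matrix $F$: two corresponding points $\mathbf{x}$ and $\mathbf{x}'$, in homogeneous pixel coordinates, satisfy the epipolar constraint

$$\mathbf{x}'^{\top} F\, \mathbf{x} = 0,$$

and $F\mathbf{x}$ is exactly the epipolar line of $\mathbf{x}$ in the second image.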

First, find the 7 best matches between the two images:

At this point the feature points are of type cv::KeyPoint and must be converted to cv::Point2f; then the fundamental matrix is computed and all the epipolar lines are drawn.

Code:

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <algorithm>

int main()
{
	cv::Mat image1 = cv::imread("eraser_left.jpg");
	cv::resize(image1, image1, cv::Size(480, 320));
	cv::Mat image2 = cv::imread("eraser_right.jpg");
	cv::resize(image2, image2, cv::Size(480, 320));
	// detect SURF features and compute their descriptors
	std::vector<cv::KeyPoint> keypoints1, keypoints2;
	cv::Ptr<cv::xfeatures2d::SURF> detector = cv::xfeatures2d::SURF::create(2000.0);
	detector->detect(image1, keypoints1);
	detector->detect(image2, keypoints2);
	cv::Ptr<cv::DescriptorExtractor> descriptor = detector;
	cv::Mat descriptor1, descriptor2;
	descriptor->compute(image1, keypoints1, descriptor1);
	descriptor->compute(image2, keypoints2, descriptor2);
	// match the descriptors and keep only the 7 best matches:
	// the 7-point algorithm requires exactly 7 point pairs
	cv::BFMatcher matcher(cv::NORM_L2);
	std::vector<cv::DMatch> matches;
	matcher.match(descriptor1, descriptor2, matches);
	std::nth_element(matches.begin(), matches.begin() + 7, matches.end());
	matches.erase(matches.begin() + 7, matches.end());
	cv::Mat matchedImage;
	cv::drawMatches(image1, keypoints1, image2, keypoints2, matches, matchedImage);
	cv::imshow("matched image", matchedImage);
	// convert the matched keypoints to cv::Point2f
	std::vector<cv::Point2f> selPoints1, selPoints2;
	std::vector<int> pointIndex1, pointIndex2;
	for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); it++)
	{
		pointIndex1.push_back(it->queryIdx);
		pointIndex2.push_back(it->trainIdx);
	}
	cv::KeyPoint::convert(keypoints1, selPoints1, pointIndex1);
	cv::KeyPoint::convert(keypoints2, selPoints2, pointIndex2);
	cv::Mat fundamental = cv::findFundamentalMat(cv::Mat(selPoints1), cv::Mat(selPoints2), cv::FM_7POINT);
	// the 7-point algorithm can return up to 3 stacked 3x3 solutions;
	// keep only the first one
	if (fundamental.rows > 3)
		fundamental = fundamental.rowRange(0, 3);
	std::vector<cv::Vec3f> lines1;
	cv::computeCorrespondEpilines(selPoints1, // the image points
		1,           // the points belong to image 1
		fundamental, // the fundamental matrix
		lines1);     // vector of epipolar lines
	// draw every epipolar line on image 2: a line (a,b,c) satisfies
	// ax+by+c=0, so y=-c/b at x=0 and y=-(c+a*cols)/b at x=cols
	for (std::vector<cv::Vec3f>::const_iterator it = lines1.begin(); it != lines1.end(); it++)
	{
		cv::line(image2, cv::Point(0, -(*it)[2] / (*it)[1]),
			cv::Point(image2.cols, -((*it)[2] + (*it)[0] * image2.cols) / (*it)[1]), cv::Scalar(255, 255, 255));
	}
	cv::imshow("correspondEpilines", image2);
	cv::waitKey();
	return 0;
}

Error log:

pointIndex1 and pointIndex2 were declared but never populated, and the program failed at runtime. After adding

for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); it++)
{
	pointIndex1.push_back(it->queryIdx);
	pointIndex2.push_back(it->trainIdx);
}
the program ran correctly.

The epipole lies at the intersection of all the epipolar lines, and it is the projection of the other camera's center.
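In terms of the fundamental matrix, the epipoles are its null vectors: $F\mathbf{e} = 0$ for the epipole $\mathbf{e}$ in the first image, and $F^{\top}\mathbf{e}' = 0$ for the epipole $\mathbf{e}'$ in the second.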


*Matching images with the RANSAC (RANdom SAmple Consensus) algorithm

The rule followed here: when matching the feature points of two images, accept only matches that lie on the corresponding epipolar lines. The key filtering step is sketched below.
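The RobustMatcher class used below comes from Laganière's OpenCV book and is not reproduced here. A minimal sketch of its core RANSAC step might look like the following; the function name and parameters are illustrative, not the book's API:

#include <opencv2/opencv.hpp>
#include <vector>

// RANSAC step: estimate F from all tentative matches, then keep only
// the matches that are consistent with it (the inliers)
std::vector<cv::DMatch> ransacFilter(
	const std::vector<cv::KeyPoint>& keypoints1,
	const std::vector<cv::KeyPoint>& keypoints2,
	const std::vector<cv::DMatch>& matches,
	double distance = 1.0,    // max distance to the epipolar line, in pixels
	double confidence = 0.98) // RANSAC confidence level
{
	// convert the matched keypoints to cv::Point2f
	std::vector<cv::Point2f> points1, points2;
	for (const cv::DMatch& m : matches)
	{
		points1.push_back(keypoints1[m.queryIdx].pt);
		points2.push_back(keypoints2[m.trainIdx].pt);
	}
	// inliers[i] != 0 marks a match that survives RANSAC
	std::vector<uchar> inliers(points1.size(), 0);
	cv::findFundamentalMat(points1, points2, inliers, cv::FM_RANSAC, distance, confidence);
	// keep only the inlier matches
	std::vector<cv::DMatch> outMatches;
	for (std::size_t i = 0; i < inliers.size(); i++)
		if (inliers[i])
			outMatches.push_back(matches[i]);
	return outMatches;
}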

The console output shows that RANSAC improves the set of matches:


However, when I tried to display the matches with cv::drawMatches, an error was raised; I am not sure why (most likely the DMatch indices no longer line up with the keypoint vectors returned by RobustMatcher, which makes cv::drawMatches assert).

Code:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
	cv::Mat image1 = cv::imread("eraser_left.jpg");
	cv::Mat image2 = cv::imread("eraser_right.jpg");
	cv::resize(image1, image1, cv::Size(480, 320));
	cv::resize(image2, image2, cv::Size(480, 320));
	RobustMatcher rmatcher("SURF");
	std::vector<cv::DMatch> matches;
	std::vector<cv::KeyPoint> keypoints1, keypoints2;
	cv::Mat fundamental = rmatcher.match(image1, image2, matches, keypoints1, keypoints2);
	// print the coordinates, in image 1, of the surviving matches
	int matchedCount = 0;
	for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); it++)
	{
		std::cout << "match " << matchedCount << " in image 1 is at ("
			<< keypoints1[it->queryIdx].pt.x << "," << keypoints1[it->queryIdx].pt.y << ")" << std::endl;
		matchedCount++;
	}
	int matchCount_before = static_cast<int>(rmatcher.matches_before.size());
	std::cout << "matches after filtering: " << matchedCount << std::endl;
	std::cout << "matches before filtering: " << matchCount_before << std::endl;
	//cv::Mat matchedImage;
	//cv::drawMatches(image1, keypoints1, image2, keypoints2, matches, matchedImage);
	//cv::imshow("matched image", matchedImage);
	cv::waitKey();
	return 0;
}

*Computing the homography between two images
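When the two views are related by a pure rotation of the camera, or when the scene itself is planar, corresponding pixels are related by a 3×3 homography $H$ (in homogeneous coordinates, up to a scale factor $s$):

$$s\begin{bmatrix}x'\\ y'\\ 1\end{bmatrix} = H\begin{bmatrix}x\\ y\\ 1\end{bmatrix}$$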



Inlier points of the two images (taken before and after rotating the camera), obtained from the homography:



The homography is an invertible 3×3 matrix, so once it has been computed, points in one image can be transferred to the other. In fact, every pixel can be transferred, which means an entire image can be warped to the viewpoint of the other. This process is called image stitching, and it is commonly used to build a large panorama from multiple images.


Code:

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

int main()
{
	cv::Mat image1 = cv::imread("library_1.jpg");
	cv::Mat image2 = cv::imread("library_2.jpg");
	cv::resize(image1, image1, cv::Size(480, 320));
	cv::resize(image2, image2, cv::Size(480, 320));
	cv::imshow("original image1", image1);
	cv::imshow("original image2", image2);
	// detect SURF features and compute their descriptors
	cv::Ptr<cv::FeatureDetector> detector = cv::xfeatures2d::SURF::create(1500.0);
	std::vector<cv::KeyPoint> keypoints1, keypoints2;
	detector->detect(image1, keypoints1);
	detector->detect(image2, keypoints2);
	cv::Ptr<cv::DescriptorExtractor> descriptor = detector;
	cv::Mat descriptor1, descriptor2;
	descriptor->compute(image1, keypoints1, descriptor1);
	descriptor->compute(image2, keypoints2, descriptor2);
	// match with cross-checking enabled
	cv::BFMatcher matcher(cv::NORM_L2, true);
	std::vector<cv::DMatch> matches;
	matcher.match(descriptor1, descriptor2, matches);
	std::vector<cv::Point2f> points1, points2;
	for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); it++)
	{
		points1.push_back(keypoints1[it->queryIdx].pt);
		points2.push_back(keypoints2[it->trainIdx].pt);
	}
	// find the homography between image 1 and image 2 with RANSAC
	std::vector<uchar> inliers(points1.size(), 0);
	cv::Mat homography = cv::findHomography(points1, points2, inliers, cv::RANSAC, 1.0);
	// keep and draw only the inlier points
	std::vector<cv::Point2f> inlierPts1, inlierPts2;
	for (std::size_t i = 0; i < inliers.size(); i++)
	{
		if (inliers[i])
		{
			inlierPts1.push_back(points1[i]);
			inlierPts2.push_back(points2[i]);
		}
	}
	keypoints1.clear();
	keypoints2.clear();
	cv::KeyPoint::convert(inlierPts1, keypoints1);
	cv::KeyPoint::convert(inlierPts2, keypoints2);
	cv::drawKeypoints(image1, keypoints1, image1);
	cv::drawKeypoints(image2, keypoints2, image2);
	cv::imshow("image1 Homography Points", image1);
	cv::imshow("image2 Homography Points", image2);
	// warp image 1 into image 2's frame, then paste image 2 onto the left half
	cv::Mat imageMosaic;
	cv::warpPerspective(image1, imageMosaic, homography, cv::Size(2 * image1.cols, image1.rows));
	cv::Mat half(imageMosaic, cv::Rect(0, 0, image2.cols, image2.rows));
	image2.copyTo(half);
	cv::imshow("image mosaic", imageMosaic);
	cv::waitKey();
	return 0;
}