Converting Between Pixel Coordinates in a Camera Image and World Coordinates

The principle behind converting between camera coordinates and world coordinates is a direct transformation across four coordinate systems: pixel, image, camera, and world. When a GIS system is involved, spatial reference coordinates must be considered as well; a spatial reference system in turn comprises a geographic coordinate system and a projected coordinate system, bringing the total to as many as six coordinate systems. An example figure follows:

(Figure: the chain of coordinate systems: pixel, image, camera, world, and, for GIS, projected and geographic.)
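Both helper functions below implement the standard pinhole model. As a notation sketch (K is the intrinsic matrix, R and t the extrinsics recovered by PnP, s the depth scale factor, and P_w the world point):

s \begin{pmatrix} u \\ v \\ 1 \end{pmatrix} = K \left( R\,P_w + t \right),
\qquad
P_w = R^{-1} \left( s\,K^{-1} \begin{pmatrix} u \\ v \\ 1 \end{pmatrix} - t \right)

With the extra constraint that P_w lies on the plane Z = z_{const}, the scale s is fixed by the third row:
s = \left( z_{const} + (R^{-1} t)_z \right) / (R^{-1} K^{-1} p)_z.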

The code below is based on OpenCV. Distortion parameters are not applied in the conversion functions themselves; PnP is used to solve for the extrinsic parameters given known intrinsics, and the camera-to-world and world-to-camera conversions are wrapped in helper functions. A GIS coordinate system is itself a kind of world coordinate system: in general, calibration is performed in a local world coordinate system, and the GIS projected coordinate system differs from it only by translation parameters in the x and y directions, which complete that conversion. The projected-to-geographic conversion can be done directly with GDAL (a sketch follows after the main listing). The specific code is as follows:


#include <iostream>
#include <ctype.h>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
#ifdef _DEBUG
	#pragma comment(lib,"opencv_world460d.lib")
#else
	#pragma comment(lib,"opencv_world460.lib")
#endif // _DEBUG

void CameraToWorld(InputArray cameraMatrix, InputArray rV, InputArray tV, Point2d& imgPoints, Point3d& worldPoints)
{
	cv::Mat cameraMatrix1 = cameraMatrix.getMat();
	// Build the homogeneous pixel vector p = (u, v, 1)^T
	cv::Mat imagePoint = cv::Mat::ones(3, 1, cv::DataType<double>::type);
	imagePoint.at<double>(0, 0) = imgPoints.x;
	imagePoint.at<double>(1, 0) = imgPoints.y;
	// The target point is assumed to lie on the world plane Z = zConst
	double zConst = 0;
	// Solve for the scale factor s (i.e. Zc) from the Z row of the projection equation:
	//   s = (zConst + (R^-1 * t).z) / (R^-1 * K^-1 * p).z
	cv::Mat tempMat = rV.getMat().inv() * cameraMatrix1.inv() * imagePoint;
	cv::Mat tempMat2 = rV.getMat().inv() * tV.getMat();
	double s = (zConst + tempMat2.at<double>(2, 0)) / tempMat.at<double>(2, 0);

	// Back-project: Pw = R^-1 * (s * K^-1 * p - t)
	Mat wcPoint = rV.getMat().inv() * (cameraMatrix1.inv() * s * imagePoint - tV.getMat());

	worldPoints.x = wcPoint.at<double>(0, 0);
	worldPoints.y = wcPoint.at<double>(1, 0);
	worldPoints.z = wcPoint.at<double>(2, 0);
	cout << "CameraToWorld 2D to 3D:   " << worldPoints << endl;
}

void WorldToCamera(InputArray cameraMatrix, InputArray rV, InputArray tV, Point3d& worldPoint, Point2d& imgPoint)
{
	cv::Mat cameraMatrix1 = cameraMatrix.getMat();

	cv::Mat worldPoints = Mat::ones(4, 1, cv::DataType<double>::type);
	worldPoints.at<double>(0, 0) = worldPoint.x;
	worldPoints.at<double>(1, 0) = worldPoint.y;
	worldPoints.at<double>(2, 0) = worldPoint.z;
	cout << "world Points :  " << worldPoints << endl;
	// Project: s * p = K * [R | t] * Pw
	Mat RT_;
	hconcat(rV, tV, RT_); // 3x4 extrinsic matrix [R | t]
	cout << "RT_" << RT_ << endl;
	cout << "cameraMatrix1_" << cameraMatrix1 << endl;
	Mat image_points = cameraMatrix1 * RT_ * worldPoints;
	// Divide by the third (depth) component to get pixel coordinates
	Mat D_Points = Mat::ones(3, 1, cv::DataType<double>::type);
	D_Points.at<double>(0, 0) = image_points.at<double>(0, 0) / image_points.at<double>(2, 0);
	D_Points.at<double>(1, 0) = image_points.at<double>(1, 0) / image_points.at<double>(2, 0);
	cout << "WorldToCamera 3D to 2D:   " << D_Points << endl;
}
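// Note: an equivalent forward projection, which additionally applies the lens
// distortion model, is available built in to OpenCV (sketch, using the
// variables from main below):
//   std::vector<cv::Point2f> projected;
//   cv::projectPoints(worldBoxPoints, rvec1, tvec1, cameraMatrix1, distCoeffs1, projected);
// The manual K * [R | t] multiplication in WorldToCamera ignores distortion.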

int main()
{
	// Solve the camera's R & T (extrinsics) via PnP //
	Point2f point;
	vector<Point2f> boxPoints; // pixel coordinates of the control points
	// Load the image
	Mat sourceImage = imread("D:/source/kernel/example/opencv_uvxyztest/x64/Debug/a.bmp");
	namedWindow("Source", 1);
	// Set the four control-point corners in the image
	// Point 1
	point = Point2f(558.f, 259.f);
	boxPoints.push_back(point);
	circle(sourceImage, boxPoints[0], 3, Scalar(0, 255, 0), -1, 8);

	// Point 2
	point = Point2f(629.f, 212.f);
	boxPoints.push_back(point);
	circle(sourceImage, boxPoints[1], 3, Scalar(0, 255, 0), -1, 8);

	// Point 3
	point = Point2f(744.f, 260.f);
	boxPoints.push_back(point);
	circle(sourceImage, boxPoints[2], 3, Scalar(0, 255, 0), -1, 8);

	// Point 4
	point = Point2f(693.f, 209.f);
	boxPoints.push_back(point);
	circle(sourceImage, boxPoints[3], 3, Scalar(0, 255, 0), -1, 8);

	// Set the corresponding corners in the real world (all on the Z = 0 plane)
	vector<Point3f> worldBoxPoints; // world coordinates of the control points
	Point3f tmpPoint;
	tmpPoint = Point3f(2750.f, 890.f, 0.f);
	worldBoxPoints.push_back(tmpPoint);
	tmpPoint = Point3f(3500.f, 450.f, 0.f);
	worldBoxPoints.push_back(tmpPoint);
	tmpPoint = Point3f(2790.f, -240.f, 0.f);
	worldBoxPoints.push_back(tmpPoint);
	tmpPoint = Point3f(3620.f, -50.f, 0.f);
	worldBoxPoints.push_back(tmpPoint);
	// Camera intrinsics //
	cv::Mat cameraMatrix1 = Mat::eye(3, 3, cv::DataType<double>::type); // camera intrinsic matrix
	cv::Mat distCoeffs1(5, 1, cv::DataType<double>::type); // distortion coefficients
	distCoeffs1.at<double>(0, 0) = 0.061439051;
	distCoeffs1.at<double>(1, 0) = 0.03187556;
	distCoeffs1.at<double>(2, 0) = -0.00726151;
	distCoeffs1.at<double>(3, 0) = -0.00111799;
	distCoeffs1.at<double>(4, 0) = -0.00678974;

	// Fixed camera parameters, taken from Mastering OpenCV
	double fx = 328.61652824;
	double fy = 328.56512516;
	double cx = 629.80551148;
	double cy = 340.5442837;
	cameraMatrix1.at<double>(0, 0) = fx;
	cameraMatrix1.at<double>(1, 1) = fy;
	cameraMatrix1.at<double>(0, 2) = cx;
	cameraMatrix1.at<double>(1, 2) = cy;

	// Solve R & T via PnP //
	cv::Mat rvec1(3, 1, cv::DataType<double>::type); // rotation vector
	cv::Mat tvec1(3, 1, cv::DataType<double>::type); // translation vector
	cv::solvePnP(worldBoxPoints, boxPoints, cameraMatrix1, distCoeffs1, rvec1, tvec1, false, cv::SOLVEPNP_ITERATIVE);
	cv::Mat rvecM1(3, 3, cv::DataType<double>::type); // rotation matrix
	cv::Rodrigues(rvec1, rvecM1);

	// Compute the camera's rotation angles (Euler angles) in the world frame;
	// this is not needed for the 2D-3D conversion itself
	const double PI = 3.1415926;
	double thetaZ = atan2(rvecM1.at<double>(1, 0), rvecM1.at<double>(0, 0)) / PI * 180;
	double thetaY = atan2(-1 * rvecM1.at<double>(2, 0), sqrt(rvecM1.at<double>(2, 1)*rvecM1.at<double>(2, 1)
		+ rvecM1.at<double>(2, 2)*rvecM1.at<double>(2, 2))) / PI * 180;
	double thetaX = atan2(rvecM1.at<double>(2, 1), rvecM1.at<double>(2, 2)) / PI * 180;
	cout << "theta x  " << thetaX << endl << "theta Y: " << thetaY << endl << "theta Z: " << thetaZ << endl;



	///World To Camera
	Point2d d2p;
	Point3d wd(3620, -590, 345);
	cout << "world Points :  " << wd << endl;
	WorldToCamera(cameraMatrix1, rvecM1, tvec1, wd, d2p);

	// Camera To World (CameraToWorld assumes the target lies on the Z = 0 world plane)
	Point2d d2pcw(757.0414822810177, 213.3698894820287);
	Point3d wdcw;
	cout << "camera Points :  " << d2pcw << endl;
	CameraToWorld(cameraMatrix1, rvecM1, tvec1, d2pcw, wdcw);

	imshow("Source", sourceImage);
	waitKey(0);
	return 0;
}
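As mentioned above, going from the calibrated local world frame to geographic coordinates in a GIS takes two steps: a translation in x and y into the projected coordinate system, then a projected-to-geographic transform, which GDAL/OGR performs directly. Below is a minimal sketch under assumed parameters: the EPSG codes and the offsetX/offsetY translation values are placeholders to be replaced with your own site's spatial reference and calibration offsets.

#include <iostream>
#include "ogr_spatialref.h" // GDAL/OGR spatial reference API

int main()
{
	// A local world coordinate produced by CameraToWorld (placeholder values)
	double x = 3620.0, y = -590.0;

	// Hypothetical x/y translation from the local calibration frame
	// into the projected coordinate system (site-specific parameters)
	const double offsetX = 500000.0, offsetY = 4000000.0;
	x += offsetX;
	y += offsetY;

	// Projected -> geographic with OGR (EPSG codes are placeholders)
	OGRSpatialReference projected, geographic;
	projected.importFromEPSG(32650);  // e.g. WGS 84 / UTM zone 50N
	geographic.importFromEPSG(4326);  // WGS 84 geographic
	// Keep (x, y) = (lon, lat) axis order under GDAL 3+
	projected.SetAxisMappingStrategy(OAMS_TRADITIONAL_GIS_ORDER);
	geographic.SetAxisMappingStrategy(OAMS_TRADITIONAL_GIS_ORDER);

	OGRCoordinateTransformation* toGeo =
		OGRCreateCoordinateTransformation(&projected, &geographic);
	if (toGeo && toGeo->Transform(1, &x, &y))
		std::cout << "lon: " << x << "  lat: " << y << std::endl;
	OGRCoordinateTransformation::DestroyCT(toGeo);
	return 0;
}

The reverse direction (geographic to projected, then subtracting the offsets) uses the same API with the two spatial references swapped.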


Reprinted from: blog.csdn.net/chijingjing/article/details/128821285