OpenCV camera calibration + undistortion

Combine the camera intrinsic matrix obtained from calibration and the distortion coefficients with an undistortion step to obtain an undistorted image.

When using the code, change dir to the name of the folder in which the calibration images are stored.

Change board_size to the number of inner corners per row and per column of the calibration board.

It is advisable to keep a few additional images in the directory for testing the undistortion.

In a follow-up step this will be merged with the inverse perspective transform into a single map, so that undistortion and perspective transform are applied in one pass.
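For comparison, OpenCV can also perform the plain undistortion directly from the calibration results, without the manual per-pixel loops used in the program below. The following minimal sketch is not part of the original program; it assumes cameraMatrix, distCoeffs and image_size are the values produced by calibrateCamera in the listing, and "distorted" is a placeholder name for an input image of that size.

// Sketch only: built-in undistortion from the calibration results.
// cameraMatrix, distCoeffs, image_size: values computed by the program below.
// distorted: any input image of size image_size (placeholder name).
cv::Mat map1, map2;
cv::initUndistortRectifyMap(cameraMatrix, distCoeffs, cv::Mat(), cameraMatrix,
                            image_size, CV_32FC1, map1, map2);   // precompute the pixel mapping once
cv::Mat undistorted;
cv::remap(distorted, undistorted, map1, map2, cv::INTER_LINEAR); // apply it to each frame
// One-call alternative: cv::undistort(distorted, undistorted, cameraMatrix, distCoeffs);

The full program, including the manual implementation of the same mapping, follows.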

#include <opencv2/opencv.hpp>
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include <iosfwd>
#include <ostream>
#include <vector>
using namespace cv;
using namespace std;
#define linux 1   // force the Linux directory-listing branch below; remove this line when building on Windows


vector<string> getFilesList(string dir);
#ifdef linux
#include <memory.h>
#include <dirent.h>
vector<string> getFilesList(string dirpath) {
	vector<string> allPath;
	DIR *dir = opendir(dirpath.c_str());
	if (dir == NULL)
	{
		cout << "opendir error" << endl;
		return allPath;
	}
	struct dirent *entry;
	while ((entry = readdir(dir)) != NULL)
	{
		if (entry->d_type == DT_DIR) { // the entry is a directory
			if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
				continue;
			string dirNew = dirpath + "/" + entry->d_name;
			vector<string> tempPath = getFilesList(dirNew);
			allPath.insert(allPath.end(), tempPath.begin(), tempPath.end());

		}
		else {
			//cout << "name = " << entry->d_name << ", len = " << entry->d_reclen << ", entry->d_type = " << (int)entry->d_type << endl;
			string name = entry->d_name;
			string imgdir = dirpath + "/" + name;
			//sprintf("%s",imgdir.c_str());
			allPath.push_back(imgdir);
		}

	}
	closedir(dir);
	//system("pause");
	return allPath;
}
#endif

#ifdef _WIN32//__WINDOWS_
#include <io.h>
vector<string> getFilesList(string dir)
{
	vector<string> allPath;
	// append "\\*.*" to the directory for the first search
	string dir2 = dir + "\\*.*";

	intptr_t handle;
	_finddata_t findData;

	handle = _findfirst(dir2.c_str(), &findData);
	if (handle == -1) {// 检查是否成功
		cout << "can not found the file ... " << endl;
		return allPath;
	}
	while (_findnext(handle, &findData) == 0)
	{
		if (findData.attrib & _A_SUBDIR) // the entry is a subdirectory
		{
			// skip "." and ".."; otherwise descend into the subdirectory
			if (strcmp(findData.name, ".") == 0 || strcmp(findData.name, "..") == 0)
				continue;
			// append "\\" plus the found directory name and search it recursively
			string dirNew = dir + "\\" + findData.name;
			vector<string> tempPath = getFilesList(dirNew);
			allPath.insert(allPath.end(), tempPath.begin(), tempPath.end());
		}
		else // not a subdirectory, i.e. a regular file: record its path
		{
			string filePath = dir + "\\" + findData.name;
			allPath.push_back(filePath);
		}
	}
	_findclose(handle);    // close the search handle
	return allPath;
}
#endif


cv::Mat NewCameraMatrix;   // declared but not used below


int main() {
    string dir = "../img6";


    // Read each image, extract the chessboard corners, then refine them to sub-pixel accuracy
    cout << "Start extracting corners...";
    int image_count = 0;  /* number of images */
    Size image_size;  /* image size */
    Size board_size = Size(6, 8);   /* number of inner corners per row and per column of the calibration board */
    vector<Point2f> image_points_buf;  /* corners detected in the current image */
    vector<vector<Point2f>> image_points_seq; /* all detected corners */
    string filename;
    vector<string> allFileList = getFilesList(dir);
    int pic_num = allFileList.size();
    vector<string> pic_name(pic_num);   // use a vector (a std::string VLA is not standard C++)
    for (size_t i = 0; i < allFileList.size(); i++)
        pic_name[i] = allFileList.at(i);


    while (image_count < pic_num) {
        filename = pic_name[image_count];
        image_count++;
        // progress output for inspection
        cout << "image_count = " << image_count << endl;
        /* read the current image */
        Mat imageInput = imread(filename);
        if (image_count == 1)  // take the image width and height from the first image
        {
            image_size.width = imageInput.cols;
            image_size.height = imageInput.rows;
            cout << "image_size.width = " << image_size.width << endl;
            cout << "image_size.height = " << image_size.height << endl;
        }

        /* extract the corners */
        if (0 == findChessboardCorners(imageInput, board_size, image_points_buf)) {
            cout << "can not find chessboard corners!\n"; // no corners found
            exit(1);
        } else {
            Mat view_gray;
            cvtColor(imageInput, view_gray, COLOR_BGR2GRAY);   // imread loads BGR, not RGB
            /* sub-pixel refinement */
            find4QuadCornerSubpix(view_gray, image_points_buf, Size(11, 11)); // refine the coarsely extracted corners
            image_points_seq.push_back(image_points_buf);  // store the sub-pixel corners
            /* draw the corner positions on the image */
            drawChessboardCorners(view_gray, board_size, image_points_buf, true); // mark the corners in the image
//            imshow("Camera Calibration", view_gray); // show the image
//            waitKey(50); // pause for 50 ms
        }
    }
    int total = image_points_seq.size();
    cout << "total = " << total << endl;
    cout << "角点提取完成!\n";
    //以下是摄像机标定
    cout << "开始标定………………";
    /*棋盘三维信息*/
    Size square_size = Size(10, 10);  /* 实际测量得到的标定板上每个棋盘格的大小 */
    vector<vector<Point3f>> object_points; /* 保存标定板上角点的三维坐标 */
    /*内外参数*/
    Mat cameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* 摄像机内参数矩阵 */
    vector<int> point_counts;  // 每幅图像中角点的数量
    Mat distCoeffs = Mat(1, 5, CV_32FC1, Scalar::all(0)); /* 摄像机的5个畸变系数:k1,k2,p1,p2,k3 */
    vector<Mat> tvecsMat;  /* 每幅图像的旋转向量 */
    vector<Mat> rvecsMat; /* 每幅图像的平移向量 */
    /* 初始化标定板上角点的三维坐标 */
    int i, j, t;
    for (t = 0; t < image_count; t++) {
        vector<Point3f> tempPointSet;
        for (i = 0; i < board_size.height; i++) {
            for (j = 0; j < board_size.width; j++) {
                Point3f realPoint;
                /* assume the calibration board lies in the z = 0 plane of the world coordinate system */
                realPoint.x = i * square_size.width;
                realPoint.y = j * square_size.height;
                realPoint.z = 0;
                tempPointSet.push_back(realPoint);
            }
        }
        object_points.push_back(tempPointSet);
    }
    /* Initialize the corner count per image, assuming the complete board is visible in every image */
    for (i = 0; i < image_count; i++) {
        point_counts.push_back(board_size.width * board_size.height);
    }
    /* Run the calibration */
    calibrateCamera(object_points, image_points_seq, image_size, cameraMatrix, distCoeffs, rvecsMat, tvecsMat, 0);
    cout << "Calibration finished!\n";
    // Evaluate the calibration result
    cout << "Start evaluating the calibration result...\n";
    double total_err = 0.0; /* sum of the mean errors of all images */
    double err = 0.0; /* mean error of a single image */
    vector<Point2f> image_points2; /* reprojected points */
    for (i = 0; i < image_count; i++) {
        vector<Point3f> tempPointSet = object_points[i];
        /* reproject the 3D points using the estimated intrinsic and extrinsic parameters */
        projectPoints(tempPointSet, rvecsMat[i], tvecsMat[i], cameraMatrix, distCoeffs, image_points2);
        /* compute the error between the reprojected points and the detected points */
        vector<Point2f> tempImagePoint = image_points_seq[i];
        Mat tempImagePointMat = Mat(1, tempImagePoint.size(), CV_32FC2);
        Mat image_points2Mat = Mat(1, image_points2.size(), CV_32FC2);
        for (int j = 0; j < tempImagePoint.size(); j++) {
            image_points2Mat.at<Vec2f>(0, j) = Vec2f(image_points2[j].x, image_points2[j].y);
            tempImagePointMat.at<Vec2f>(0, j) = Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
        }
        err = norm(image_points2Mat, tempImagePointMat, NORM_L2);
        total_err += err /= point_counts[i];
    }
    std::cout << "总体平均误差:" << total_err / image_count << "像素" << endl;
    std::cout << "评价完成!" << endl;
    Mat rotation_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* 保存每幅图像的旋转矩阵 */
    cout << "相机内参数矩阵:" << endl;
    cout << cameraMatrix << endl << endl;
    cout << "畸变系数:\n";
    cout << distCoeffs << endl << endl << endl;


    Mat K = cameraMatrix;
    Mat D = distCoeffs;
//D.at<double>(0,4)=0;

    const int nImage = pic_num;
    int ImgWidth = image_size.width;
    int ImgHeight = image_size.height;





/***************************************************/
    double fx = K.at<double>(0, 0);
    double fy = K.at<double>(1, 1);
    double ux = K.at<double>(0, 2);
    double uy = K.at<double>(1, 2);
    // OpenCV stores the distortion coefficients in the order k1, k2, p1, p2, k3
    double k1 = D.at<double>(0, 0), k2 = D.at<double>(0, 1), k3 = D.at<double>(0, 4);
    double p1 = D.at<double>(0, 2), p2 = D.at<double>(0, 3);
    double k4 = 0, k5 = 0, k6 = 0;   // rational-model denominator coefficients (not estimated here)


/**********************way2***********************/


//    Mat out2 = Mat(ImgHeight, ImgWidth, CV_8UC1);


    for (int i = 0; i < nImage; i++) {
        string InputPath = pic_name[i];
        cv::Mat img_tmp = cv::imread(InputPath);
        cvtColor(img_tmp, img_tmp, COLOR_BGR2GRAY);   // imread loads BGR, not RGB


//        ///**********way2**********/

        int max_x = -9999999, max_y = -9999999, min_x = 9999999, min_y = 9999999;
        double point_xy[4][2]; // top-left, top-right, bottom-left, bottom-right
        for (int i = 0; i < ImgHeight; i++) {
            for (int j = 0; j < ImgWidth; j++) {
                double xDistortion = (j - ux) / fx;
                double yDistortion = (i - uy) / fy;

                double xCorrected, yCorrected;

                double x0 = xDistortion;
                double y0 = yDistortion;
                for (int iter = 0; iter < 10; iter++) {   // iteratively invert the distortion model
                    double r2 = xDistortion * xDistortion + yDistortion * yDistortion;

                    double distRadialA = 1 / (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
                    double distRadialB = 1. + k4 * r2 + k5 * r2 * r2 + k6 * r2 * r2 * r2;

                    double deltaX = 2. * p1 * xDistortion * yDistortion + p2 * (r2 + 2. * xDistortion * xDistortion);
                    double deltaY = p1 * (r2 + 2. * yDistortion * yDistortion) + 2. * p2 * xDistortion * yDistortion;

                    xCorrected = (x0 - deltaX) * distRadialA * distRadialB;
                    yCorrected = (y0 - deltaY) * distRadialA * distRadialB;

                    xDistortion = xCorrected;
                    yDistortion = yCorrected;
                }
                float kk = 1.5;   // tolerance factor: ignore corrected coordinates far outside the image
                xCorrected = xCorrected * fx + ux;
                yCorrected = yCorrected * fy + uy;
                if (yCorrected > max_y && abs(yCorrected) < ImgHeight * kk) max_y = yCorrected;
                if (xCorrected > max_x && abs(xCorrected) < ImgWidth * kk) max_x = xCorrected;
                if (yCorrected < min_y && abs(yCorrected) < ImgHeight * kk) min_y = yCorrected;
                if (xCorrected < min_x && abs(xCorrected) < ImgWidth * kk) min_x = xCorrected;


                if (i == 0 && j == 0)                           point_xy[0][0] = xCorrected, point_xy[0][1] = yCorrected; // top-left
                if (i == 0 && j == ImgWidth - 1)                point_xy[1][0] = xCorrected, point_xy[1][1] = yCorrected; // top-right
                if (i == ImgHeight - 1 && j == 0)               point_xy[2][0] = xCorrected, point_xy[2][1] = yCorrected; // bottom-left
                if (i == ImgHeight - 1 && j == ImgWidth - 1)    point_xy[3][0] = xCorrected, point_xy[3][1] = yCorrected; // bottom-right


                if (abs(xCorrected) > ImgWidth * kk || abs(yCorrected) > ImgHeight * kk)
                {
                    img_tmp.at<uchar>(i,j)=127;
                }

            }


        }
        cout<<"size:   "<<max_x-min_x+1<<" \t"<<1+max_y-min_y<<endl;
        cout<<"size:   "<<(int)(point_xy[1][0]-point_xy[0][0])+1<<" \t"<<(int)(point_xy[3][0]-point_xy[2][0])<<endl;
        cout<<"size:   "<<(int)(point_xy[2][1]-point_xy[0][1])+1<<" \t"<<(int)(point_xy[3][1]-point_xy[1][1])<<endl;

//int ImgWidth_out= max((int)(point_xy[1][0]-point_xy[0][0])+1,(int)(point_xy[3][0]-point_xy[2][0]));
//        int  ImgHeight_out=max((int)(point_xy[2][1]-point_xy[0][1])+1,(int)(point_xy[3][1]-point_xy[1][1]));

//        int ImgWidth_out= max_x-min_x;
//        int  ImgHeight_out=max_y-min_y;
        int ImgWidth_out= ImgWidth;
        int  ImgHeight_out=ImgHeight;
        Mat out1 = Mat(ImgHeight_out, ImgWidth_out, CV_8UC1);
//        int max_width=max(point_xy[1][0],point_xy[3][0]);
//        int max_high=max(point_xy[2][1],point_xy[3][1]);
        int max_width=max_x;
        int max_high=max_y;


        int move_x = (ImgWidth_out - max_width) * 0;    // offset currently disabled (multiplied by 0)
        int move_y = (ImgHeight_out - max_high) * 0;
        cout<<"move:"<<move_x<<"\t"<<move_y<<endl;

        cout<<"size:"<<ImgHeight_out<<"\t"<<ImgWidth_out<<endl;

/********way1*******/
        for (int i = 0; i < ImgHeight_out; i++)
            for (int j = 0; j < ImgWidth_out; j++)
                out1.at<uchar>(i, j) = 0;


        for (int i = -move_y; i < ImgHeight_out; i++) {
            for (int j = -move_x; j < ImgWidth_out; j++) {
                double xCorrected = (j - ux) / fx;
                double yCorrected = (i - uy) / fy;

                double xDistortion, yDistortion;

                // here the undistorted (ideal) coordinates are known;
                double r2 = xCorrected * xCorrected + yCorrected * yCorrected;

                double deltaRa = 1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2;
                double deltaRb = 1 / (1. + k4 * r2 + k5 * r2 * r2 + k6 * r2 * r2 * r2);
                double deltaTx = 2. * p1 * xCorrected * yCorrected + p2 * (r2 + 2. * xCorrected * xCorrected);
                double deltaTy = p1 * (r2 + 2. * yCorrected * yCorrected) + 2. * p2 * xCorrected * yCorrected;

                // apply the distortion model;
                xDistortion = xCorrected * deltaRa * deltaRb + deltaTx;
                yDistortion = yCorrected * deltaRa * deltaRb + deltaTy;

                // finally map the normalized coordinates back to pixel coordinates via the camera model;
                xDistortion = xDistortion * fx + ux;
                yDistortion = yDistortion * fy + uy;
                if (yDistortion >= 0 && yDistortion < ImgHeight && xDistortion >= 0 && xDistortion < ImgWidth)
                    out1.at<uchar>(i+move_y, j+move_x) = img_tmp.at<uchar>(yDistortion, xDistortion);

            }
        }

        cv::imshow("ou1", out1);

        cv::imshow("RawImage", img_tmp);
        cv::waitKey(0);
    }

}
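As noted above, the undistortion can be merged with a perspective transform so that both are applied in a single remap. One possible way to do this, sketched here under the assumption that map1/map2 are the CV_32FC1 maps from cv::initUndistortRectifyMap (as in the first sketch above), H is a 3x3 homography mapping undistorted pixels to the final view (e.g. from cv::getPerspectiveTransform), and outSize is the desired output size (H and outSize are placeholder names), is to warp the two map channels with H and then remap the raw image once:

// Sketch only: fuse undistortion and perspective transform into one lookup table.
// map1/map2, H and outSize are assumed inputs, not variables from the program above.
cv::Mat fused_map1, fused_map2;
cv::warpPerspective(map1, fused_map1, H, outSize);   // fused_map1(u,v) = map1(H^-1 * (u,v,1))
cv::warpPerspective(map2, fused_map2, H, outSize);
cv::Mat result;
cv::remap(img_tmp, result, fused_map1, fused_map2, cv::INTER_LINEAR); // undistort + warp in one pass (img_tmp: raw grayscale image)

Output pixels that fall outside the warped maps receive the maps' border value (0 by default) and therefore sample the raw image at (0, 0), so a border mask is still needed in practice.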


Source: blog.csdn.net/wu58430/article/details/127044703