OpenCV stereo camera calibration

Build stereo_calib.cpp from opencv-3.0.0-alpha\samples\cpp.
Run: cpp-example-stereo_calib -w 9 -h 6 stereo_calib.xml
stereo_calib.xml lists the calibration images.
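
stereo_calib.xml follows the OpenCV image-list format (the same one written by the imagelist_creator sample): a single top-level sequence of file names, alternating left and right. A minimal sketch with placeholder file names (the node name can be anything, since readStringList() simply reads the first top-level node):

<?xml version="1.0"?>
<opencv_storage>
<imagelist>
"left01.jpg"
"right01.jpg"
"left02.jpg"
"right02.jpg"
</imagelist>
</opencv_storage>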

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

using namespace cv;
using namespace std;

static int print_help()
{
    cout <<
            " Given a list of chessboard images, the number of corners (nx, ny)\n"
            " on the chessboards, and a flag: useCalibrated for \n"
            "   calibrated (0) or\n"
            "   uncalibrated \n"
            "     (1: use cvStereoCalibrate(), 2: compute fundamental\n"
            "         matrix separately) stereo. \n"
            " Calibrate the cameras and display the\n"
            " rectified results along with the computed disparity images.   \n" << endl;
    cout << "Usage:\n ./stereo_calib -w board_width -h board_height [-nr /*dot not view results*/] <image list XML/YML file>\n" << endl;
    return 0;
}


static void
StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    if( imagelist.size() % 2 != 0 )// the images must come in left/right pairs
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }

    bool displayCorners = true;
    const int maxScale = 2;
    // physical size of one chessboard square
    //const float squareSize = 1.f;  // Set this to your actual square size
    const float squareSize = 36.f;   // unit: mm
    // ARRAY AND VECTOR STORAGE:

    // storage for image-plane points and world-coordinate points
    vector<vector<Point2f> > imagePoints[2];// detected corner points, one set per camera
    vector<vector<Point3f> > objectPoints;  // 3D coordinates of the chessboard corners
    Size imageSize;

    int i, j, k, nimages = (int)imagelist.size()/2; // number of left/right image pairs

    // reserve one corner vector per view; e.g. 10 images give 5 views per camera
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;

    cout << "nimages: " << nimages <<endl;

    //  the file list must alternate: "left01.jpg", "right01.jpg", ...
    for( i = j = 0; i < nimages; i++ )  // loop over the image pairs
    {
        for( k = 0; k < 2; k++ )// k = 0: left camera, k = 1: right camera
        {
            const string& filename = imagelist[i*2+k];  //逐个读取图片
            Mat img = imread(filename, 0); //图像数据 
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )// all images must have the same size
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);// upscale the image to help corner detection
                // the input to findChessboardCorners must be an 8-bit grayscale or color image
                found = findChessboardCorners(timg, boardSize, corners,// detect the inner chessboard corners; stored into imagePoints
                    CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
                if(found)
                {
                    // if the image was upscaled, scale the corner coordinates back to the original resolution
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
             // display the detected corners
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, COLOR_GRAY2BGR);// convert to BGR so the corners can be drawn in color
                drawChessboardCorners(cimg, boardSize, corners, found);// draw the detected corners
                double sf = 640./MAX(img.rows, img.cols);// display scale factor
                resize(cimg, cimg1, Size(), sf, sf);// resize to a convenient display size

                imshow("corners", cimg1);
                char c = (char)waitKey(50);// wait 50 ms
                if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');

            if( !found )
            {
                cout << "ChessboardCorners not found" <<endl;           
                break;
            }
            // refine the corner locations to sub-pixel accuracy
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                         TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
                                      30, 0.01));
        }
        if( k == 2 )// both images of the pair were processed successfully (the inner loop did not break)
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }

    imagePoints[0].resize(nimages);// corner positions in the left images
    imagePoints[1].resize(nimages);// corner positions in the right images
    // the physical corner coordinates are generated from squareSize
    objectPoints.resize(nimages);

    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ ) // each row
            for( k = 0; k < boardSize.width; k++ )  // each column
                objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0)); // x along the columns, y along the rows, z = 0 (board plane)
    }

    cout << "Running stereo calibration ...\n";
    // intrinsic parameters
    Mat cameraMatrix[2], distCoeffs[2];// intrinsic matrix and distortion coefficients for each camera
    //cameraMatrix[0] = Mat::eye(3, 3, CV_64F);// alternative: start from identity intrinsic matrices
    //cameraMatrix[1] = Mat::eye(3, 3, CV_64F);

    cameraMatrix[0] = initCameraMatrix2D(objectPoints,imagePoints[0],imageSize,0);
    cameraMatrix[1] = initCameraMatrix2D(objectPoints,imagePoints[1],imageSize,0);  
    Mat R, T, E, F;// R: rotation matrix, T: translation vector (right camera relative to left), E: essential matrix, F: fundamental matrix
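    // A brief summary of the stereoCalibrate() flags used below:
    //   CALIB_FIX_ASPECT_RATIO   - keep the fx/fy ratio fixed during the optimization
    //   CALIB_ZERO_TANGENT_DIST  - force the tangential distortion coefficients (p1, p2) to zero
    //   CALIB_SAME_FOCAL_LENGTH  - enforce the same focal length for both cameras
    //   CALIB_RATIONAL_MODEL     - enable the rational distortion coefficients k4, k5, k6
    //   CALIB_FIX_K3/K4/K5       - keep those radial coefficients at their initial (zero) values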

    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],// object points and the matching image points from both cameras
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    CALIB_FIX_ASPECT_RATIO +
                    CALIB_ZERO_TANGENT_DIST +
                    CALIB_SAME_FOCAL_LENGTH +
                    CALIB_RATIONAL_MODEL +
                    CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
                    TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5) );
    cout << "done with RMS error=" << rms << endl;
    cout << "cameraMatrix[0]=" << cameraMatrix[0]<<endl;
    cout << "cameraMatrix[1]=" << cameraMatrix[1]<<endl;

// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
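// For each corner, the error accumulated below is the distance (in pixels) from the point
// to the epipolar line induced by its counterpart in the other image: the lines returned by
// computeCorrespondEpilines() are normalized so that a^2 + b^2 = 1, hence |a*x + b*y + c|
// is the point-to-line distance. The two distances of each pair are summed.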

// compute the calibration (epipolar) error
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];// epipolar lines
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            // undistort the points; passing cameraMatrix as the new camera matrix keeps them in pixel coordinates
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average reprojection err = " <<  err/npoints << endl;

    // save the intrinsic parameters
    FileStorage fs("intrinsics.yml", FileStorage::WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
            "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    /*
    Extrinsic parameters:
    R  -- rotation matrix of the right camera relative to the left camera
    T  -- translation vector of the right camera relative to the left camera
    R1, R2 -- rectification (rotation) transforms of the left/right cameras, 3x3
    P1, P2 -- projection matrices of the left/right cameras in the rectified coordinate system, 3x4
    Q  -- disparity-to-depth mapping matrix, used to compute the 3D coordinates of a single target point
    */
    Mat R1, R2, P1, P2, Q;// computed by stereoRectify()
    Rect validRoi[2];// after rectification the images are cropped; validRoi is the valid region that remains

     // rectify the stereo pair so that the two image planes become coplanar and row-aligned
    stereoRectify(cameraMatrix[0], distCoeffs[0],
                  cameraMatrix[1], distCoeffs[1],
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);


     /*
    R, T: the 3x3 rotation and 3x1 translation from the left camera to the right camera; the first component of T (Tx) is the baseline length.
    Stereo rectification makes the two images coplanar and row-aligned, which makes stereo matching more reliable.
    To make the images coplanar, both camera images are projected onto a common imaging plane, which requires one rotation per image.
    stereoRectify computes exactly these rotations, R1 and R2, from each image plane to the common imaging plane;
    after rotating the left image by R1 and the right image by R2, the two images are coplanar and row-aligned.
    P1 and P2 are the projection matrices of the two cameras; they map 3D points to 2D image points: P * [X Y Z 1]' = [x y w]'.
    Q is the reprojection matrix; it maps a 2D image point plus its disparity back to a 3D point: Q * [x y d 1]' = [X Y Z W]', where d is the disparity between the left and right images.
    */
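    /*
    With CALIB_ZERO_DISPARITY the resulting Q has the standard form (cx, cy: rectified principal
    point, f: rectified focal length, Tx: x component of T):
        [ 1  0   0    -cx ]
        [ 0  1   0    -cy ]
        [ 0  0   0      f ]
        [ 0  0 -1/Tx    0 ]
    so the depth of a pixel with disparity d is Z = f * B / d, where B = |Tx| is the baseline.
    */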
    fs.open("extrinsics.yml", FileStorage::WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the extrinsic parameters\n";

    cout << "R1=" << R1<<endl;
    cout << "R2=" << R2<<endl;
    cout << "P1=" << P1<<endl;
    cout << "P2=" << P2<<endl;

    // OpenCV can handle left-right
    // or up-down camera arrangements
    // determine whether the rig is side-by-side or top-bottom from the translation in P2
    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));

// COMPUTE AND DISPLAY RECTIFICATION
    if( !showRectified )
        return;

    Mat rmap[2][2];// rectification (remap) lookup tables
// IF BY CALIBRATED (BOUGUET'S METHOD)
    if( useCalibrated )
    {
        // we already computed everything
    }
// OR ELSE HARTLEY'S METHOD
    else
 // use intrinsic parameters of each camera, but
 // compute the rectification transformation directly
 // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for( k = 0; k < 2; k++ )
        {
            for( i = 0; i < nimages; i++ )
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);

        cout << "F=" << F.rows<<"*"<<F.cols << " H1:"<<H1.rows<<"*"<<H1.cols<<endl;

        R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
        R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
        cout << "R1 = " << R1<<endl;
        cout << "R2 = " << R2<<endl;
        cout << "P1 = " << P1<<endl;
        cout << "P2 = " << P2<<endl;
    }
     /*
    Use the R and P matrices computed by stereoRectify to build the remap tables mapx and mapy.
    These tables are then passed to remap() to rectify the images, making the two views coplanar and row-aligned.
    The newCameraMatrix parameter of initUndistortRectifyMap() is the rectified camera matrix; in OpenCV the
    rectified camera matrix Mrect is returned together with the projection matrix P, so we pass P here and the
    function reads the rectified camera matrix out of it.
    */
    //Precompute maps for cv::remap()
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
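    // CV_16SC2 stores the maps in a compact fixed-point representation that remap() can use directly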
    //cout <<"cameraMatrix[0]"<<cameraMatrix[0]<<"distCoeffs[0]"<<distCoeffs[0]<<"R1"<<R1<<"P1"<<P1<<"imageSize"<<imageSize<<"rmap[0][0]"<<rmap[0][0]<<"rmap[0][1]"<<rmap[0][1]<<"\n\n"<<endl;
    //cout <<"cameraMatrix[1]"<<cameraMatrix[1]<<"distCoeffs[1]"<<distCoeffs[0]<<"R2"<<R2<<"P2"<<P2<<"imageSize"<<imageSize<<"rmap[1][0]"<<rmap[1][0]<<"rmap[1][1]"<<rmap[1][1]<<"\n\n"<<endl;

     /*
        Display the rectification results.
        The left and right images of each pair are drawn side by side on one canvas,
        and the pairs are shown one at a time (press a key to advance).
    */
    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        sf =600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);
    }
    else
    {
        sf = 300./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);
    }

    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            // after remap() the left and right images are coplanar and row-aligned
            remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
            cvtColor(rimg, cimg, COLOR_GRAY2BGR);
            imshow("cimg", cimg);

            cout << "isVerticalStereo=" << isVerticalStereo <<"   sf="<<sf<< endl;
           // take the part of the canvas that this view occupies
           Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            imshow("rectified-000", canvas);

            resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
            if( useCalibrated )
            {
                // scale the valid ROI to canvas coordinates
                Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8); // draw a rectangle around the valid region
            }
        }
             // draw guide lines to check the row (or column) alignment
        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 )
                // draw a horizontal line every 16 rows
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 )
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' )
            break;
    }
}


static bool readStringList( const string& filename, vector<string>& l )
{
    l.resize(0);
    FileStorage fs(filename, FileStorage::READ);
    if( !fs.isOpened() )
        return false;
    FileNode n = fs.getFirstTopLevelNode();
    if( n.type() != FileNode::SEQ )
        return false;
    FileNodeIterator it = n.begin(), it_end = n.end();
    for( ; it != it_end; ++it )
        l.push_back((string)*it);
    return true;
}

int main(int argc, char** argv)
{
    Size boardSize;
    string imagelistfn;
    bool showRectified = true;

    for( int i = 1; i < argc; i++ )
    {
        if( string(argv[i]) == "-w" )
        {
            if( sscanf(argv[++i], "%d", &boardSize.width) != 1 || boardSize.width <= 0 )
            {
                cout << "invalid board width" << endl;
                return print_help();
            }
        }
        else if( string(argv[i]) == "-h" )
        {
            if( sscanf(argv[++i], "%d", &boardSize.height) != 1 || boardSize.height <= 0 )
            {
                cout << "invalid board height" << endl;
                return print_help();
            }
        }
        else if( string(argv[i]) == "-nr" )
            showRectified = false;
        else if( string(argv[i]) == "--help" )
            return print_help();
        else if( argv[i][0] == '-' )
        {
            cout << "invalid option " << argv[i] << endl;
            return 0;
        }
        else
            imagelistfn = argv[i];
    }

    if( imagelistfn == "" )
    {
        imagelistfn = "stereo_calib.xml";
        boardSize = Size(9, 6);
    }
    else if( boardSize.width <= 0 || boardSize.height <= 0 )
    {
        cout << "if you specified XML file with chessboards, you should also specify the board width and height (-w and -h options)" << endl;
        return 0;
    }

    vector<string> imagelist;
    bool ok = readStringList(imagelistfn, imagelist);
    if(!ok || imagelist.empty())
    {
        cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
        return print_help();
    }

    StereoCalib(imagelist, boardSize, true, showRectified);
    return 0;
}

The resulting intrinsic parameters (intrinsics.yml):
%YAML:1.0
M1: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.9118398778494270e+02, 0., 3.0200043006172234e+02, 0.,
4.5254392629023482e+02, 2.4429761654279827e+02, 0., 0., 1. ]

D1: !!opencv-matrix
rows: 1
cols: 12
dt: d
data: [ 6.5502968305794426e-02, -4.3702707841165300e-01, 0., 0., 0.,
0., 0., -7.3822045850612006e-01, 0., 0., 0., 0. ]
M2: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.9118398778494270e+02, 0., 3.0251380762012855e+02, 0.,
4.5254392629023482e+02, 2.4325509154105887e+02, 0., 0., 1. ]
D2: !!opencv-matrix
rows: 1
cols: 12
dt: d
data: [ 1.1948494081208850e-03, -8.6262227111310374e-02, 0., 0., 0.,
0., 0., -1.6192883952372866e-01, 0., 0., 0., 0. ]

M1, M2 -- intrinsic (camera) matrices
D1, D2 -- distortion coefficient vectors

Camera intrinsic matrix:

        [ fx  s   x0 ]
    K = [ 0   fy  y0 ]
        [ 0   0   1  ]

fx, fy are the focal lengths (in pixels); x0, y0 are the principal point coordinates (in the image plane); s is the skew parameter (ideally 0).

D = (k1, k2, p1, p2 [, k3 [, k4, k5, k6 [, s1, s2, s3, s4]]])

k1, k2, k3, k4, k5, k6 are radial distortion coefficients; p1, p2 are tangential distortion coefficients; s1..s4 are thin-prism coefficients.
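
As a quick check of the saved intrinsics, the YAML file can be read back with cv::FileStorage and used to undistort a single image. A minimal sketch, assuming the file names below (left01.jpg is a placeholder):

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

int main()
{
    // load the intrinsic parameters written by StereoCalib()
    cv::FileStorage fs("intrinsics.yml", cv::FileStorage::READ);
    if( !fs.isOpened() ) { std::cout << "can not open intrinsics.yml\n"; return -1; }
    cv::Mat M1, D1;
    fs["M1"] >> M1;
    fs["D1"] >> D1;

    // undistort one of the left images
    cv::Mat img = cv::imread("left01.jpg");
    if( img.empty() ) { std::cout << "can not read the image\n"; return -1; }
    cv::Mat undistorted;
    cv::undistort(img, undistorted, M1, D1);

    cv::imshow("original", img);
    cv::imshow("undistorted", undistorted);
    cv::waitKey();
    return 0;
}

The extrinsic parameters produced by the same run (extrinsics.yml):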

%YAML:1.0
R: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 9.9884316762675907e-01, -4.8077480586275118e-02,
-9.3933264525424703e-04, 4.8079103838389509e-02,
9.9884192810661887e-01, 1.7895335563569564e-03,
8.5220856570500326e-04, -1.8326256377959298e-03,
9.9999795760983046e-01 ]
T: !!opencv-matrix
rows: 3
cols: 1
dt: d
data: [ -7.5570887376059744e+01, -1.0940390462549969e+00,
9.5613262991292036e-01 ]
R1: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 9.9934373466630544e-01, -3.3587861035826658e-02,
-1.3563022312117781e-02, 3.3602237038357116e-02,
9.9943493791852078e-01, 8.3338695733287842e-04,
1.3527366677186979e-02, -1.2885879250515359e-03,
9.9990767068361885e-01 ]
R2: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 9.9981522140855861e-01, 1.4474315827705250e-02,
-1.2649791345115189e-02, -1.4487731769855497e-02,
9.9989457722534925e-01, -9.6956872107580310e-04,
1.2634423925127316e-02, 1.1526563494858851e-03,
9.9991951811904345e-01 ]
P1: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [ 4.4205452306772054e+02, 0., 3.1627860260009766e+02, 0., 0.,
4.4205452306772054e+02, 2.4280571746826172e+02, 0., 0., 0., 1.,
0. ]
P2: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [ 4.4205452306772054e+02, 0., 3.1627860260009766e+02,
-3.3412626514892305e+04, 0., 4.4205452306772054e+02,
2.4280571746826172e+02, 0., 0., 0., 1., 0. ]
Q: !!opencv-matrix
rows: 4
cols: 4
dt: d
data: [ 1., 0., 0., -3.1627860260009766e+02, 0., 1., 0.,
-2.4280571746826172e+02, 0., 0., 0., 4.4205452306772054e+02, 0.,
0., 1.3230163838532507e-02, 0. ]

Extrinsic parameters:

R -- rotation matrix of the right camera relative to the left camera

T -- translation vector of the right camera relative to the left camera

R1, R2 -- rectification (rotation) transforms of the left and right cameras

P1, P2 -- projection matrices of the left and right cameras in the rectified coordinate system

Q -- disparity-to-depth mapping matrix, which I use to compute the 3D coordinates of a single target point
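
From the matrices above, the rectified focal length is f ≈ 442.05 px (from P1) and the baseline is B = |Tx| ≈ 75.6 mm (from T, or from Q, since Q[3][2] = 1/B here). Depth then follows from Z = f·B/d ≈ 33413/d mm, so a disparity of about 33 pixels corresponds to roughly 1 m. Below is a minimal sketch of how the saved Q can be combined with a block-matching disparity map; the file names are placeholders and are assumed to be an already rectified (remap()-ed) pair:

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

int main()
{
    // load Q from the extrinsics file written by StereoCalib()
    cv::FileStorage fs("extrinsics.yml", cv::FileStorage::READ);
    if( !fs.isOpened() ) { std::cout << "can not open extrinsics.yml\n"; return -1; }
    cv::Mat Q;
    fs["Q"] >> Q;

    // rectified grayscale pair (placeholder file names)
    cv::Mat left  = cv::imread("left_rectified.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right_rectified.png", cv::IMREAD_GRAYSCALE);
    if( left.empty() || right.empty() ) { std::cout << "can not read the rectified pair\n"; return -1; }

    // block-matching disparity; numDisparities must be a multiple of 16
    cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(128, 21);
    cv::Mat disp16, disp32;
    bm->compute(left, right, disp16);            // fixed-point disparity (CV_16S, 4 fractional bits)
    disp16.convertTo(disp32, CV_32F, 1.0/16.0);  // convert to float pixel disparities

    // Q * [x y d 1]' = [X Y Z W]'; reprojectImageTo3D applies this to every pixel
    cv::Mat xyz;
    cv::reprojectImageTo3D(disp32, xyz, Q, true);
    // xyz.at<cv::Vec3f>(y, x) now holds the 3D point, in the same unit as squareSize (mm)
    return 0;
}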

Reposted from blog.csdn.net/ruidongren/article/details/79900259