Stereo (Binocular) Ranging, Step 5: Testing the Distance Measurement Program


Click the mouse and the program returns the 3-D coordinates of that point; adapt and optimize it to your own project's needs. I intend to change it later so that it automatically returns the coordinates of the foreground.
For stereo ranging, see http://blog.csdn.net/wangchao7281/article/details/52506691 for reference.
This program was tested with OpenCV 3.0; version 2.4 gives some compile errors that I did not bother to fix, so just use 3.0.
Tested successfully with VS2013 + OpenCV 3.0 in Release mode. Left-click on the disparity map to measure distance; the 3-D coordinates of that point are printed.
It mainly relies on the parameters from MATLAB stereo calibration (see the mapping note above the hard-coded matrices in the code below).
Pay attention to the placement of the two cameras: plugging them in swapped will make the results inaccurate.
After careful adjustment, the ranging is actually quite accurate. Here are the result images.

What I am holding is a laser rangefinder, said to cost 2000-3000 RMB apiece, so the laser reading is relatively accurate; the blue 3D-printed part is what I use to hold the two cameras in place. The rig still needs fine-tuning: the white speckle regions in the disparity map should be made as small as possible so that the stereo ranging is as accurate as possible. Then click a white region in the disparity map and the 3-D coordinates are returned. Comparing the returned z coordinate with the laser rangefinder's reading gives a good sense of how accurate this stereo ranging program is.
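If you want a single number to compare directly against the laser rangefinder, the straight-line (Euclidean) distance can be more convenient than z alone. Below is a minimal, hypothetical helper that could be dropped into the same file; it assumes xyz has already been filled (and scaled by 16) by stereo_match(), and that the calibration, and therefore xyz, is in millimetres:

// Hypothetical helper: straight-line distance from the left camera to the clicked pixel.
static double distanceAt(const cv::Mat& xyz, cv::Point p)
{
    cv::Vec3f v = xyz.at<cv::Vec3f>(p);   // (X, Y, Z) in the calibration's units
    return cv::norm(v);                   // Euclidean norm; compare this (or just v[2]) with the laser reading
}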

/**************************************/
/*   Stereo matching and ranging      */
/**************************************/
#include <opencv2/opencv.hpp>   // OpenCV 3.x C++ API (core, imgproc, highgui, calib3d)
#include <iostream>
#include <cstdlib>              // std::abs
using namespace std;
using namespace cv;
const int imageWidth = 600;                             // camera resolution
const int imageHeight = 480;
Size imageSize = Size(imageWidth, imageHeight);
Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;
Rect validROIL;// after rectification the images are cropped; validROI is the cropped (valid) region
Rect validROIR;
Mat mapLx, mapLy, mapRx, mapRy;     // rectification lookup tables for remap()
Mat Rl, Rr, Pl, Pr, Q;              // rectification rotations R, projection matrices P, reprojection matrix Q
Mat xyz;              // 3-D coordinates
Point origin;         // point where the mouse button was pressed
Rect selection;       // rectangular selection box
bool selectObject = false;    // whether an object is being selected
int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
Ptr<StereoBM> bm = StereoBM::create(16, 9);
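// Note: the (16, 9) passed to StereoBM::create() are only initial values;
// stereo_match() below overwrites numDisparities and blockSize from the trackbars.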
/*
Camera parameters obtained from prior (offline) calibration:
fx 0  cx
0  fy cy
0  0  1
*/
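// Mapping note (my addition; the original post only says these values come from MATLAB stereo
// calibration). A hedged sketch of the usual conversion -- field names refer to MATLAB's
// stereoParameters object and may differ between MATLAB versions; MATLAB uses a row-vector
// convention, so its matrices are transposed relative to OpenCV:
//   cameraMatrixL/R <- transpose of CameraParameters1/2.IntrinsicMatrix
//   distCoeffL/R    <- [k1, k2, p1, p2, k3]  (RadialDistortion = [k1 k2 (k3)], TangentialDistortion = [p1 p2]; use 0 if k3 was not estimated)
//   R               <- transpose of RotationOfCamera2, stored here as the rotation vector rec (Rodrigues form)
//   T               <- TranslationOfCamera2 as a column vector (units follow the calibration pattern, typically mm)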
Mat cameraMatrixL = (Mat_<double>(3, 3) << 682.55880, 0, 384.13666,
    0, 682.24569, 311.19558,
    0, 0, 1);
Mat distCoeffL = (Mat_<double>(5, 1) << -0.51614, 0.36098, 0.00523, -0.00225, 0.00000);
Mat cameraMatrixR = (Mat_<double>(3, 3) << 685.03817, 0, 397.39092,
    0, 682.54282, 272.04875,
    0, 0, 1);
Mat distCoeffR = (Mat_<double>(5, 1) << -0.46640, 0.22148, 0.00947, -0.00242, 0.00000);
Mat T = (Mat_<double>(3, 1) << -61.34485, 2.89570, -4.76870);// T: translation vector
Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206);// rec: rotation vector
Mat R;// R: rotation matrix
/***** Stereo matching *****/
void stereo_match(int, void*)
{
    bm->setBlockSize(2 * blockSize + 5);     // SAD window size; 5..21 is a reasonable range
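    // With the trackbar range 0..8 used in main(), 2 * blockSize + 5 gives an odd window size between 5 and 21.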
    bm->setROI1(validROIL);
    bm->setROI2(validROIR);
    bm->setPreFilterCap(31);
    bm->setMinDisparity(0);  // minimum disparity; default 0, may be negative, int
    bm->setNumDisparities(numDisparities * 16 + 16);// disparity range, i.e. max disparity minus min disparity; must be a positive multiple of 16, int
    bm->setTextureThreshold(10);
    bm->setUniquenessRatio(uniquenessRatio);// uniquenessRatio mainly helps reject false matches
    bm->setSpeckleWindowSize(100);
    bm->setSpeckleRange(32);
    bm->setDisp12MaxDiff(-1);
    Mat disp, disp8;
    bm->compute(rectifyImageL, rectifyImageR, disp);// input images must be grayscale
    disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16)*16.));// the computed disparity is CV_16S (fixed point, scaled by 16)
    reprojectImageTo3D(disp, xyz, Q, true); // to get real distances, the X/W, Y/W, Z/W produced by reprojectImageTo3D must all be multiplied by 16 (i.e. W divided by 16), because the disparity fed in is 16x the true value
    xyz = xyz * 16;
    imshow("disparity", disp8);
}
/***** Mouse callback *****/
static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
    }
    switch (event)
    {
    case EVENT_LBUTTONDOWN:   // left mouse button pressed
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
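        // xyz was filled by stereo_match() and already multiplied by 16 there,
        // so this prints coordinates in the calibration's physical units.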
        cout << origin << " in world coordinates is: " << xyz.at<Vec3f>(origin) << endl;
        break;
    case EVENT_LBUTTONUP:    // left mouse button released
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            break;
    }
}
/***** Main *****/
int main()
{
    VideoCapture inputVideo1(0);
    VideoCapture inputVideo2(1);
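    // Note: index 0 must be the physical left camera and index 1 the right one;
    // if they are plugged in the other way round, the measurements will be wrong (see the note above the code).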
    /*
    Stereo rectification
    */
    Rodrigues(rec, R); // Rodrigues transform: convert the rotation vector into a rotation matrix
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
        0, imageSize, &validROIL, &validROIR);
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);
    /*
    Grab a frame from each camera
    */
    while (1)
    {
        inputVideo1 >> rgbImageL;
        inputVideo2 >> rgbImageR;
        //rgbImageL = imread("left01.jpg", IMREAD_COLOR);
        cvtColor(rgbImageL, grayImageL, COLOR_BGR2GRAY);
        //rgbImageR = imread("right01.jpg", IMREAD_COLOR);
        cvtColor(rgbImageR, grayImageR, COLOR_BGR2GRAY);
        imshow("ImageL Before Rectify", grayImageL);
        imshow("ImageR Before Rectify", grayImageR);
        /*
        After remap, the left and right images are coplanar and row-aligned
        */
        remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
        remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
        /*
        Display the rectification results
        */
        Mat rgbRectifyImageL, rgbRectifyImageR;
        cvtColor(rectifyImageL, rgbRectifyImageL, COLOR_GRAY2BGR);  // back to BGR so colored overlays can be drawn
        cvtColor(rectifyImageR, rgbRectifyImageR, COLOR_GRAY2BGR);
        // show each image separately
        rectangle(rgbRectifyImageL, validROIL, Scalar(0, 0, 255), 3, 8);
        rectangle(rgbRectifyImageR, validROIR, Scalar(0, 0, 255), 3, 8);
        imshow("ImageL After Rectify", rgbRectifyImageL);
        imshow("ImageR After Rectify", rgbRectifyImageR);
        // show both images on one canvas
        Mat canvas;
        double sf;
        int w, h;
        sf = 600. / MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width * sf);
        h = cvRound(imageSize.height * sf);
        canvas.create(h, w * 2, CV_8UC3);   // note: 3 channels
        // draw the left image onto the canvas
        Mat canvasPart = canvas(Rect(w * 0, 0, w, h));                                // left half of the canvas
        resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);     // scale the image to the size of canvasPart
        Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf),                // the valid ROI, scaled
            cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
        //rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8);                      // draw a rectangle around the ROI
        //cout << "Painted ImageL" << endl;
        // draw the right image onto the canvas
        canvasPart = canvas(Rect(w, 0, w, h));                                      // right half of the canvas
        resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
        Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
            cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
        //rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
        //cout << "Painted ImageR" << endl;
        // draw corresponding horizontal lines (epipolar lines after rectification)
        for (int i = 0; i < canvas.rows; i += 16)
            line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        /*
        Stereo matching
        */
        namedWindow("disparity", WINDOW_AUTOSIZE);
        // trackbar for the SAD window (block) size
        createTrackbar("BlockSize:\n", "disparity", &blockSize, 8, stereo_match);
        // trackbar for the uniqueness ratio (in percent)
        createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match);
        // trackbar for the number of disparities
        createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 16, stereo_match);
        // mouse callback: setMouseCallback(window name, callback, user data -- usually 0)
        setMouseCallback("disparity", onMouse, 0);
        stereo_match(0, 0);
        waitKey(30);
    }
    destroyAllWindows();
    return 0;
}
