Stereo distance measurement demo

#include <iostream>
#include <cmath>
#include <algorithm>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;

// Intrinsic matrix of the left camera
const Mat IntrinsicMatrix = (Mat_<double>(3, 3) << 806.253555589359, 0, 311.16780116005,
                                                   0, 805.667519352931, 244.341382241868,
                                                   0, 0, 1);

// Baseline: the first element of stereoParams.TranslationOfCamera2 (absolute value, in mm).
// During stereo calibration, make sure the real checkerboard square size is entered correctly.
double baseline = 88.8804297376557;

int main()
{
    Mat img_1 = imread("Left.jpg", IMREAD_COLOR);
    Mat img_2 = imread("Right.jpg", IMREAD_COLOR);
    if (img_1.empty() || img_2.empty())
    {
        cerr << "Failed to load Left.jpg / Right.jpg" << endl;
        return -1;
    }

    //-- Initialization
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    Mat descriptors_1, descriptors_2;
    Ptr<FeatureDetector> detector = ORB::create(5);       // only 5 keypoints are requested per image
    Ptr<DescriptorExtractor> descriptor = ORB::create(5);

    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");

    //-- Step 1: detect Oriented FAST corner positions
    detector->detect(img_1, keypoints_1);
    detector->detect(img_2, keypoints_2);

    //-- Step 2: compute BRIEF descriptors at the detected corners
    descriptor->compute(img_1, keypoints_1, descriptors_1);
    descriptor->compute(img_2, keypoints_2, descriptors_2);

    Mat outimg1, outimg2;
    drawKeypoints(img_1, keypoints_1, outimg1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    drawKeypoints(img_2, keypoints_2, outimg2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    imshow("ORB keypoints - left", outimg1);
    imshow("ORB keypoints - right", outimg2);

    //-- Step 3: match the BRIEF descriptors of the two images using the Hamming distance
    vector<DMatch> matches;
    //BFMatcher matcher ( NORM_HAMMING );
    matcher->match(descriptors_1, descriptors_2, matches);

    //-- Step 4: filter the matched pairs
    double min_dist = 10000, max_dist = 0;

    // Find the minimum and maximum distance over all matches,
    // i.e. the distances of the most similar and the least similar pairs.
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    // A match is considered wrong when the descriptor distance exceeds twice the minimum
    // distance. Because the minimum distance can be very small, an empirical lower bound
    // of 30 is used as well.
    std::vector<DMatch> good_matches;
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        if (matches[i].distance <= max(2 * min_dist, 30.0))
        {
            good_matches.push_back(matches[i]);
        }
    }

    //-- Step 5: draw the matching results
    Mat img_match;
    Mat img_goodmatch;
    drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_match);
    drawMatches(img_1, keypoints_1, img_2, keypoints_2, good_matches, img_goodmatch);
    imshow("all matches", img_match);
    imshow("good matches", img_goodmatch);

    if (!good_matches.empty())
    {
        Point2d pointleft = keypoints_1[good_matches[0].queryIdx].pt;
        cout << "left image point : " << pointleft.x << " , " << pointleft.y << endl;
        Point2d pointright = keypoints_2[good_matches[0].trainIdx].pt;
        cout << "right image point : " << pointright.x << " , " << pointright.y << endl;

        // Disparity
        double disparity = std::abs(pointleft.x - pointright.x);
        cout << "disparity of the two points : " << disparity << endl;

        // Read fx, cx, fy, cy from the intrinsic matrix
        const double fx = IntrinsicMatrix.at<double>(0, 0);
        const double cx = IntrinsicMatrix.at<double>(0, 2);
        const double fy = IntrinsicMatrix.at<double>(1, 1);
        const double cy = IntrinsicMatrix.at<double>(1, 2);
        cout << fx << "," << cx << "," << fy << "," << cy << endl;

        // Focal length: f = (fx + fy) / 2
        double f = (fx + fy) / 2.0;
        cout << "focal length = " << f << endl;

        // Z coordinate of the point relative to the left camera's optical center.
        // Formula: disparity * Z = f * baseline
        // (disparity in pixels, Z in mm, f in pixels, baseline in mm)
        double Z = (f * baseline) / disparity;
        cout << "Z = " << Z << endl;

        // Pinhole model:
        //    Z*u       fx   0   cx      X          X = (Z*u - cx*Z)/fx
        //    Z*v   =   0    fy   cy  *  Y    =>    Y = (Z*v - cy*Z)/fy
        //    Z         0    0    1      Z
        double X = (pointleft.x - cx) * Z / fx;
        double Y = (pointleft.y - cy) * Z / fy;
        cout << "3D = " << "[" << X << "," << Y << "," << Z << "]" << endl;

        // Hard-coded test pair of corresponding points
        Point2d pointLeft_(581.535006749608, 155.058265533219);
        Point2d pointRight_(505.767095783631, 138.316326128893);

        double disparity_ = std::abs(pointRight_.x - pointLeft_.x);
        double Z_ = (f * baseline) / disparity_;
        double X_ = (pointLeft_.x - cx) * Z_ / fx;
        double Y_ = (pointLeft_.y - cy) * Z_ / fy;
        cout << "test 3D = " << "[" << X_ << "," << Y_ << "," << Z_ << "]" << endl;
    }

    waitKey(0);
    return 0;
}
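For the hard-coded test pair the numbers work out as follows: disparity ≈ |505.767 - 581.535| ≈ 75.77 px and f ≈ (806.25 + 805.67) / 2 ≈ 805.96 px, so Z = f * baseline / disparity ≈ 805.96 * 88.88 / 75.77 ≈ 945 mm, i.e. that point lies roughly 0.95 m in front of the left camera.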
The baseline is the first element of stereoParams.TranslationOfCamera2 (a 3-element vector) in the data produced by MATLAB stereo calibration.
For example, taking the left camera's coordinate system as the world frame, the position of the right camera's optical center relative to it is described by

stereoParams.TranslationOfCamera2 =
[-154.991555371036         -1.15364615812498          16.6412087248481]

So if you want high accuracy from stereo ranging, you should still rectify the matched points (at least the right one) before computing the disparity: just as you cannot guarantee that the two cameras have exactly the same focal length, you cannot guarantee that their optical axes are parallel (strictly speaking, you cannot guarantee that the x-axes of the two camera coordinate frames are collinear and that their y- and z-axes are parallel).
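Below is a minimal sketch of that rectification step in OpenCV, continuing from the variables of the demo above. It assumes the same calibration also provides the right camera's intrinsic matrix, both distortion vectors and the rotation/translation between the cameras; the names K2, D1, D2, R and T are placeholders for those values, not data from the original post.

    // Sketch: rectify the matched points before computing disparity.
    // Needs #include <opencv2/calib3d.hpp> for stereoRectify / undistortPoints.
    // K2, D1, D2, R, T are placeholders -- fill them from the stereo calibration
    // (e.g. MATLAB's RotationOfCamera2 / TranslationOfCamera2 and the distortion coefficients).
    Mat K2, D1, D2, R, T;

    // Rectifying rotations R1/R2 and new projection matrices P1/P2 for the two cameras.
    Mat R1, R2, P1, P2, Q;
    stereoRectify(IntrinsicMatrix, D1, K2, D2, img_1.size(), R, T,
                  R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY);

    // Map the raw matched pixels into the rectified frames.
    vector<Point2d> left = { pointleft }, right = { pointright };
    vector<Point2d> leftRect, rightRect;
    undistortPoints(left,  leftRect,  IntrinsicMatrix, D1, R1, P1);
    undistortPoints(right, rightRect, K2,              D2, R2, P2);

    // After rectification the rows line up, so the disparity is purely horizontal;
    // the rectified focal length and baseline can be read off P1 and P2.
    double d  = std::abs(leftRect[0].x - rightRect[0].x);
    double fr = P1.at<double>(0, 0);
    double br = std::abs(P2.at<double>(0, 3) / P2.at<double>(0, 0));
    double Zr = fr * br / d;

Alternatively, rectify the whole images first with initUndistortRectifyMap + remap and run the feature matching on the rectified pair.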


Reposted from www.cnblogs.com/winslam/p/9091064.html