The Homography Matrix and Its Implementation (OpenCV)

Reference: https://docs.opencv.org/3.4.1/d9/dab/tutorial_homography.html

Preface

  • A homography is a plane-to-plane correspondence in images.
  • In perspective (projective) space, any two planes are related by a homography.
  • Homographies are transitive: composing two homographies yields another homography.
  • A homography encodes the pixel-to-pixel correspondence between two photos, and it can be decomposed (e.g. via SVD) into the relative rotation R and translation T of the two cameras (see the sketch after this list).
  • When the cameras do not share a center, every plane seen in both photos has its own homography between the two images.
  • From feature matches, RANSAC can estimate a homography between two images (in theory the participating points lie on one plane). RANSAC finds the homography of the plane containing the most matched points; that plane may be virtual, may be identifiable in the image, or may even be the entire photo plane (see the next item).
  • When the cameras do share a center (the camera stays in place and only rotates between the two shots), the homography estimated from matches expresses exactly the rotation between the two photos: the translation is zero, so the homography is the rotation conjugated by the intrinsics, H = K R K⁻¹ (see the sketch after this list). In this case the plane found by RANSAC is the entire photo plane.
  • For stitching, a parallax-free result is possible in two cases. First, the two photos share a camera center: applying the homography to every point maps one photo plane onto the other, so everything being transformed lies on a single plane, the transform is plane-to-plane, and no parallax appears. Second, the camera centers may differ, but the imaged scene is a single plane; the homography is again a plane-to-plane map, so no parallax appears.
  • When two cameras both see a plane P, image A is related to P by a homography, and image B is related to P by another. Treating each photo as a plane, plane A is in homography with P and plane B is in homography with P, so by transitivity plane A is in homography with plane B.
  • The homography between a photo and a world plane can be solved for the direction and magnitude of that plane's normal relative to the photo plane, which is the basis of camera calibration (Zhang's method).
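To make the decomposition and the pure-rotation case above concrete, here is a minimal, self-contained sketch; the intrinsics K, the pose (R, T) and the plane parameters (n, d) below are made-up values for illustration, not from any real calibration. It builds the plane-induced homography H = K(R + T·nᵀ/d)K⁻¹, which reduces to H = K·R·K⁻¹ when T = 0 (the concentric case), and then recovers candidate {R, T, n} triples with OpenCV's decomposeHomographyMat.

#include <iostream>
#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/calib3d.hpp"
using namespace cv;
int main()
{
    // Made-up intrinsics: fx = fy = 800, principal point (320, 240).
    Mat K = (Mat_<double>(3, 3) << 800,   0, 320,
                                     0, 800, 240,
                                     0,   0,   1);
    // A known relative pose: a small rotation about Y plus a translation.
    Mat rvec = (Mat_<double>(3, 1) << 0, 0.1, 0);
    Mat R;
    Rodrigues(rvec, R);
    Mat T = (Mat_<double>(3, 1) << 0.1, 0, 0);
    // The observed plane: normal n and distance d in the first camera's frame.
    Mat n = (Mat_<double>(3, 1) << 0, 0, 1);
    double d = 2.0;
    // Plane-induced homography H = K (R + T * n^T / d) K^{-1};
    // with T = 0 (concentric cameras) this is just H = K R K^{-1}.
    Mat H = K * (R + T * n.t() / d) * K.inv();
    H /= H.at<double>(2, 2); // a homography is defined only up to scale
    // Recover candidate {R, T, n} triples (up to four solutions;
    // the returned translations are scaled by 1/d).
    std::vector<Mat> Rs, Ts, ns;
    int solutions = decomposeHomographyMat(H, K, Rs, Ts, ns);
    std::cout << solutions << " candidate decompositions" << std::endl;
    for (int i = 0; i < solutions; i++)
        std::cout << "R =\n" << Rs[i] << "\nT^T = " << Ts[i].t() << std::endl;
    return 0;
}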

Experiment

This experiment is the example from the OpenCV 3.4 tutorial, slightly modified into a program that also prints the values of the estimated homography matrix.

#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
/* @function main */
int main( int argc, char** argv )
{
    Mat img_object = imread("img1.ppm", IMREAD_GRAYSCALE );
    Mat img_scene  = imread("img2.ppm", IMREAD_GRAYSCALE );
    if( img_object.empty() || img_scene.empty() )
    { std::cout << " --(!) Error reading images " << std::endl; return -1; }
    //-- Step 1: Detect the keypoints and extract descriptors using SURF
    int minHessian = 400;
    Ptr<SURF> detector = SURF::create( minHessian );
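    // Note: SURF lives in the contrib module xfeatures2d, so this program
    // needs an OpenCV build that includes opencv_contrib.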
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    Mat descriptors_object, descriptors_scene;
    detector->detectAndCompute( img_object, Mat(), keypoints_object, descriptors_object );
    detector->detectAndCompute( img_scene, Mat(), keypoints_scene, descriptors_scene );
    //-- Step 2: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );
    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    { double dist = matches[i].distance;
      if( dist < min_dist ) min_dist = dist;
      if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );
    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_object.rows; i++ )
    { if( matches[i].distance <= 3*min_dist )
       { good_matches.push_back( matches[i]); }
    }
    Mat img_matches;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
      //-- Get the keypoints from the good matches
      obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
      scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
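    //-- findHomography with RANSAC keeps the hypothesis supported by the most
    //-- matches, i.e. the dominant (possibly virtual) plane discussed in the
    //-- preface; matches off that plane are rejected as outliers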
    Mat H = findHomography( obj, scene, RANSAC );

    //-- Print the estimated 3x3 homography row by row (H is CV_64F;
    //-- at<double>(i, j) indexes row i, column j)
    for( int i = 0; i < H.rows; i++ ){
        for( int j = 0; j < H.cols; j++ )
            std::cout << H.at<double>(i, j) << "  ";
        std::cout << std::endl;
    }

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f( 0, 0 );
    obj_corners[1] = Point2f( (float)img_object.cols, 0 );
    obj_corners[2] = Point2f( (float)img_object.cols, (float)img_object.rows );
    obj_corners[3] = Point2f( 0, (float)img_object.rows );
    std::vector<Point2f> scene_corners(4);
    perspectiveTransform( obj_corners, scene_corners, H);
    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    Point2f offset( (float)img_object.cols, 0 );
    line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(0, 255, 0), 4 );
    //-- Show detected matches
    namedWindow( "Good Matches & Object detection", WINDOW_NORMAL );
    imshow( "Good Matches & Object detection", img_matches );
    waitKey(0);
    return 0;
}


Reposted from blog.csdn.net/fb_help/article/details/79872317