[OpenCV] Perspective Transformation

For the theory behind perspective transformation and how the transform matrix is derived, see the previous post 《透视变换 Perspective Transformation》. OpenCV also provides functions both for solving the transform and for applying it.
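As a quick recap of that post: the transform maps a source point $(u, v)$ through a 3×3 matrix $M$,

$$
\begin{bmatrix} x' \\ y' \\ w \end{bmatrix} = M \begin{bmatrix} u \\ v \\ 1 \end{bmatrix},
\qquad (x, y) = \left( \frac{x'}{w},\ \frac{y'}{w} \right),
$$

and since $M$ is only defined up to scale it has eight free parameters, which is why four point correspondences (eight equations) are enough to determine it.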

The function that computes the transform matrix:

Mat getPerspectiveTransform(const Point2f src[], const Point2f dst[])
    
    
Given the four corresponding points in the original image and in the transformed image, it returns the transform matrix. That matrix can then be passed to perspectiveTransform to transform a set of points:

void perspectiveTransform(InputArray src, OutputArray dst, InputArray m)
    
    
Note that src and dst here are not images but point coordinates in the images. Taking the example from the previous post and applying the transform in the opposite direction:


    
    
    #include <opencv2/opencv.hpp>
    #include <iostream>
    #include <vector>
    using namespace cv;
    using namespace std;

    int main()
    {
        Mat img = imread("boy.png");
        int img_height = img.rows;
        int img_width = img.cols;

        // Four corners of the source image
        vector<Point2f> corners(4);
        corners[0] = Point2f(0, 0);
        corners[1] = Point2f(img_width - 1, 0);
        corners[2] = Point2f(0, img_height - 1);
        corners[3] = Point2f(img_width - 1, img_height - 1);

        // Where those corners should land in the output image
        vector<Point2f> corners_trans(4);
        corners_trans[0] = Point2f(150, 250);
        corners_trans[1] = Point2f(771, 0);
        corners_trans[2] = Point2f(0, img_height - 1);
        corners_trans[3] = Point2f(650, img_height - 1);

        // Solve for the 3x3 perspective transform matrix
        Mat transform = getPerspectiveTransform(corners, corners_trans);
        cout << transform << endl;

        // Coordinates of every pixel in the source image
        vector<Point2f> points, points_trans;
        for (int i = 0; i < img_height; i++) {
            for (int j = 0; j < img_width; j++) {
                points.push_back(Point2f(j, i));
            }
        }

        // Transform all coordinates in one call
        perspectiveTransform(points, points_trans, transform);

        // Copy each source pixel to its transformed position
        Mat img_trans = Mat::zeros(img_height, img_width, CV_8UC3);
        int count = 0;
        for (int i = 0; i < img_height; i++) {
            uchar* p = img.ptr<uchar>(i);
            for (int j = 0; j < img_width; j++) {
                int y = points_trans[count].y;
                int x = points_trans[count].x;
                // Skip coordinates that fall outside the output image
                if (x >= 0 && x < img_width && y >= 0 && y < img_height) {
                    uchar* t = img_trans.ptr<uchar>(y);
                    t[x * 3]     = p[j * 3];
                    t[x * 3 + 1] = p[j * 3 + 1];
                    t[x * 3 + 2] = p[j * 3 + 2];
                }
                count++;
            }
        }
        imwrite("boy_trans.png", img_trans);
        return 0;
    }

This produces the transformed image:


Note that mapping the source image forward onto the destination like this leaves some pixels unfilled, which show up as small black dots in the result. One fix is interpolation; a simpler one is to invert the process: instead of transforming each source pixel and writing it into the new image, iterate over the new image and use the inverse transform to find the corresponding source pixel. That sounds a little convoluted in words; the code in the previous post 《透视变换 Perspective Transformation》 should make it clear.
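For reference, here is a minimal sketch of that backward-mapping idea using OpenCV's built-in warpPerspective() (this is not the code from the previous post): warpPerspective walks over every destination pixel, maps it back through the transform and interpolates from the source, so no holes appear. It simply reuses the correspondences from the example above.

    #include <opencv2/opencv.hpp>
    using namespace cv;

    int main()
    {
        Mat img = imread("boy.png");
        if (img.empty()) return -1;

        // Same four correspondences as in the example above
        std::vector<Point2f> corners = {
            Point2f(0, 0), Point2f(img.cols - 1, 0),
            Point2f(0, img.rows - 1), Point2f(img.cols - 1, img.rows - 1) };
        std::vector<Point2f> corners_trans = {
            Point2f(150, 250), Point2f(771, 0),
            Point2f(0, img.rows - 1), Point2f(650, img.rows - 1) };

        Mat transform = getPerspectiveTransform(corners, corners_trans);

        // warpPerspective samples the source through the inverse transform
        // with (by default) bilinear interpolation, so every destination
        // pixel gets a value and no black dots remain.
        Mat img_trans;
        warpPerspective(img, img_trans, transform, img.size());

        imwrite("boy_trans_warp.png", img_trans);
        return 0;
    }

The output size is chosen to match the input here, so any part of the warped image that falls outside it is simply cropped.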

Besides getPerspectiveTransform(), OpenCV also provides findHomography(), which estimates the transform not from four hand-picked points but directly from two matched viewing planes, i.e. from a whole set of point correspondences between them (optionally with RANSAC to reject outliers). It is used in the classic feature-matching example, which is also very intuitive:


    
    
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/calib3d/calib3d.hpp>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/nonfree/features2d.hpp>   // SURF lives in the nonfree module (xfeatures2d in OpenCV 3+)
    #include <iostream>
    #include <cstdio>
    using namespace cv;

    int main( int argc, char** argv )
    {
        Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
        Mat img_scene  = imread( argv[2], IMREAD_GRAYSCALE );
        if( !img_object.data || !img_scene.data )
        { std::cout << " --(!) Error reading images " << std::endl; return -1; }

        //-- Step 1: Detect the keypoints using SURF Detector
        int minHessian = 400;
        SurfFeatureDetector detector( minHessian );
        std::vector<KeyPoint> keypoints_object, keypoints_scene;
        detector.detect( img_object, keypoints_object );
        detector.detect( img_scene, keypoints_scene );

        //-- Step 2: Calculate descriptors (feature vectors)
        SurfDescriptorExtractor extractor;
        Mat descriptors_object, descriptors_scene;
        extractor.compute( img_object, keypoints_object, descriptors_object );
        extractor.compute( img_scene, keypoints_scene, descriptors_scene );

        //-- Step 3: Matching descriptor vectors using FLANN matcher
        FlannBasedMatcher matcher;
        std::vector<DMatch> matches;
        matcher.match( descriptors_object, descriptors_scene, matches );

        //-- Quick calculation of max and min distances between keypoints
        double max_dist = 0; double min_dist = 100;
        for( int i = 0; i < descriptors_object.rows; i++ )
        {
            double dist = matches[i].distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }
        printf( "-- Max dist : %f \n", max_dist );
        printf( "-- Min dist : %f \n", min_dist );

        //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist)
        std::vector<DMatch> good_matches;
        for( int i = 0; i < descriptors_object.rows; i++ )
        {
            if( matches[i].distance < 3 * min_dist )
            { good_matches.push_back( matches[i] ); }
        }

        Mat img_matches;
        drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                     good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                     std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        //-- Localize the object from img_object in img_scene
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        for( size_t i = 0; i < good_matches.size(); i++ )
        {
            //-- Get the keypoints from the good matches
            obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
        }
        Mat H = findHomography( obj, scene, RANSAC );

        //-- Get the corners of the object image (the object to be "detected")
        std::vector<Point2f> obj_corners(4);
        obj_corners[0] = Point( 0, 0 );
        obj_corners[1] = Point( img_object.cols, 0 );
        obj_corners[2] = Point( img_object.cols, img_object.rows );
        obj_corners[3] = Point( 0, img_object.rows );
        std::vector<Point2f> scene_corners(4);
        perspectiveTransform( obj_corners, scene_corners, H );

        //-- Draw lines between the mapped corners in the scene image; the offset shifts them
        //-- past the object image, which drawMatches places on the left side of img_matches
        Point2f offset( (float)img_object.cols, 0 );
        line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(0, 255, 0), 4 );

        //-- Show detected matches
        imshow( "Good Matches & Object detection", img_matches );
        waitKey(0);
        return 0;
    }

The result of running the code:



findHomography() derives the transform directly from the matched feature points on the two planes; the code then transforms the four corner points of the object image and draws the corresponding quadrilateral on the scene image to the right. The figure is also a good illustration of the "viewing plane" in perspective transformation.
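As an aside, here is a minimal sketch of what calling findHomography() looks like on a bare set of point pairs. The coordinates are made up (they do not come from the example above); the point is only to show that it accepts more than four correspondences and that with RANSAC it can also report which pairs were treated as inliers:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    using namespace cv;

    int main()
    {
        // More than four (possibly noisy) correspondences; findHomography fits a
        // single 3x3 homography and, with RANSAC, flags outlying pairs.
        std::vector<Point2f> src = { {0, 0}, {100, 0}, {100, 100}, {0, 100}, {50, 50}, {25, 75} };
        std::vector<Point2f> dst = { {10, 5}, {115, 8}, {120, 110}, {5, 108}, {60, 55}, {30, 80} };

        std::vector<uchar> inlier_mask;
        Mat H = findHomography(src, dst, RANSAC, 3.0, inlier_mask);

        std::cout << "H = " << H << std::endl;
        for (size_t i = 0; i < inlier_mask.size(); i++)
            std::cout << "pair " << i << (inlier_mask[i] ? " inlier" : " outlier") << std::endl;
        return 0;
    }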


(When reposting, please credit the author and source: http://blog.csdn.net/xiaowei_cqu. Do not use for commercial purposes without permission.)



Reposted from blog.csdn.net/monk1992/article/details/83657523