C++ - opencv应用实例之 SURF 特征匹配用于图像拼接
- 通过获取相关联图像中的相似不变性特征来找出其位置对应关系
- 拼接过程中可能存在形变
- 完整算法:
SURF特征点提取 -> 特征匹配 -> 计算透视变换矩阵 -> 图像拼接
- 效果如下:
- 完整代码实现:
#include "features2d.hpp"
#include "xfeatures2d.hpp"
#include<opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;
// Display `img` in a resizable window named `w_name`.
// flg == 0 (default): show without blocking; flg == 1: block until a key is
// pressed; flg > 1: block for `flg` milliseconds.
void showImg(const char* w_name, const cv::Mat& img, int flg = 0)
{
    cv::namedWindow(w_name, cv::WINDOW_NORMAL);
    cv::imshow(w_name, img);
    if (flg <= 0)
        return;                         // non-blocking display
    cv::waitKey(flg == 1 ? 0 : flg);    // 1 => wait forever, otherwise wait flg ms
}
// Full pipeline: SURF feature extraction -> matching -> homography -> blend.
Mat stichingWithSURF(Mat mat1, Mat mat2);
// Maps the four corners of `src` through homography H into the global `corners`.
void calCorners(const Mat& H, const Mat& src);
// Detects SURF keypoints/descriptors in both images and matches them.
Mat extractFeatureAndMatch(Mat mat1, Mat mat2);
// Warps mat2 by the homography estimated from the matches and pastes mat1 on top.
Mat splicImg(Mat& mat1, Mat& mat2, vector<DMatch> goodMatchPoints, vector<KeyPoint> keyPoint1, vector<KeyPoint> keyPoint2);
// NOTE(review): declared but no definition is visible in this file — confirm it exists elsewhere.
void stichingWithStitcher(Mat mat1, Mat mat2);
// The four corners of an image after being mapped through a homography;
// used to size the stitched output canvas.
typedef struct
{
Point2f left_top;
Point2f left_bottom;
Point2f right_top;
Point2f right_bottom;
}four_corners_t;
// Global result of calCorners(); read by splicImg() to size the canvas.
four_corners_t corners;
void main()
{
Mat img1, img2;
img1 = imread("./imgs/1.jpg");
img2 = imread("./imgs/2.jpg");
resize(img1, img1, Size(img1.cols / 4, img1.rows / 4));
resize(img2, img2, Size(img2.cols / 4, img2.rows / 4));
Mat dst = stichingWithSURF(img1, img2);
showImg("拼好的图像", dst);
imwrite("result1.jpg", dst);
waitKey();
}
// Stitch mat1 (left) and mat2 (right) with the SURF-based pipeline.
// Thin wrapper kept so callers have a stable, descriptive entry point.
Mat stichingWithSURF(Mat mat1, Mat mat2)
{
    Mat stitched = extractFeatureAndMatch(mat1, mat2);
    return stitched;
}
void calCorners(const Mat& H, const Mat& src)
{
double v2[] = {
0,0,1 };
double v1[3];
Mat V2 = Mat(3, 1, CV_64FC1, v2);
Mat V1 = Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
cout << "0v1:" << v1[0] << endl;
cout << "V2: " << V2 << endl;
cout << "V1: " << V1 << endl;
corners.left_top.x = v1[0] / v1[2];
corners.left_top.y = v1[1] / v1[2];
v2[0] = 0;
v2[1] = src.rows;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);
V1 = Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
cout << "1v1:" << v1[0] << endl;
cout << "V2: " << V2 << endl;
cout << "V1: " << V1 << endl;
corners.left_bottom.x = v1[0] / v1[2];
corners.left_bottom.y = v1[1] / v1[2];
v2[0] = src.cols;
v2[1] = 0;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);
V1 = Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
cout << "2v1:" << v1 << endl;
cout << "V2: " << V2 << endl;
cout << "V1: " << V1 << endl;
corners.right_top.x = v1[0] / v1[2];
corners.right_top.y = v1[1] / v1[2];
v2[0] = src.cols;
v2[1] = src.rows;
v2[2] = 1;
V2 = Mat(3, 1, CV_64FC1, v2);
V1 = Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
cout << "3v1:" << v1 << endl;
cout << "V2: " << V2 << endl;
cout << "V1: " << V1 << endl;
corners.right_bottom.x = v1[0] / v1[2];
corners.right_bottom.y = v1[1] / v1[2];
cout << endl;
cout << "left_top:" << corners.left_top << endl;
cout << "left_bottom:" << corners.left_bottom << endl;
cout << "right_top:" << corners.right_top << endl;
cout << "right_bottom:" << corners.right_bottom << endl;
}
// Detect SURF keypoints/descriptors in both images, match them with FLANN
// (kNN, k = 2) filtered by Lowe's ratio test, draw the surviving matches,
// and hand off to splicImg() for the actual stitch.
//   mat1 - query/left image (BGR); mat2 - train/right image (BGR)
// Returns the stitched image, or an empty Mat if no features were found.
// Fixes vs. the original: guards against empty descriptor matrices (FLANN
// would crash), skips knnMatch rows with fewer than 2 neighbours before the
// ratio test, and drops the dead imagePoints1/imagePoints2 block that was
// recomputed (and actually used) inside splicImg.
Mat extractFeatureAndMatch(Mat mat1, Mat mat2)
{
    Mat matg1, matg2;
    cvtColor(mat1, matg1, COLOR_BGR2GRAY);
    cvtColor(mat2, matg2, COLOR_BGR2GRAY);

    // Hessian threshold 1000: keep only fairly strong interest points.
    Ptr<SURF> surfDetector = SURF::create(1000);
    vector<KeyPoint> keyPoint1, keyPoint2;
    Mat imgDesc1, imgDesc2;
    surfDetector->detectAndCompute(matg1, noArray(), keyPoint1, imgDesc1);
    surfDetector->detectAndCompute(matg2, noArray(), keyPoint2, imgDesc2);
    cout << "特征点描述矩阵1大小:(列*行) " << imgDesc1.cols << " * " << imgDesc1.rows << endl;

    if (imgDesc1.empty() || imgDesc2.empty())
    {
        cerr << "No SURF features detected in at least one image" << endl;
        return Mat();
    }

    FlannBasedMatcher matcher;
    vector<vector<DMatch>> matchPoints;
    vector<DMatch> goodMatchPoints;
    vector<Mat> train_disc(1, imgDesc2);
    matcher.add(train_disc);
    matcher.train();
    matcher.knnMatch(imgDesc1, matchPoints, 2);
    cout << "total match points: " << matchPoints.size() << endl;

    // Lowe's ratio test: keep a match only when the best neighbour is clearly
    // better than the second best (ratio 0.4 is quite strict).
    for (size_t i = 0; i < matchPoints.size(); i++)
    {
        if (matchPoints[i].size() >= 2 &&
            matchPoints[i][0].distance < 0.4f * matchPoints[i][1].distance)
        {
            goodMatchPoints.push_back(matchPoints[i][0]);
        }
    }

    Mat firstMatch;
    drawMatches(mat1, keyPoint1, mat2, keyPoint2, goodMatchPoints, firstMatch);
    showImg("匹配", firstMatch);
    imwrite("match1.jpg", firstMatch);

    return splicImg(mat1, mat2, goodMatchPoints, keyPoint1, keyPoint2);
}
// Stitch the two images: estimate a RANSAC homography mapping mat2's keypoints
// onto mat_left's, warp mat2 onto a canvas sized from its transformed corners,
// then paste mat_left (unwarped) over the left region.
//   mat_left        - reference/left image, copied verbatim into the result
//   mat2            - image to be warped into mat_left's coordinate frame
//   goodMatchPoints - ratio-test-filtered matches (queryIdx -> keyPoint1,
//                     trainIdx -> keyPoint2)
// Returns the stitched canvas, or an empty Mat when a homography cannot be
// estimated. Fixes vs. the original: enforces findHomography's minimum of 4
// correspondences, and clamps the canvas to at least mat_left's size so the
// final copyTo ROI cannot fall outside the canvas.
Mat splicImg(Mat& mat_left, Mat& mat2, vector<DMatch> goodMatchPoints, vector<KeyPoint> keyPoint1, vector<KeyPoint> keyPoint2)
{
    vector<Point2f> imagePoints1, imagePoints2;
    imagePoints1.reserve(goodMatchPoints.size());
    imagePoints2.reserve(goodMatchPoints.size());
    for (size_t i = 0; i < goodMatchPoints.size(); i++)
    {
        imagePoints1.push_back(keyPoint1[goodMatchPoints[i].queryIdx].pt);
        imagePoints2.push_back(keyPoint2[goodMatchPoints[i].trainIdx].pt);
    }

    // findHomography requires at least 4 point correspondences.
    if (imagePoints1.size() < 4)
    {
        cerr << "Not enough good matches (" << imagePoints1.size()
             << ") to estimate a homography" << endl;
        return Mat();
    }

    // Map points of image 2 into image 1's coordinate frame.
    Mat homo = findHomography(imagePoints2, imagePoints1, RANSAC);
    cout << "变换矩阵为:\n" << homo << endl << endl;
    calCorners(homo, mat2);

    // Canvas: wide enough for the warped right edge, tall enough for the
    // warped bottom edge — but never smaller than mat_left itself.
    int dstW = MAX((int)MAX(corners.right_top.x, corners.right_bottom.x), mat_left.cols);
    int dstH = MAX((int)MAX(corners.left_bottom.y, corners.right_bottom.y), mat_left.rows);

    Mat imgTransform2;
    warpPerspective(mat2, imgTransform2, homo, Size(dstW, dstH));
    showImg("直接经过透视矩阵变换得到的img2", imgTransform2);

    // Start from the warped image and overwrite the left region with mat_left.
    Mat dst = imgTransform2.clone();
    mat_left.copyTo(dst(Rect(0, 0, mat_left.cols, mat_left.rows)));
    return dst;
}