OpenCV image stitching

Diagram of the homography transform between the left and right images
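
In brief, the relation the diagram illustrates: for an (approximately) planar scene or a purely rotating camera, a point (x2, y2) in the right image maps to its corresponding point (x1, y1) in the left image through a 3x3 homography H, in homogeneous coordinates:

    s * [x1, y1, 1]^T = H * [x2, y2, 1]^T,   H a 3x3 matrix with 8 degrees of freedom

findHomography estimates H from matched feature points, and warpPerspective applies it to every pixel of the right image so it lands in the left image's coordinate frame.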

1. SURF feature point extraction

#include "pch.h"
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include "StitcherTest.h"


using namespace cv;
using namespace cv::xfeatures2d;
using namespace std;


int main()
{
	Mat image_1 = imread("1.jpg");
	Mat image_2 = imread("2.jpg");

	// Check that the image files could be read before doing any processing
	if (!image_1.data) {
		std::cout << "Error Reading Image 1" << std::endl;
		return 0;
	}
	if (!image_2.data) {
		std::cout << "Error Reading Image 2" << std::endl;
		return 0;
	}

	// imread loads images in BGR order, so convert with COLOR_BGR2GRAY
	Mat gray_image_1;
	Mat gray_image_2;
	cvtColor(image_1, gray_image_1, COLOR_BGR2GRAY);
	cvtColor(image_2, gray_image_2, COLOR_BGR2GRAY);

	imshow("bag1 image", image_1);
	imshow("bag2 image", image_2);

	// Detect the keypoints using the SURF detector
	// Based on Anna Huaman's 'Features2D + Homography to find a known object' tutorial
	int minHessian = 50;
	Ptr<SurfFeatureDetector> detector = SurfFeatureDetector::create(minHessian);
	std::vector<KeyPoint> keypoints_2, keypoints_1;
	detector->detect(gray_image_2, keypoints_2);
	detector->detect(gray_image_1, keypoints_1);

	// Calculate feature vectors (descriptors)
	// Based on Anna Huaman's 'Features2D + Homography to find a known object' tutorial
	Ptr<SurfDescriptorExtractor> extractor = SurfDescriptorExtractor::create();
	Mat descriptors_2, descriptors_1;
	extractor->compute(gray_image_2, keypoints_2, descriptors_2);
	extractor->compute(gray_image_1, keypoints_1, descriptors_1);

	// Matching descriptor vectors using FLANN matcher
	// Based on Anna Huaman's 'Features2D + Homography to find a known object' tutorial
	FlannBasedMatcher matcher;
	std::vector <DMatch> matches;
	matcher.match(descriptors_2, descriptors_1, matches);

	double max_dist = 0;
	double min_dist = 100;

	// Quick calculation of the max and min distances between matched keypoints
	// Based on Anna Huaman's 'Features2D + Homography to find a known object' tutorial
	for (int i = 0; i < descriptors_2.rows; i++) {
		double dist = matches[i].distance;
		if (dist < min_dist) {
			min_dist = dist;
		}
		if (dist > max_dist) {
			max_dist = dist;
		}
	}

	// Use matches that have a distance that is less than 3 * min_dist
	std::vector <DMatch> good_matches;

	for (int i = 0; i < descriptors_2.rows; i++) {
		if (matches[i].distance < 3 * min_dist) {
			good_matches.push_back(matches[i]);
		}
	}

	std::vector <Point2f> points2;
	std::vector <Point2f> points1;

	for (int i = 0; i < good_matches.size(); i++) {
		// Get the keypoints from the good matches
		points2.push_back(keypoints_2[good_matches[i].queryIdx].pt);
		points1.push_back(keypoints_1[good_matches[i].trainIdx].pt);
	}

	// Stitch with the left image as the reference and warp the right image onto it
	// (see the homography diagram above)
	// Find the Homography Matrix
	Mat H = findHomography(points2, points1, RANSAC);
	// Use the Homography Matrix to warp the images
	cv::Mat resultRight;
	warpPerspective(image_2, resultRight, H, cv::Size(2 * image_2.cols, image_2.rows));
	cv::Mat half(resultRight, cv::Rect(0, 0, image_1.cols, image_1.rows));
	image_1.copyTo(half);
	imshow("resultRight", resultRight);

	// Stitch with the right image as the reference and warp the left image onto it
	// (see the homography diagram above)
	Mat homo = findHomography(points1, points2);
	// The left image must also be shifted right by the width of image_1
	Mat shftMat = (Mat_<double>(3, 3) << 1.0, 0, image_1.cols, 0, 1.0, 0, 0, 0, 1);
	Mat resultLeft;
	warpPerspective(image_1, resultLeft, shftMat * homo, Size(2 * image_2.cols, image_2.rows));
	image_2.copyTo(Mat(resultLeft, Rect(image_1.cols, 0, image_2.cols, image_2.rows)));

	// Display the result
	imshow("resultLeft", resultLeft);
	waitKey(0);
	return 0;
}
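
A common alternative to the 3 * min_dist filter above is Lowe's ratio test with knnMatch: keep a match only when its best candidate is clearly closer than the second-best candidate. A minimal standalone sketch under the same setup (same "1.jpg"/"2.jpg" inputs; the 0.75 threshold is a conventional value, not taken from the original post). The resulting goodMatches can then feed findHomography exactly as in the listing above.

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace cv::xfeatures2d;

int main()
{
	Mat img1 = imread("1.jpg", IMREAD_GRAYSCALE);
	Mat img2 = imread("2.jpg", IMREAD_GRAYSCALE);
	if (img1.empty() || img2.empty())
		return 0;

	// Detect SURF keypoints and compute descriptors in one call
	Ptr<SURF> surf = SURF::create(50);
	std::vector<KeyPoint> kp1, kp2;
	Mat desc1, desc2;
	surf->detectAndCompute(img1, noArray(), kp1, desc1);
	surf->detectAndCompute(img2, noArray(), kp2, desc2);

	// For each descriptor of image 2, find the two nearest neighbours in image 1
	FlannBasedMatcher matcher;
	std::vector<std::vector<DMatch>> knnMatches;
	matcher.knnMatch(desc2, desc1, knnMatches, 2);

	// Lowe's ratio test: accept a match only if it is clearly better than the runner-up
	const float ratioThresh = 0.75f;
	std::vector<DMatch> goodMatches;
	for (size_t i = 0; i < knnMatches.size(); i++) {
		if (knnMatches[i].size() == 2 &&
			knnMatches[i][0].distance < ratioThresh * knnMatches[i][1].distance)
			goodMatches.push_back(knnMatches[i][0]);
	}

	std::cout << "ratio-test matches: " << goodMatches.size() << std::endl;
	return 0;
}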

2. ORB feature point extraction

#include "pch.h"
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include "StitcherTest.h"


using namespace cv;
using namespace cv::xfeatures2d;
using namespace std;


int main()
{
	cv::Mat image1 = cv::imread("1.jpg", 1);
	cv::Mat image2 = cv::imread("2.jpg", 1);
	if (!image1.data || !image2.data)
		return 0;
	imshow("image1", image1);
	imshow("image2", image2);

	// Find ORB feature point matches
	Ptr<DescriptorMatcher> descriptorMatcher;
	vector<DMatch> matches;
	vector<KeyPoint> keyImg1, keyImg2;
	Mat descImg1, descImg2;

	// Create the ORB detector
	Ptr<Feature2D> b = ORB::create();

	// Detect keypoints and compute descriptors for both images in one call
	b->detectAndCompute(image1, Mat(), keyImg1, descImg1, false);
	b->detectAndCompute(image2, Mat(), keyImg2, descImg2, false);

	// Match feature points; ORB descriptors are binary, so use Hamming distance
	descriptorMatcher = DescriptorMatcher::create("BruteForce-Hamming");
	descriptorMatcher->match(descImg1, descImg2, matches, Mat());

	Mat index;
	int nbMatch = int(matches.size());
	Mat tab(nbMatch, 1, CV_32F);
	for (int i = 0; i < nbMatch; i++)
	{
		tab.at<float>(i, 0) = matches[i].distance;
	}
	sortIdx(tab, index, cv::SORT_EVERY_COLUMN + cv::SORT_ASCENDING);
	vector<DMatch> bestMatches;
	// Keep the 60 closest matches (or fewer if not enough matches were found)
	int nbBest = std::min(nbMatch, 60);
	for (int i = 0; i < nbBest; i++)
	{
		bestMatches.push_back(matches[index.at<int>(i, 0)]);
	}
	Mat result;
	drawMatches(image1, keyImg1, image2, keyImg2, bestMatches, result);
	imshow("best matches", result);
	std::vector<Point2f> obj;
	std::vector<Point2f> scene;
	for (int i = 0; i < (int)bestMatches.size(); i++)
	{
		obj.push_back(keyImg1[bestMatches[i].queryIdx].pt);
		scene.push_back(keyImg2[bestMatches[i].trainIdx].pt);
	}

	// Estimate the homography matrix with RANSAC
	Mat H = findHomography(scene, obj, RANSAC);
	
	cv::Mat resultRight;
	warpPerspective(image2, resultRight, H, cv::Size(image1.cols + image2.cols, max(image1.rows, image2.rows)));
	cv::Mat half(resultRight, cv::Rect(0, 0, image1.cols, image1.rows));
	image1.copyTo(half);
	imshow("resultRight", resultRight);

	waitKey(0);
	return 0;
}
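
For comparison, OpenCV also ships a high-level Stitcher class that performs feature detection, homography estimation and seam blending internally, which avoids the visible seam left by simply copying one image over the warped result. A minimal sketch, assuming OpenCV 3.3+ where Stitcher::create(Stitcher::PANORAMA) is available (earlier 3.x releases use Stitcher::createDefault() instead).

#include <opencv2/opencv.hpp>
#include <opencv2/stitching.hpp>
#include <iostream>
#include <vector>

using namespace cv;

int main()
{
	std::vector<Mat> imgs;
	imgs.push_back(imread("1.jpg"));
	imgs.push_back(imread("2.jpg"));
	if (imgs[0].empty() || imgs[1].empty())
		return 0;

	// Stitcher chooses features, estimates the transforms and blends the seam
	Mat pano;
	Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::PANORAMA);
	Stitcher::Status status = stitcher->stitch(imgs, pano);
	if (status != Stitcher::OK) {
		std::cout << "stitching failed, status = " << int(status) << std::endl;
		return -1;
	}

	imshow("pano", pano);
	waitKey(0);
	return 0;
}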


Reposted from blog.csdn.net/zouxin_88/article/details/84256518