AKAZE/KAZE Local Features

AKAZE is an accelerated version of KAZE. Both build a nonlinear scale space instead of the Gaussian pyramid used by SIFT and SURF; AKAZE speeds up its construction with Fast Explicit Diffusion (FED) and pairs it with the binary M-LDB descriptor, while KAZE keeps a floating-point, SURF-like descriptor.

Compared with SIFT and SURF, AKAZE is more stable and runs faster.
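Both detectors are created through the usual Feature2D factory interface. A minimal sketch of the construction API (the numeric values are the library's documented defaults, listed only to show which knobs exist; makeDetector is just an illustrative helper name):

#include <opencv2/features2d.hpp>
using namespace cv;

// Illustrative helper: build either detector with explicit parameters.
static Ptr<Feature2D> makeDetector(bool useAkaze)
{
	if (useAkaze)
		return AKAZE::create(AKAZE::DESCRIPTOR_MLDB, // binary M-LDB descriptor
			0, 3,                                    // descriptor size (0 = full) and channels
			0.001f,                                  // detector response threshold
			4, 4,                                    // octaves, layers per octave
			KAZE::DIFF_PM_G2);                       // diffusivity of the nonlinear scale space
	return KAZE::create();                           // KAZE: floating-point (SURF-like) descriptor
}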

AKAZE feature detection example:

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv) 
{
	Mat src = imread("D:/cv400/data/lena.jpg", IMREAD_GRAYSCALE);
	if (src.empty())
	{
		cout<<"Load image error..."<<endl;
		return -1;
	}
	imshow("input image", src);

	// AKAZE keypoint detection and timing
	Ptr<AKAZE> detector = AKAZE::create();
	vector<KeyPoint> keypoints;
	double t1 = getTickCount();
	detector->detect(src, keypoints, Mat());
	double t2 = getTickCount();
	double t = (t2 - t1) / getTickFrequency();
	cout << "spend time : " << t<<" s" << endl;

	Mat keypointImg;
	drawKeypoints(src, keypoints, keypointImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
	imshow("kaze keypoints", keypointImg);

	waitKey(0);
	return 0;
}

Result screenshot:

Very fast: about 65 ms on this image.
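As a small follow-up to the example above (reusing its src and keypoints variables), drawKeypoints can also be asked to draw each keypoint's scale and orientation; a minimal sketch:

	Mat richImg;
	drawKeypoints(src, keypoints, richImg, Scalar::all(-1),
		DrawMatchesFlags::DRAW_RICH_KEYPOINTS); // circles sized by scale, with orientation
	imshow("akaze rich keypoints", richImg);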

Next, use feature matching to locate the object in a natural scene.

Sample code:

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv) 
{
	Mat img1 = imread("D:/cv400/data/box.png", IMREAD_GRAYSCALE);
	Mat img2 = imread("D:/cv400/data/box_in_scene.png", IMREAD_GRAYSCALE);
	if (img1.empty() || img2.empty()) 
	{
		cout << "Load image error..." << endl;
		return -1;
	}
	imshow("object image", img1);
	imshow("object in scene", img2);

	// AKAZE feature detection and description
	double t1 = (double)getTickCount();
	Ptr<AKAZE> detector = AKAZE::create();
	vector<KeyPoint> keypoints_obj;
	vector<KeyPoint> keypoints_scene;
	Mat descriptor_obj, descriptor_scene;
	detector->detectAndCompute(img1, Mat(), keypoints_obj, descriptor_obj);
	detector->detectAndCompute(img2, Mat(), keypoints_scene, descriptor_scene);

	// matching
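	// AKAZE's default M-LDB descriptor is binary (CV_8U), so FLANN needs an LSH index here;
	// a BFMatcher with NORM_HAMMING is the simpler alternative (see the sketch after this example).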
	FlannBasedMatcher matcher(new flann::LshIndexParams(20, 10, 2));
	vector<DMatch> matches;
	matcher.match(descriptor_obj, descriptor_scene, matches);
	double t2 = (double)getTickCount();
	double t = (t2 - t1) / getTickFrequency();
	cout << "spend time : " << t << "s" << endl;
	
	// find the minimum distance over all matches
	double minDist = 1000;
	for (int i = 0; i < descriptor_obj.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < minDist) 
			minDist = dist;	
	}
	cout<<"min distance : "<< minDist<<endl;

	// keep only matches whose distance is close to the minimum (good matches)
	vector<DMatch> goodMatches;
	for (int i = 0; i < descriptor_obj.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < max(3 * minDist, 0.02)) 
			goodMatches.push_back(matches[i]);	
	}

	
	// collect the matched keypoint coordinates to estimate the object-to-scene transform
	vector<Point2f> obj;         // keypoints on the object image
	vector<Point2f> objInScene;  // corresponding keypoints in the scene
	for (size_t i = 0; i < goodMatches.size(); i++) 
	{
		obj.push_back(keypoints_obj[goodMatches[i].queryIdx].pt);
		objInScene.push_back(keypoints_scene[goodMatches[i].trainIdx].pt);
	}
	Mat imgBH = findHomography(obj, objInScene, RANSAC);

	// project the object's four corners into the scene using the homography
	vector<Point2f> obj_corners(4);
	vector<Point2f> scene_corners(4);
	obj_corners[0] = Point(0, 0);
	obj_corners[1] = Point(img1.cols, 0);
	obj_corners[2] = Point(img1.cols, img1.rows);
	obj_corners[3] = Point(0, img1.rows);
	perspectiveTransform(obj_corners, scene_corners, imgBH);

	// draw the projected bounding quadrilateral
	Mat dst;
	cvtColor(img2, dst, COLOR_GRAY2BGR);
	line(dst, scene_corners[0], scene_corners[1], Scalar(0, 0, 255), 2, 8, 0);
	line(dst, scene_corners[1], scene_corners[2], Scalar(0, 0, 255), 2, 8, 0);
	line(dst, scene_corners[2], scene_corners[3], Scalar(0, 0, 255), 2, 8, 0);
	line(dst, scene_corners[3], scene_corners[0], Scalar(0, 0, 255), 2, 8, 0);

	imshow("find object in sence", dst);
	waitKey(0);
	return 0;
}

Result screenshot:

Slightly faster than SURF.
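
The FLANN/LSH matcher above works for AKAZE's binary descriptors, but a brute-force Hamming matcher with Lowe's ratio test is the more common pairing. A minimal alternative sketch, reusing img1/img2, keypoints_obj/keypoints_scene and descriptor_obj/descriptor_scene from the example above (the 0.75 ratio is an assumed, commonly used value, not from the original post):

	// Alternative matching: brute-force Hamming distance + Lowe ratio test.
	BFMatcher bf(NORM_HAMMING);
	vector<vector<DMatch>> knnMatches;
	bf.knnMatch(descriptor_obj, descriptor_scene, knnMatches, 2);

	vector<DMatch> ratioMatches;
	const float ratio = 0.75f; // assumed ratio threshold
	for (size_t i = 0; i < knnMatches.size(); i++)
	{
		if (knnMatches[i].size() == 2 &&
			knnMatches[i][0].distance < ratio * knnMatches[i][1].distance)
			ratioMatches.push_back(knnMatches[i][0]);
	}

	// Visualize the surviving matches side by side.
	Mat matchImg;
	drawMatches(img1, keypoints_obj, img2, keypoints_scene, ratioMatches, matchImg);
	imshow("ratio-test matches", matchImg);

The ratio test generally filters mismatches more reliably than the fixed multiple of the minimum distance used in the example.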

Reposted from blog.csdn.net/andylanzhiyong/article/details/84843076