一、SIFTアルゴリズムの動作原理については、次のブログを参照してください
SIFT特徴抽出と分析OpenCVのAPI呼び出し
https://blog.csdn.net/kuweicai/article/details/78876707
二、テストプログラムの例
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
using namespace std;
using namespace cv;
using namespace xfeatures2d;
int main(int argc, char** argv)
{
Mat srcImage = imread("E:\\pictures\\29.1.jpg");
if (srcImage.empty())
{
cout << "图片读取错误!" << endl;
return -1;
}
namedWindow("原图", WINDOW_AUTOSIZE);
imshow("原图", srcImage);
int numFeatures = 1000;
Ptr<SIFT> detector = SIFT::create(numFeatures);
vector<KeyPoint> keypoints;
detector->detect(srcImage, keypoints, Mat());
cout << "总特征点数:" << keypoints.size() << endl;
Mat keypoint_img;
drawKeypoints(srcImage, keypoints, keypoint_img, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
namedWindow("效果图", WINDOW_AUTOSIZE);
imshow("效果图", keypoint_img);
waitKey(0);
return 0;
}
出力:
三、総合サンプルプログラム:SIFTでキーポイントを抽出・記述し、総当たり(Brute-Force)マッチングを行う
(復習しやすいように、参照したバージョンとは一部書き方を変えているため、貼り付けた後に修正しています)
以下のプログラムを実行した私の環境は VS2019 + OpenCV 4.1.1 (contrib) です。
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
using namespace std;
using namespace cv;
using namespace xfeatures2d;
int main()
{
Mat trainImage = imread("E:\\pictures\\36.jpg"), trainImage_gray;
imshow("原始图", trainImage);
cvtColor(trainImage, trainImage_gray, COLOR_BGR2GRAY);
//【2】检测SIFT关键点、提取训练图像描述符
vector<KeyPoint> train_keyPoint;
Mat trainDescription;
Ptr<SIFT> featureDetector = SIFT::create();
//SiftFeatureDetector featureDetector;
featureDetector->detect(trainImage_gray, train_keyPoint);
Ptr<SIFT> featureExtractor = SIFT::create();
//SiftDescriptorExtractor featureExtractor;
featureExtractor->compute(trainImage_gray, train_keyPoint, trainDescription);
// 【3】进行基于描述符的暴力匹配
BFMatcher matcher;
vector<Mat> train_desc_collection(1, trainDescription);
matcher.add(train_desc_collection);
matcher.train();
//【4】创建视频对象、定义帧率
VideoCapture cap(0);
unsigned int frameCount = 0;//帧数
//【5】不断循环,直到q键被按下
while (char(waitKey(1)) != 'q')
{
//<1>参数设置
double time0 = static_cast<double>(getTickCount());//记录起始时间
Mat captureImage, captureImage_gray;
cap >> captureImage;//采集视频到testImage中
if (captureImage.empty())
continue;
//<2>转化图像到灰度
cvtColor(captureImage, captureImage_gray, COLOR_BGR2GRAY);
//<3>检测SURF关键点、提取测试图像描述符
vector<KeyPoint> test_keyPoint;
Mat testDescriptor;
featureDetector->detect(captureImage_gray, test_keyPoint);
featureExtractor->compute(captureImage_gray, test_keyPoint, testDescriptor);
//<4>匹配训练和测试描述符
vector<vector<DMatch> > matches;
matcher.knnMatch(testDescriptor, matches, 2);
// <5>根据劳氏算法(Lowe's algorithm),得到优秀的匹配点
vector<DMatch> goodMatches;
for (unsigned int i = 0; i < matches.size(); i++)
{
if (matches[i][0].distance < 0.6 * matches[i][1].distance)
goodMatches.push_back(matches[i][0]);
}
//<6>绘制匹配点并显示窗口
Mat dstImage;
drawMatches(captureImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
imshow("匹配窗口", dstImage);
//<7>输出帧率信息
cout << "\t>当前帧率为:" << getTickFrequency() / (getTickCount() - time0) << endl;
}
return 0;
}