调用摄像头,通过实时计算ORB+BF,得到场景突变程度

这段代码实时调用摄像头,获取前后两帧图像,进行 ORB 特征提取与 BF(暴力)匹配;通过统计有效匹配数量的变化,即可衡量图像场景的突变程度。

代码ORB+BF部分参考了https://github.com/gaoxiang12/slambook/blob/master/ch7/feature_extraction.cpp

打开摄像头并压缩图像部分参考了http://www.magicandlove.com/blog/2011/08/26/people-detection-in-opencv-again/


#include <iostream>
#include <opencv2/opencv.hpp>
#include<windows.h>


using namespace std;
using namespace cv;
 
int main (int argc, const char * argv[])
{
    VideoCapture cap(CV_CAP_ANY);
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);    
    if (!cap.isOpened())
        return -1;
 
    Mat img_Curr, img_Last;
	
	int GoodMatchCnt;

	//-- initialize
	std::vector<KeyPoint> keypoints_Curr, keypoints_Last;
	Mat descriptors_Curr, descriptors_Last;
	Ptr<FeatureDetector> detector = ORB::create();
	Ptr<DescriptorExtractor> descriptor = ORB::create();
	// Ptr<FeatureDetector> detector = FeatureDetector::create(detector_name);
	// Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create(descriptor_name);
	Ptr<DescriptorMatcher> matcher  = DescriptorMatcher::create ( "BruteForce-Hamming" );

	//Sleep(1000);

    while (true)
    {		
		img_Curr.copyTo(img_Last);
        cap >> img_Curr;
        if (!img_Curr.data)
            continue;
        if (!img_Last.data)
			continue;

		detector->detect ( img_Curr,keypoints_Curr );
		detector->detect ( img_Last,keypoints_Last );

		descriptor->compute ( img_Curr, keypoints_Curr, descriptors_Curr );
		descriptor->compute ( img_Last, keypoints_Last, descriptors_Last );
 
		/// in case bug 
        if (!descriptors_Curr.data)
			continue;
        if (!descriptors_Last.data)
			continue;
				
		vector<DMatch> matches;
		//BFMatcher matcher ( NORM_HAMMING );
		matcher->match ( descriptors_Curr, descriptors_Last, matches );

		double min_dist=10000, max_dist=0;

		for ( int i = 0; i < descriptors_Curr.rows; i++ )
		{
			double dist = matches[i].distance;
			if ( dist < min_dist ) min_dist = dist;
			if ( dist > max_dist ) max_dist = dist;
		}
    
		min_dist = min_element( matches.begin(), matches.end(), [](const DMatch& m1, const DMatch& m2) {return m1.distance<m2.distance;} )->distance;
		max_dist = max_element( matches.begin(), matches.end(), [](const DMatch& m1, const DMatch& m2) {return m1.distance<m2.distance;} )->distance;
		
		//printf ( "-- Max dist : %f \n", max_dist );
		//printf ( "-- Min dist : %f \n", min_dist );

		GoodMatchCnt = 0;
		std::vector< DMatch > good_matches;
		for ( int i = 0; i < descriptors_Curr.rows; i++ )
		{
			if ( matches[i].distance <= max ( 2*min_dist, 30.0 ) )
			{
				good_matches.push_back ( matches[i] );
				GoodMatchCnt++;
			}
		}
		
		Mat img_match;
		Mat img_goodmatch;
		drawMatches ( img_Curr, keypoints_Curr, img_Last, keypoints_Last, matches, img_match );
		drawMatches ( img_Curr, keypoints_Curr, img_Last, keypoints_Last, good_matches, img_goodmatch );
		printf("GoodMatchCnt = %d\r\n",GoodMatchCnt);
		//imshow ( "all matches", img_match );
		imshow ( "all optimized matches", img_goodmatch );
				
        if (waitKey(20) >= 0)
            break;
    }
    return 0;
}

猜你喜欢

转载自blog.csdn.net/qq_27158179/article/details/81535947