OpenCV: Processing Video Sequences

A video is made up of a sequence of images called frames. The frames are captured at fixed time intervals (known as the frame rate, usually expressed in frames per second), which is what makes it possible to display a scene in motion.

*Reading a video sequence

Frames can be read from a video sequence with an instance of the cv::VideoCapture class.
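
Note that besides a file name, cv::VideoCapture also accepts a device index to capture from a camera; a minimal sketch (device index 0, i.e. the default camera, is an assumption):

#include <opencv2/opencv.hpp>

int main()
{
	cv::VideoCapture camera(0);	// device index 0: the default camera
	if (!camera.isOpened())
		return 1;
	cv::Mat frame;
	camera >> frame;	// operator>> is shorthand for read()
	return frame.empty() ? 1 : 0;
}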

Result: (screenshots omitted)

The measured frame rate is printed to the console; the clip was shot with a Xiaomi 4C.

Code:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
	cv::VideoCapture capture("library.mp4");
	if (!capture.isOpened())
	{
		std::cout << "Failed to open the video" << std::endl;
		return 1;
	}
	double rate = capture.get(cv::CAP_PROP_FPS);
	std::cout << "Frame rate: " << rate << std::endl;
	bool stop(false);
	cv::Mat frame;
	// delay between frames, in milliseconds, matching the frame rate
	int delay = static_cast<int>(1000 / rate);
	cv::namedWindow("Extracted Frame");
	while (!stop)
	{
		// read() returns false once the end of the sequence is reached
		if (!capture.read(frame))
		{
			break;
		}
		cv::imshow("Extracted Frame", frame);
		// wait between frames; pressing any key stops the playback
		if (cv::waitKey(delay) >= 0)
			stop = true;
	}
	cv::waitKey(0);	// keep the last frame on screen until a key is pressed
	return 0;
}
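
Beyond the frame rate, cv::VideoCapture exposes other properties through its get()/set() interface. A short sketch, assuming an opened capture object as above (the property constants are from the standard OpenCV API):

	long total  = static_cast<long>(capture.get(cv::CAP_PROP_FRAME_COUNT));	// total number of frames
	int  width  = static_cast<int>(capture.get(cv::CAP_PROP_FRAME_WIDTH));	// frame width in pixels
	int  height = static_cast<int>(capture.get(cv::CAP_PROP_FRAME_HEIGHT));	// frame height in pixels
	capture.set(cv::CAP_PROP_POS_FRAMES, 100);	// position the capture at frame 100 before the next read()

Be aware that for some codecs the reported frame count and exact frame seeking can be unreliable.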

*Processing video frames

A function pointer to a callback function is used to process each video frame.
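
Any function matching the signature void(cv::Mat&, cv::Mat&), taking the input frame and filling the output frame, can serve as the callback. A trivial sketch (this flip callback is an illustrative example, not part of the original program):

void flip(cv::Mat& img, cv::Mat& out)
{
	cv::flip(img, out, 1);	// flip code 1 = mirror around the vertical axis
}

It would be installed with processor.setFrameProcessor(flip), exactly like the canny callback shown below.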

Result: (screenshots omitted)


Code:

The VideoProcessor class:

class VideoProcessor
{
private:
	cv::VideoCapture capture;
	// the callback function applied to each frame
	void (*process)(cv::Mat&, cv::Mat&);
	bool callIt;
	// display names of the input and output windows
	std::string windowNameInput;
	std::string windowNameOutput;
	int delay;
	long fnumber;
	long frameToStop;
	bool stop;
public:
	// initialize every member; frameToStop of -1 means "no stop frame set"
	VideoProcessor() : process(nullptr), callIt(true),
		delay(0), fnumber(0), frameToStop(-1), stop(false) {}
	double getFrameRate()
	{
		return capture.get(cv::CAP_PROP_FPS);
	}
	void setFrameProcessor(void (*frameProcessingCallback)(cv::Mat&, cv::Mat&))
	{
		process = frameProcessingCallback;
	}
	bool setInput(std::string filename)
	{
		fnumber = 0;
		// release any previously opened resource
		capture.release();
		return capture.open(filename);
	}
	void displayInput(std::string wn)
	{
		windowNameInput = wn;
		cv::namedWindow(windowNameInput);
	}
	void displayOutput(std::string wn)
	{
		windowNameOutput = wn;
		cv::namedWindow(windowNameOutput);
	}
	// grab and process the frames of the sequence
	void run()
	{
		cv::Mat frame;
		cv::Mat output;
		if (!capture.isOpened())
			return;
		stop = false;
		while (!stop)
		{
			if (!capture.read(frame))
				break;
			if (windowNameInput.length() != 0)
			{
				// note: this also downsizes the frame handed to the callback
				cv::resize(frame, frame, cv::Size(240, 160));
				cv::imshow(windowNameInput, frame);
			}
			// invoke the callback, if one is set and enabled
			if (callIt && process)
			{
				process(frame, output);
				fnumber++;
			}
			else
			{
				output = frame;
			}
			if (windowNameOutput.length() != 0)
			{
				cv::resize(output, output, cv::Size(240, 160));
				cv::imshow(windowNameOutput, output);
			}
			// introduce the delay; pressing any key stops the processing
			if (delay >= 0 && cv::waitKey(delay) >= 0)
				stop = true;
			// stop once the requested frame has been reached
			if (frameToStop >= 0 && getFrameNumber() == frameToStop)
				stop = true;
		}
	}
	void setDelay(int d)
	{
		delay = d;
	}
	void callProcess()
	{
		callIt = true;
	}
	void dontCallProcess()
	{
		callIt = false;
	}
	void stopAtFrameNo(long frame)
	{
		frameToStop = frame;
	}
	// current position in the sequence, as reported by the capture device
	long getFrameNumber()
	{
		return static_cast<long>(capture.get(cv::CAP_PROP_POS_FRAMES));
	}
};
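
As a usage note, the callback can be switched off at runtime with dontCallProcess(), in which case run() simply displays the raw frames; a short sketch using only the class above:

	VideoProcessor p;
	p.setInput("library.mp4");
	p.displayOutput("Raw Frames");
	p.dontCallProcess();	// skip the callback; output is the unmodified frame
	p.setDelay(33);	// roughly 30 frames per second
	p.run();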

The callback function:

void canny(cv::Mat& img, cv::Mat& out)
{
	// convert to grayscale if the input is a color image
	if (img.channels() == 3)
		cv::cvtColor(img, out, cv::COLOR_BGR2GRAY);
	else
		img.copyTo(out);
	// detect edges using hysteresis thresholds of 100 and 200
	cv::Canny(out, out, 100, 200);
	// invert the result: edges become black on a white background
	cv::threshold(out, out, 128, 255, cv::THRESH_BINARY_INV);
}
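
The two numeric arguments to cv::Canny are the low and high hysteresis thresholds; OpenCV's documentation suggests keeping the high-to-low ratio between about 2:1 and 3:1, which the 100/200 pair satisfies.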

The main function:

int main()
{
	VideoProcessor processor;
	processor.setInput("library.mp4");
	processor.displayInput("Current Frame");
	processor.displayOutput("Output Frame");
	// play the sequence back at its original frame rate
	processor.setDelay(static_cast<int>(1000.0 / processor.getFrameRate()));
	processor.setFrameProcessor(canny);
	processor.stopAtFrameNo(150);
	processor.run();
	return 0;
}



*Tracking feature points in a video

To start the tracking process, feature points are first detected in an initial frame; they are then tracked in the subsequent frames. Since some feature points are inevitably lost as the tracker advances through the video sequence, new feature points should be detected on a regular basis.
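
The two OpenCV building blocks used below are cv::goodFeaturesToTrack for detection and cv::calcOpticalFlowPyrLK for pyramidal Lucas-Kanade tracking. A minimal sketch of one detect-then-track step (gray0 and gray1 are assumed to be two consecutive grayscale frames; the parameter values mirror the defaults chosen in the class below):

	std::vector<cv::Point2f> pts0, pts1;
	std::vector<uchar> status;	// 1 if the corresponding point was tracked successfully
	std::vector<float> err;	// per-point tracking error
	cv::goodFeaturesToTrack(gray0, pts0, 500, 0.01, 10.0);	// detect up to 500 corners in gray0
	cv::calcOpticalFlowPyrLK(gray0, gray1, pts0, pts1, status, err);	// locate them in gray1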



As can be seen (screenshots omitted), only a few of the patterns on the floor tiles are tracked here, and the mouse itself is not tracked, so I tried another scene:

When the object moves in a straight line, the tracking works well, which is consistent with the small, smooth frame-to-frame displacements that the Lucas-Kanade method assumes.

Code:

The FeatureTracker class:

#include <opencv2/opencv.hpp>
#include <cmath>
#include <vector>

class FeatureTracker
{
private:
	cv::Mat gray;	// current grayscale frame
	cv::Mat gray_prev;	// previous grayscale frame
	std::vector<cv::Point2f> points[2];	// tracked features, from frame 0 to frame 1
	std::vector<cv::Point2f> initial;	// initial positions of the tracked points
	std::vector<cv::Point2f> features;	// detected features
	int max_count;	// maximum number of features to detect
	double qlevel;	// quality level for feature detection
	double minDist;	// minimum distance between two feature points
	std::vector<uchar> status;	// status of the tracked features
	std::vector<float> err;	// errors produced during tracking
public:
	FeatureTracker() : max_count(500), qlevel(0.01), minDist(10.0) {}
	void process(cv::Mat& frame, cv::Mat& output)
	{
		cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
		frame.copyTo(output);
		// 1. if new feature points must be added
		if (addNewPoints())
		{
			detectFeaturePoints();
			// append the detected features to the currently tracked ones
			points[0].insert(points[0].end(), features.begin(), features.end());
			initial.insert(initial.end(), features.begin(), features.end());
		}
		// for the first frame of the sequence, the previous frame is the frame itself
		if (gray_prev.empty())
			gray.copyTo(gray_prev);
		// 2. track the features
		cv::calcOpticalFlowPyrLK(gray_prev, gray, points[0], points[1], status, err);
		// 3. loop over the tracked points and reject some of them
		int k = 0;
		for (size_t i = 0; i < points[1].size(); i++)
		{
			// should this feature point be kept?
			if (acceptTrackPoints(i))
			{
				initial[k] = initial[i];
				points[1][k++] = points[1][i];
			}
		}
		// discard the points that failed the test
		points[1].resize(k);
		initial.resize(k);
		// 4. handle the accepted tracked points
		handleTrackedPoints(frame, output);
		// 5. the current points and image become the previous ones
		std::swap(points[1], points[0]);
		cv::swap(gray_prev, gray);
	}
	void detectFeaturePoints()
	{
		// detect good features to track and show them in their own window
		cv::goodFeaturesToTrack(gray, features, max_count, qlevel, minDist);
		std::vector<cv::KeyPoint> keypoints;
		cv::KeyPoint::convert(features, keypoints);
		cv::Mat detectedFeature;
		cv::drawKeypoints(gray, keypoints, detectedFeature);
		cv::resize(detectedFeature, detectedFeature, cv::Size(320, 240));
		cv::imshow("feature", detectedFeature);
	}
	// detect new points whenever too few are left
	bool addNewPoints()
	{
		return points[0].size() <= 10;
	}
	// keep a point only if it was tracked and has moved by more than 2 pixels
	bool acceptTrackPoints(int i)
	{
		return status[i] &&
			(std::abs(points[0][i].x - points[1][i].x) +
			 std::abs(points[0][i].y - points[1][i].y) > 2);
	}
	// draw a line from each point's initial position to its current one
	void handleTrackedPoints(cv::Mat& frame, cv::Mat& output)
	{
		for (size_t i = 0; i < points[1].size(); i++)
		{
			cv::line(output, initial[i], points[1][i], cv::Scalar(255, 255, 255));
		}
	}
};
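
Note that FeatureTracker::process has the same signature as the VideoProcessor callback above, but as a non-static member function it cannot be passed as a plain function pointer, so the main function below drives the capture loop itself.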

The main function:

int main()
{
	cv::VideoCapture capture("keyboard.mp4");
	if (!capture.isOpened())
		return 1;
	double rate = capture.get(cv::CAP_PROP_FPS);
	int delay = static_cast<int>(1000.0 / rate);
	cv::Mat frame;
	cv::Mat outputFrame;
	FeatureTracker tracker;
	cv::namedWindow("original frame");
	cv::namedWindow("tracking frame");
	while (true)
	{
		if (!capture.read(frame))
			break;
		tracker.process(frame, outputFrame);
		// shrink both frames for display
		cv::resize(frame, frame, cv::Size(320, 240));
		cv::imshow("original frame", frame);
		cv::resize(outputFrame, outputFrame, cv::Size(320, 240));
		cv::imshow("tracking frame", outputFrame);
		cv::waitKey(delay);
	}
	cv::waitKey(0);	// keep the windows open until a key is pressed
	return 0;
}


