KLT target tracking, implemented in both MATLAB and OpenCV
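
Both implementations follow the same pipeline: detect a face in the first frame, pick strong corners inside the face box, track those corners from frame to frame with pyramidal Lucas-Kanade optical flow, and fit a similarity transform to the surviving matches to move the bounding box along. As a quick refresher, KLT finds, for each corner, the displacement d = (u, v) that minimizes the windowed brightness difference between consecutive frames:

\[
E(\mathbf{d}) = \sum_{\mathbf{x} \in W} \big( I(\mathbf{x} + \mathbf{d},\, t+1) - I(\mathbf{x},\, t) \big)^2 ,
\]

which, after linearization, reduces to the 2x2 system \(\big(\sum_W \nabla I\, \nabla I^{\top}\big)\mathbf{d} = -\sum_W \nabla I\, \Delta I\) with \(\Delta I = I(\mathbf{x}, t+1) - I(\mathbf{x}, t)\). Corners where the minimum eigenvalue of that 2x2 matrix is large are exactly what detectMinEigenFeatures (MATLAB) and goodFeaturesToTrack (OpenCV) select, which is why they pair naturally with the tracker.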

MATLAB:

% Author: Ephemeroptera
% Date: 2018/11/24
% Contact QQ: 605686962


%% Read the video and grab its first frame
video=VideoReader('ww.mp4');
firstFrame=imresize(readFrame(video),[480 640]);

%% KLT initialization
faceDetector = vision.CascadeObjectDetector; %create a frontal-face detector
faceBbox=faceDetector(firstFrame); %detect faces
faceBbox=faceBbox(1,:); %keep the first detection ('ROI' below expects a 1-by-4 box)
MinEigenPoints = detectMinEigenFeatures(rgb2gray(firstFrame), 'ROI', faceBbox);%detect corners inside the face with the minimum-eigenvalue algorithm; returns a cornerPoints object
firstShow=insertObjectAnnotation(firstFrame,'rectangle',faceBbox,'Face','LineWidth',2,'Color','g'); %annotate the detected face
firstShow=insertMarker(firstShow,MinEigenPoints.Location,'+','Color','w','Size',2);
figure(1);imshow(firstShow);title('the initial');

pointTracker = vision.PointTracker('MaxBidirectionalError', 2);%forward-backward error threshold set to 2
initialize(pointTracker, MinEigenPoints.Location, firstFrame);%initialize the tracker

%% KLT tracking
oldPoints =MinEigenPoints.Location;% feature corners from the initialization
bboxPoints = bbox2points(faceBbox);% the four corner coordinates of the face box
n=2;
while hasFrame(video)%iterate over the video
    frame = imresize(readFrame(video),[480 640]); % read the current frame
    [newPoints,validity,score] = pointTracker(frame); % track the points into the current frame
    
    oldPoints = oldPoints(validity, :);  % keep only the reliably tracked matches
    newPoints = newPoints(validity, :);  % (first filtering pass)
    
    %Estimate a similarity transform between the matched point sets and keep only its inliers (second filtering pass)
    [xform, oldPoints, newPoints] = estimateGeometricTransform(oldPoints, newPoints, 'similarity', 'MaxDistance', 4);
    bboxPoints = transformPointsForward(xform, bboxPoints); %move bboxPoints with the estimated transform
    bboxPolygon = reshape(bboxPoints', 1, []);% flatten to a row vector [x1 y1 x2 y2 ...]
    
    %Display the current frame
    trackShow = insertShape(frame,'Polygon',bboxPolygon,'LineWidth',2,'Color','g');
    trackShow = insertMarker(trackShow,newPoints,'+','Color','w','Size',2);
    figure(2);imshow(trackShow);title(strcat('NO. ',num2str(n),' Frame..'));drawnow;
    
    %Swap the new points in as the old points for the next iteration
    oldPoints = newPoints;
    setPoints(pointTracker, oldPoints);
    n=n+1;
end
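
A note on the two filtering stages inside the loop: 'MaxBidirectionalError', 2 tells vision.PointTracker to track each point forward from the previous frame to the current one and then backward again, and to mark the point as invalid unless the round trip lands close to where it started,

\[
e_{fb}(\mathbf{x}) = \lVert \mathbf{x} - \hat{\mathbf{x}} \rVert_2 < 2 \text{ px},
\]

where \(\mathbf{x}\) is the point's location in the previous frame and \(\hat{\mathbf{x}}\) is its position after being tracked forward and back. estimateGeometricTransform then removes the remaining outliers: it fits a similarity transform with a RANSAC-style estimator and keeps only the point pairs that agree with it to within 'MaxDistance' of 4 pixels.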
       

Result demo:

Initial detection: (screenshot omitted)

Tracking: (screenshot omitted)

C++:

/****************************************************************/
//Author: Ephemeroptera
//Last modified: 2018/11/26
//Location: AHU
//Contact QQ: 605686962
/****************************************************************/
#include <iostream>
#include"opencv2/opencv.hpp"
#include "opencv2/video/tracking.hpp"
#include "dlib/image_processing/frontal_face_detector.h"
#include <dlib/image_processing.h>
#include <dlib/opencv.h>

using namespace cv;
using namespace std;

int main()
{
	try
	{
		//Open the video
		VideoCapture capture("eyes.avi");

		//Create the dlib frontal-face detector
		dlib::frontal_face_detector faceDetector = dlib::get_frontal_face_detector();

		//Grab the first frame
		Mat  firstFrame;
		capture >> firstFrame;
		resize(firstFrame, firstFrame, Size(640, 480));

		//Wrap the cv::Mat as a dlib image (no copy)
		dlib::cv_image<dlib::bgr_pixel> cframe(firstFrame);

		//Detect faces
		std::vector<dlib::rectangle> facebbox = faceDetector(cframe);

		//Make sure a face was found, then convert the first box from dlib::rectangle to cv::Rect
		if (facebbox.empty())
		{
			cout << "No face detected in the first frame." << endl;
			return -1;
		}
		Rect faceRect(facebbox[0].left(), facebbox[0].top(), facebbox[0].width(), facebbox[0].height());

		//Store the four box corners as a matrix
		Mat faceROI = (Mat_<double>(4, 2) << faceRect.x, faceRect.y,
			faceRect.x + faceRect.width, faceRect.y,
			faceRect.x + faceRect.width, faceRect.y + faceRect.height,
			faceRect.x, faceRect.y + faceRect.height);
		faceROI = faceROI.t();//transpose: 4x2 -> 2x4 (one corner per column)
		

		//Initialize the feature points
		const ushort MAX_COUNT = 300;//maximum number of feature points
		vector<Point2f> initCorners;//container for the initial corners
		Mat firstGray(firstFrame.size(), CV_8UC1);//grayscale image
		cvtColor(firstFrame, firstGray, COLOR_BGR2GRAY);
		//Restrict detection to the face ROI
		Mat mask = Mat::zeros(firstGray.size(), CV_8UC1);//mask initialized to all zeros
		mask(faceRect).setTo(255);//set the ROI region to 255
		goodFeaturesToTrack(firstGray, initCorners, MAX_COUNT, 0.01, 10, mask, 3, 3, 0, 0.04);//initial corner detection
		//Refine the corners to sub-pixel accuracy
		Size subPixWinSize(10, 10);
		TermCriteria termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.03);//stop after 20 iterations or when the update drops below 0.03
		cornerSubPix(firstGray, initCorners, subPixWinSize, Size(-1, -1), termcrit);//sub-pixel refinement

		//Iterate over the video
		int numOfframes = 1;//current frame index
		Mat oldFrame;//previous frame
		Mat newFrame;//current frame
		vector<Point2f> oldCorners;//corners in the previous frame
		vector<Point2f> newCorners;//corners in the current frame

		while (waitKey(30) != 27)//exit on ESC
		{
			++numOfframes;//advance the frame counter
			//Grab the next frame
			Mat rgbFrame;
			capture >> rgbFrame;
			//Stop when the video ends
			if (rgbFrame.empty())
			{
				break;
			}
			//Normalize the frame size first, so the drawing below lines up with the tracked coordinates
			resize(rgbFrame, rgbFrame, Size(640, 480));
			//Convert to grayscale
			Mat grayFrame(rgbFrame.size(), CV_8UC1);
			cvtColor(rgbFrame, grayFrame, COLOR_BGR2GRAY);


			if (numOfframes == 2)//first iteration: seed with the first frame
			{
				oldFrame = firstGray;
				oldCorners = initCorners;
			}
			newFrame = grayFrame;//the current frame becomes the new frame

			//KLT core: pyramidal Lucas-Kanade optical flow
			vector<uchar> validity;//per-point tracking status
			vector<float> err;//tracking error of each valid point
			Size winSize(31, 31);//search window size
			calcOpticalFlowPyrLK(oldFrame, newFrame, oldCorners, newCorners, validity, err, winSize, 3, termcrit, 0, 0.001);

			//Keep and mark only the successfully tracked points
			size_t i, k;
			for (i = k = 0; i < newCorners.size(); ++i)
			{
				if (!validity[i])//skip points whose correspondence was not found
					continue;
				oldCorners[k] = oldCorners[i];
				newCorners[k++] = newCorners[i];//compact the high-quality points to the front
				circle(rgbFrame, newCorners[i], 2, Scalar(0, 255, 0), -1, 8);//mark the tracked point
			}
			oldCorners.resize(k);  //drop the lost points
			newCorners.resize(k);

			//Estimate the motion of the feature points (similarity transform)
			Mat transEstimate = estimateRigidTransform(oldCorners, newCorners, 0);
			//Update the bounding box
			if (!transEstimate.empty())//estimation can fail and return an empty matrix
			{
				Mat bias = Mat::ones(Size(4, 1), CV_64FC1);//append a row of ones: (2x4) -> (3x4)
				faceROI.push_back(bias);
				faceROI = transEstimate * faceROI;//(2x3)*(3x4) -> (2x4)
			}
			

			//Draw the bounding box
			Point2f point0(faceROI.at<double>(0, 0), faceROI.at<double>(1, 0));
			Point2f point1(faceROI.at<double>(0, 1), faceROI.at<double>(1, 1));
			Point2f point2(faceROI.at<double>(0, 2), faceROI.at<double>(1, 2));
			Point2f point3(faceROI.at<double>(0, 3), faceROI.at<double>(1, 3));
			
			line(rgbFrame, point0, point1, Scalar(255, 0, 0),3,16);
			line(rgbFrame, point1, point2, Scalar(255, 0, 0),3,16);
			line(rgbFrame, point2, point3, Scalar(255, 0, 0),3,16);
			line(rgbFrame, point3, point0, Scalar(255, 0, 0),3,16);
			imshow("KLT", rgbFrame);

			//Swap: the current frame and corners become the previous ones
			oldFrame = newFrame;
			oldCorners = newCorners;
		}
		
	}
	catch (const std::exception& e)
	{
		cout << "\nexception thrown!" << endl;
		cout << e.what() << endl;
	}
	return 0;
}
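
The loop above never replenishes its corners, so after enough frames only a handful may survive and the transform estimate becomes shaky. Below is a minimal sketch of one common fix, under the assumption that it is called near the end of the loop (just before oldFrame = newFrame;) with the loop's own newFrame, newCorners, faceROI, MAX_COUNT and termcrit; the helper name reseedIfNeeded and the 10-point threshold are made up for illustration.

#include "opencv2/opencv.hpp"

//Sketch only: re-detect corners inside the current face box when too few points survive
static void reseedIfNeeded(const cv::Mat& gray, std::vector<cv::Point2f>& corners,
	const cv::Mat& boxCorners /*2x4, CV_64F*/, int maxCount, const cv::TermCriteria& termcrit)
{
	if (corners.size() >= 10)
		return;//still enough points, nothing to do

	//Approximate an axis-aligned box from two opposite columns (0 and 2) of the 2x4 matrix
	cv::Point p0(cvRound(boxCorners.at<double>(0, 0)), cvRound(boxCorners.at<double>(1, 0)));
	cv::Point p2(cvRound(boxCorners.at<double>(0, 2)), cvRound(boxCorners.at<double>(1, 2)));
	cv::Rect roi = cv::Rect(p0, p2) & cv::Rect(0, 0, gray.cols, gray.rows);//clamp to the image
	if (roi.area() <= 0)
		return;

	//Same detector settings as the initialization, restricted to the new box
	cv::Mat mask = cv::Mat::zeros(gray.size(), CV_8UC1);
	mask(roi).setTo(255);
	cv::goodFeaturesToTrack(gray, corners, maxCount, 0.01, 10, mask);
	if (!corners.empty())
		cv::cornerSubPix(gray, corners, cv::Size(10, 10), cv::Size(-1, -1), termcrit);
}

Called as reseedIfNeeded(newFrame, newCorners, faceROI, MAX_COUNT, termcrit);, the freshly detected corners simply replace newCorners, so the usual old/new swap feeds them into the next calcOpticalFlowPyrLK call.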


Result demo: (screenshot omitted)

Reposted from: blog.csdn.net/Ephemeroptera/article/details/84452439