OpenCV之级联分类器训练与使用(三) 视频中人脸检测与眼睛跟踪

人脸的生物学特征
    HAAR级联分类器实现人脸检测
    人脸的生物学特征
        - 两个眼睛之间的宽度大致等于一个眼睛的距离
        - 左右对称
        - 眼睛到嘴巴之间的距离大致在两个眼睛的宽度大小左右
        - 鼻子到嘴唇距离,大致等于两个嘴唇的厚度
Haar人脸级联特征数据
    haarcascade_eye.xml
    haarcascade_lefteye_2splits.xml
    haarcascade_righteye_2splits.xml
    也可以加载自行训练得到的级联分类器 xml 数据
眼睛检测与跟踪
    眼睛检测
    简单跟踪    通过模板对比

代码

    #include "../common/common.hpp"

    // Haar cascade model files shipped with the OpenCV 3.1.0 install tree.
    // Paths are resolved to absolute locations at runtime by getCVImagesPath().
    static String facefile = "project/workspace_vs/OpenCV310Sources_contrib/install/etc/haarcascades/haarcascade_frontalface_alt.xml";
    static String lefteyefile = "project/workspace_vs/OpenCV310Sources_contrib/install/etc/haarcascades/haarcascade_lefteye_2splits.xml";
    static String righteyefile = "project/workspace_vs/OpenCV310Sources_contrib/install/etc/haarcascades/haarcascade_righteye_2splits.xml";
    // Detectors: one for the whole face, one per eye (the *_2splits models).
    static CascadeClassifier face_detector;
    static CascadeClassifier leftyeye_detector;
    static CascadeClassifier righteye_detector;
    // Last known eye rectangles in full-frame coordinates; written by the
    // tracking branch in main() and by trackEye().
    static Rect leftEye, rightEye;

    // Track an eye by template matching: search for `tpl` inside `im` and, if a
    // confident match is found, shift `rect`'s origin by the match location
    // (rect.x/rect.y are expected to hold the ROI origin on entry). On a miss,
    // `rect` is zeroed out so the caller can detect the failure.
    static void trackEye(Mat&  im, Mat& tpl, Rect& rect)
    {
        // Unusable input: empty images, or a template larger than the search area.
        const bool unusable = im.empty() || tpl.empty()
            || im.rows < tpl.rows || im.cols < tpl.cols;
        if (unusable) return;

        // Normalized cross-correlation map; scores near 1 indicate a strong match.
        Mat score;
        score.create(im.rows - tpl.rows + 1, im.cols - tpl.cols + 1, CV_32FC1);
        matchTemplate(im, tpl, score, TM_CCORR_NORMED);

        // Locate the best (maximum) correlation and its position.
        double worst, best;
        Point worstLoc, bestLoc;
        minMaxLoc(score, &worst, &best, &worstLoc, &bestLoc);

        if (best > 0.75) {
            // Confident match: translate the rect by the match offset within the ROI.
            rect.x += bestLoc.x;
            rect.y += bestLoc.y;
        }
        else {
            // No confident match: report an all-zero rectangle.
            rect.x = rect.y = rect.width = rect.height = 0;
        }
    }

    // Detect faces with a Haar cascade, then detect each eye once and track it
    // across subsequent frames via template matching (see trackEye above).
    // Returns 0 on normal exit, -1 on setup failure. (ISO C++ requires main to
    // return int; the original `void main` is ill-formed.)
    int main(int argc, char** argv) {
        // load() returns false on failure; an empty classifier would silently
        // detect nothing, so fail fast with a message instead.
        if (!face_detector.load(getCVImagesPath(facefile)) ||
            !leftyeye_detector.load(getCVImagesPath(lefteyefile)) ||
            !righteye_detector.load(getCVImagesPath(righteyefile))) {
            cout << "failed to load cascade data" << endl;
            return -1;
        }

        Mat frame, gray;
        VideoCapture capture(0);
        if (!capture.isOpened()) {
            cout << "cannot open camera 0" << endl;
            return -1;
        }
        vector<Rect> faces;
        vector<Rect> eyes;
        Mat lefttpl, righttpl; // eye templates, captured once on first detection
        while (capture.read(frame)) {
            flip(frame, frame, 1); // flipCode=1: horizontal mirror (selfie view)
            cvtColor(frame, gray, COLOR_BGR2GRAY);
            equalizeHist(gray, gray); // improve contrast for Haar detection
            face_detector.detectMultiScale(gray, faces, 1.1, 3, 0, Size(30, 30));
            cout << "faces.size=" << faces.size() << endl;
            for (size_t t = 0; t < faces.size(); t++) {
                rectangle(frame, faces[t], Scalar(255, 0, 0), 2, 8, 0); // face box

                // Eye search areas: upper half of the face, inset from the sides.
                int offsety = faces[t].height / 4;
                int offsetx = faces[t].width / 8;
                int eyeheight = faces[t].height / 2 - offsety;
                int eyewidth = faces[t].width / 2 - offsetx;

                // Left-eye region, clamped to the image so gray(rect) cannot
                // throw when the detected face touches the frame border.
                Rect leftRect(faces[t].x + offsetx, faces[t].y + offsety, eyewidth, eyeheight);
                leftRect &= Rect(0, 0, gray.cols, gray.rows);
                if (leftRect.area() > 0) {
                    Mat leftRoi = gray(leftRect);
                    // Detect the left eye inside the ROI
                    leftyeye_detector.detectMultiScale(leftRoi, eyes, 1.1, 1, 0, Size(20, 20));
                    cout << "lefteye.size=" << eyes.size() << endl;
                    if (lefttpl.empty()) {
                        if (eyes.size()) {
                            leftRect = eyes[0] + Point(leftRect.x, leftRect.y);
                            // clone(): without it the template is a view into
                            // `gray`, which cvtColor overwrites every frame,
                            // so the "template" would drift with each frame.
                            lefttpl = gray(leftRect).clone();
                            rectangle(frame, leftRect, Scalar(0, 0, 255), 2, 8, 0);
                        }
                    }
                    else {
                        // Track via template matching; seed rect with ROI origin.
                        leftEye.x = leftRect.x;
                        leftEye.y = leftRect.y;
                        trackEye(leftRoi, lefttpl, leftEye);
                        if (leftEye.x > 0 && leftEye.y > 0) { // (0,0,...) means lost
                            leftEye.width = lefttpl.cols;
                            leftEye.height = lefttpl.rows;
                            rectangle(frame, leftEye, Scalar(0, 0, 255), 2, 8, 0);
                        }
                    }
                }

                // Right-eye region (right half of the face), clamped likewise.
                Rect rightRect(faces[t].x + faces[t].width / 2, faces[t].y + offsety, eyewidth, eyeheight);
                rightRect &= Rect(0, 0, gray.cols, gray.rows);
                if (rightRect.area() > 0) {
                    Mat rightRoi = gray(rightRect);
                    // Detect the right eye inside the ROI
                    righteye_detector.detectMultiScale(rightRoi, eyes, 1.1, 1, 0, Size(20, 20));
                    cout << "righteye.size=" << eyes.size() << endl;
                    if (righttpl.empty()) {
                        if (eyes.size()) {
                            rightRect = eyes[0] + Point(rightRect.x, rightRect.y);
                            righttpl = gray(rightRect).clone(); // clone: see left eye
                            rectangle(frame, rightRect, Scalar(0, 255, 255), 2, 8, 0);
                        }
                    }
                    else {
                        // Track via template matching; seed rect with ROI origin.
                        rightEye.x = rightRect.x;
                        rightEye.y = rightRect.y;
                        trackEye(rightRoi, righttpl, rightEye);
                        if (rightEye.x > 0 && rightEye.y > 0) { // (0,0,...) means lost
                            rightEye.width = righttpl.cols;
                            rightEye.height = righttpl.rows;
                            rectangle(frame, rightEye, Scalar(0, 255, 255), 2, 8, 0);
                        }
                    }
                }
            }
            imshow("src4-7", frame);
            if (waitKey(100) == 27) break; // ESC quits
        }
        capture.release();

        waitKey(0);
        return 0;
    }

效果图

(原文此处为运行效果截图,转载时图片未能保留)

猜你喜欢

转载自blog.csdn.net/huanghuangjin/article/details/81413271