OpenCV人脸识别插件初步使用方法

1、下载OpenCV插件导入到新建的项目中,并在项目中新建一个RawImage,如下图:

 

2、将插件里的WebCamTextureToMatHelper脚本挂在RawImage上,并新建脚本命名为 FaceDetectTest2,将此脚本也挂在RawImage上,其他的什么都不用做。FaceDetectTest2脚本如下:

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.ImgcodecsModule;
using OpenCVForUnity.ImgprocModule;
using OpenCVForUnity.ObjdetectModule;
using OpenCVForUnity.UnityUtils;
using OpenCVForUnity.UnityUtils.Helper;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.UI;

public class FaceDetectTest2 : MonoBehaviour
{
    /// <summary>RawImage that displays the camera feed.</summary>
    public RawImage targetRaw;

    /// <summary>File extension used when saving captured face images.</summary>
    public static string imageExtent = ".jpg";

    // Seconds elapsed since playback (re)started; snapshots are only taken
    // after WAIT_TO_DETECT seconds have passed.
    private float counter = 0;
    private float WAIT_TO_DETECT = 2f;

    /// <summary>Folder the captured face images are written to.</summary>
    public string userLoginImagesPath;

    // Stop capturing after this many face snapshots have been saved.
    private const int DETECT_IMG_COUNT = 5;
    private int detectCounter;

    /// <summary>Number of face snapshots saved so far.</summary>
    public int DetectCounter { get => detectCounter; set => detectCounter = value; }

    private int minNeighbors = 2;   // a candidate must be detected at least twice to count
    private int flags = 2;          // detector performance flags
    private float sizeScale = 0.2f; // minimum face size as a fraction of the frame size

    /// <summary>Texture the camera frames are rendered into.</summary>
    Texture2D texture;

    /// <summary>Grayscale working image fed to the cascade detector.</summary>
    Mat grayMat;

    /// <summary>Detected face rectangles for the current frame.</summary>
    MatOfRect faces;

    /// <summary>Cascade classifier used for face detection.</summary>
    CascadeClassifier cascade;

    /// <summary>Default LBP cascade file name (fast, less accurate than Haar).</summary>
    protected static readonly string LBP_CASCADE_FILENAME = "lbpcascade_frontalface.xml";

    WebCamTextureToMatHelper webCamTextureToMatHelper;

    // Start is called before the first frame update.
    void Start()
    {
        userLoginImagesPath = Application.streamingAssetsPath + "/JieTu/";

        OnWebCamTextureToMatHelperInitialized();

        // Try the LBP cascade first; fall back to the Haar cascade if it is missing.
        // Only report an error when both loads fail.
        cascade = new CascadeClassifier();
        cascade.load(Utils.getFilePath(LBP_CASCADE_FILENAME));
        if (cascade.empty())
        {
            cascade.load(Utils.getFilePath("haarcascade_frontalface_alt.xml"));
            if (cascade.empty())
            {
                Debug.Log("cascade file is not loaded. Please copy from “OpenCVForUnity/StreamingAssets/” to “Assets/StreamingAssets/” folder. ");
            }
        }
    }

    /// <summary>
    /// Initializes the webcam helper, allocates the working images sized to the
    /// camera frame, and wires the camera texture to the target RawImage.
    /// </summary>
    public void OnWebCamTextureToMatHelperInitialized()
    {
        Debug.Log("OnWebCamTextureToMatHelperInitialized");

        if (webCamTextureToMatHelper == null)
            webCamTextureToMatHelper = gameObject.GetComponent<WebCamTextureToMatHelper>();

        if (!webCamTextureToMatHelper.IsInitialized())
            webCamTextureToMatHelper.Initialize(); // opens the camera and starts grabbing frames

        if (webCamTextureToMatHelper)
        {
            Mat webCamTextureMat = webCamTextureToMatHelper.GetMat(); // current camera frame
            if (webCamTextureMat != null)
            {
                // Size the display texture to match the camera frame.
                texture = new Texture2D(webCamTextureMat.cols(), webCamTextureMat.rows(), TextureFormat.RGBA32, false);
                Utils.fastMatToTexture2D(webCamTextureMat, texture); // copy the frame into the texture

                grayMat = new Mat(webCamTextureMat.rows(), webCamTextureMat.cols(), CvType.CV_8UC1);
                faces = new MatOfRect();
            }
        }

        if (targetRaw)
            targetRaw.texture = texture; // assigning the texture to the RawImage makes the feed visible

        Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

        if (webCamTextureToMatHelper)
        {
            webCamTextureToMatHelper.Play();
        }
    }

    // Update is called once per frame.
    void Update()
    {
        if (webCamTextureToMatHelper && webCamTextureToMatHelper.IsPlaying())
        {
            counter += Time.deltaTime;
            Mat rgbaMat = webCamTextureToMatHelper.GetMat(); // current camera frame

            // Initialization may not have produced a frame yet; bail out rather
            // than dereference null working images.
            if (rgbaMat == null || grayMat == null || faces == null)
                return;

            // Convert to grayscale and equalize the histogram to improve
            // contrast before running the cascade detector.
            Imgproc.cvtColor(rgbaMat, grayMat, Imgproc.COLOR_RGBA2GRAY);
            Imgproc.equalizeHist(grayMat, grayMat);

            // scaleFactor 1.1: must be > 1; larger = faster but less accurate.
            // minSize = frame size * sizeScale: faces smaller than this are
            // ignored. An empty maxSize means no upper bound.
            if (cascade != null)
                cascade.detectMultiScale(grayMat, faces, 1.1d, minNeighbors, flags,
                    new Size(grayMat.cols() * sizeScale, grayMat.rows() * sizeScale), new Size());

            OpenCVForUnity.CoreModule.Rect[] rects = faces.toArray();

            // Capture a snapshot when exactly one face is visible and the
            // warm-up delay has elapsed.
            if (rects.Length == 1 && counter > WAIT_TO_DETECT)
            {
                InterceptImg(rects[0], texture);
                DetectCounter++;
                if (DetectCounter == DETECT_IMG_COUNT)
                {
                    OnPauseButtonClick(); // enough snapshots collected - stop the camera
                }
            }

            // Draw a green rectangle around every detected face.
            for (int i = 0; i < rects.Length; i++)
            {
                Debug.Log("detect faces " + rects[i]);
                Imgproc.rectangle(rgbaMat, rects[i], new Scalar(14, 252, 123, 255), 3);
            }

            Utils.fastMatToTexture2D(rgbaMat, texture); // push the annotated frame to the display texture
        }
    }

    /// <summary>
    /// Crops the detected face region out of the given texture and writes it
    /// to <see cref="userLoginImagesPath"/> as an image file named after the
    /// current <see cref="DetectCounter"/>.
    /// </summary>
    /// <param name="rect">Face region to crop (in texture coordinates).</param>
    /// <param name="c_texture">Source texture to crop from.</param>
    void InterceptImg(OpenCVForUnity.CoreModule.Rect rect, Texture2D c_texture)
    {
        // Dispose the native Mats when done - they hold unmanaged memory.
        using (Mat cameraMat = new Mat(c_texture.height, c_texture.width, CvType.CV_8UC4))
        {
            Utils.texture2DToMat(c_texture, cameraMat);
            // Crop the detected face region out of the full frame.
            using (Mat croppedImage = new Mat(cameraMat, rect))
            {
                // imwrite expects BGRA channel order; without this conversion
                // the saved colors are wrong.
                Imgproc.cvtColor(croppedImage, croppedImage, Imgproc.COLOR_RGBA2BGRA);
                // flipCode 1 flips around the y-axis (horizontal mirror).
                // NOTE(review): the original comment claimed the crop was
                // upside down - a vertical flip would be flipCode 0; confirm
                // which orientation is actually intended.
                Core.flip(croppedImage, croppedImage, 1);
                Imgcodecs.imwrite(userLoginImagesPath + DetectCounter + imageExtent, croppedImage);
            }
        }
    }

    /// <summary>
    /// Raises the pause button click event: pauses the camera feed and resets
    /// the warm-up timer.
    /// </summary>
    public void OnPauseButtonClick()
    {
        if (webCamTextureToMatHelper)
            webCamTextureToMatHelper.Pause();
        counter = 0;
    }
}

3、代码解释:

webCamTextureToMatHelper.Initialize();   初始化,这个方法就已经获取到了摄像头画面 

Mat webCamTextureMat = webCamTextureToMatHelper.GetMat(); 获取当前帧的画面,获取到画面信息后就可以进行各种图像处理操作了  

OpenCV插件大体思路:

a、初始化调用摄像头

b、使用webCamTextureToMatHelper.GetMat()获取到当前帧画面

c、把 webCamTextureMat 赋给Texture2D类型的texture,然后再把texture给RawImage----这一步类似于做小地图一样将texture给RawImage,其实是Mat拿到相机每帧画面给texture,texture再给RawImage。在Update里做就可以每帧更新了,这样就有了相机画面了

猜你喜欢

转载自blog.csdn.net/meibianchuidi123/article/details/125030499
今日推荐