前面几篇文章中介绍了OpenCVforUnity的使用方法,在导入了这个包之后,我们就可以在C#代码当中利用opencv的库了。
以下是在unity中编写OpenCV图像处理代码所用的一般格式:
using UnityEngine;
using UnityEngine.EventSystems;
using System.Collections;
using System.Collections.Generic;
using UnityEngine.UI;
#if UNITY_5_3 || UNITY_5_3_OR_NEWER
using UnityEngine.SceneManagement;
#endif
using OpenCVForUnity;

namespace OpenCVForUnityExample
{
    // Skeleton for an OpenCV-for-Unity image-processing script.
    // A WebCamTextureToMatHelper component is required on the same GameObject;
    // it feeds webcam frames to this script as OpenCV Mat objects.
    [RequireComponent (typeof(WebCamTextureToMatHelper))]
    public class HandPoseEstimationExample : MonoBehaviour
    {
        // implementation details go here
    }
}
首先阐述一下我们利用opencv库对手指进行识别的主要思路。利用摄像头对手部进行识别,有一个非常重要的工具:WebCamTexture,这是在unity中实现摄像头拍照和录像的必备工具。
一、启用摄像头:
该工具在unity当中的初始化过程:
// Callback fired by WebCamTextureToMatHelper once the webcam Mat is ready.
// Sets up the display texture and quad, fits the orthographic camera to the
// frame, and initializes the color-blob detection state.
public void OnWebCamTextureToMatHelperInitialized ()
{
    Debug.Log ("OnWebCamTextureToMatHelperInitialized");

    Mat webCamTextureMat = webCamTextureToMatHelper.GetMat ();

    // Texture that will receive each processed frame, shown on this object's material.
    texture = new Texture2D (webCamTextureMat.cols (), webCamTextureMat.rows (), TextureFormat.RGBA32, false);
    gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
    // Scale the quad so one frame pixel maps to one world unit.
    gameObject.transform.localScale = new Vector3 (webCamTextureMat.cols (), webCamTextureMat.rows (), 1);
    Debug.Log ("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

    // Optional on-screen diagnostics overlay (null when disabled in the scene).
    if (fpsMonitor != null){
        fpsMonitor.Add ("width", webCamTextureMat.width ().ToString());
        fpsMonitor.Add ("height", webCamTextureMat.height ().ToString());
        fpsMonitor.Add ("orientation", Screen.orientation.ToString());
        fpsMonitor.consoleText = "Please touch the area of the open hand.";
    }

    // Size the orthographic camera so the frame fills the screen while
    // preserving aspect ratio (letterbox on the tighter axis).
    float width = webCamTextureMat.width();
    float height = webCamTextureMat.height();
    float widthScale = (float)Screen.width / width;
    float heightScale = (float)Screen.height / height;
    if (widthScale < heightScale) {
        Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
    } else {
        Camera.main.orthographicSize = height / 2;
    }

    // Detection state; the actual reference skin color is picked later by a touch.
    detector = new ColorBlobDetector ();
    spectrumMat = new Mat ();
    //blobColorRgba = new Scalar (255);
    blobColorHsv = new Scalar (255);
    SPECTRUM_SIZE = new Size (200, 64);
    CONTOUR_COLOR = new Scalar (255, 0, 0, 255);
    CONTOUR_COLOR_WHITE = new Scalar (255, 255, 255, 255);
}
二、点击选择手部识别区域:
因为我们实现的是基于肤色的手部识别,所以需要确定手部的肤色。一开始考虑过使用固定值,但是后来发现,使用固定值容易受到环境的影响,光线一变化就不容易识别出来了。所以最后选择了鼠标点击的方法:取点击位置周围区域的平均颜色作为肤色参考值。
这里需要注意的是,在进行图像处理时,需要转换到HSV颜色空间上。首先解释为什么不在RGB中设置范围寻找物体信息,而是在HSV中:
因为RGB通道并不能很好地反映出物体具体的颜色信息,而相对于RGB空间,HSV空间能够非常直观地表达色彩的明暗、色调以及鲜艳程度,方便进行颜色之间的对比。比如红色在HSV空间中H维度的范围为0~10和160~180,但在RGB空间中很难表达清楚。
// Samples the touched region of the frame and stores its average HSV color
// as the reference skin color for the blob detector.
private void OnTouch (Mat img, Point touchPoint)
{
    int cols = img.cols ();
    int rows = img.rows ();
    int px = (int)touchPoint.x;
    int py = (int)touchPoint.y;

    // Ignore touches outside the image.
    if (px < 0 || py < 0 || px > cols || py > rows)
        return;

    // Build a sample rectangle of up to 10x10 pixels around the touch,
    // clamped to the image bounds.
    OpenCVForUnity.Rect sampleRect = new OpenCVForUnity.Rect ();
    sampleRect.x = (px > 5) ? px - 5 : 0;
    sampleRect.y = (py > 5) ? py - 5 : 0;
    sampleRect.width = ((px + 5 < cols) ? px + 5 : cols) - sampleRect.x;
    sampleRect.height = ((py + 5 < rows) ? py + 5 : rows) - sampleRect.y;

    using (Mat sampleRgba = img.submat (sampleRect))
    using (Mat sampleHsv = new Mat ())
    {
        // Work in HSV space — see the discussion above on why RGB is unsuitable.
        Imgproc.cvtColor (sampleRgba, sampleHsv, Imgproc.COLOR_RGB2HSV_FULL);

        // Average the HSV channels over the patch to get the reference color.
        blobColorHsv = Core.sumElems (sampleHsv);
        int pointCount = sampleRect.width * sampleRect.height;
        for (int i = 0; i < blobColorHsv.val.Length; i++)
            blobColorHsv.val [i] /= pointCount;

        detector.SetHsvColor (blobColorHsv);
        Imgproc.resize (detector.GetSpectrum (), spectrumMat, SPECTRUM_SIZE);
        isColorSelected = true;
    }
}
计算完平均颜色之后,将该颜色储存下来,这里需要用到一个自己编写的用于进行HSV颜色处理的工具。不同的环境中,HSV颜色的范围不同,所以需要用自己编写的监视器来修改对应的范围,确定准确的HSV颜色。
比如:
HSV颜色空间规定的是,H范围0~360,S范围0~1,V范围0~1
但PS中的HSV范围,H是0-360,S是0-1,V(B)是0-1
而opencv中默认的HSV范围,H是0-180,S是0-255,V是0-255;需要注意,本文代码使用的是 COLOR_RGB2HSV_FULL 转换,它会把H拉伸到0-255(这也是代码中以255作为H上限的原因)
所以使用的时候,需要进行如下的转换:
public class ColorBlobDetector
{
    // Lower and upper bounds for range checking in HSV color space.
    private Scalar mLowerBound = new Scalar (0);
    private Scalar mUpperBound = new Scalar (0);
    // Minimum contour area as a fraction of the largest contour, used for filtering.
    private static double mMinContourArea = 0.1;
    // Per-channel radius (H, S, V) around the reference color for range checking.
    private Scalar mColorRadius = new Scalar (25, 50, 50, 0);
    // Small RGB strip visualizing the accepted hue range.
    private Mat mSpectrum = new Mat ();
    private List<MatOfPoint> mContours = new List<MatOfPoint> ();

    // Cached Mats reused across frames to avoid per-frame allocation.
    private Mat mPyrDownMat = new Mat ();
    private Mat mHsvMat = new Mat ();
    private Mat mMask = new Mat ();
    private Mat mDilatedMask = new Mat ();
    private Mat mHierarchy = new Mat ();

    // Sets the per-channel HSV radius used by SetHsvColor.
    public void SetColorRadius (Scalar radius)
    {
        mColorRadius = radius;
    }

    // Computes the HSV detection bounds around the given reference color and
    // renders the hue spectrum strip for it.
    public void SetHsvColor (Scalar hsvColor)
    {
        // Clamp the hue window [h - radius, h + radius] to the 0..255 range
        // produced by the *_FULL HSV conversions.
        double minH, maxH;
        if (hsvColor.val [0] >= mColorRadius.val [0])
            minH = hsvColor.val [0] - mColorRadius.val [0];
        else
            minH = 0;
        // FIX: the upper hue bound must include the radius; the original code
        // assigned the bare reference hue, which made the window asymmetric
        // ([h - radius, h] instead of [h - radius, h + radius]).
        if (hsvColor.val [0] + mColorRadius.val [0] <= 255)
            maxH = hsvColor.val [0] + mColorRadius.val [0];
        else
            maxH = 255;

        mLowerBound.val [0] = minH;
        mUpperBound.val [0] = maxH;
        mLowerBound.val [1] = hsvColor.val [1] - mColorRadius.val [1];
        mUpperBound.val [1] = hsvColor.val [1] + mColorRadius.val [1];
        mLowerBound.val [2] = hsvColor.val [2] - mColorRadius.val [2];
        mUpperBound.val [2] = hsvColor.val [2] + mColorRadius.val [2];
        // Alpha is unconstrained.
        mLowerBound.val [3] = 0;
        mUpperBound.val [3] = 255;

        // Render a 1-pixel-tall strip sweeping the accepted hues at full
        // saturation/value, converted to RGB for display.
        using (Mat spectrumHsv = new Mat (1, (int)(maxH - minH), CvType.CV_8UC3))
        {
            for (int j = 0; j < maxH - minH; j++) {
                byte[] tmp = { (byte)(minH + j), (byte)255, (byte)255 };
                spectrumHsv.put (0, j, tmp);
            }
            Imgproc.cvtColor (spectrumHsv, mSpectrum, Imgproc.COLOR_HSV2RGB_FULL, 4);
        }
    }
最后利用确定好的颜色,进行识别图像的边缘处理:
// Segments the frame by the stored HSV color range and collects the
// significant contours (scaled back to full resolution) into mContours.
public void Process (Mat rgbaImage)
{
    // Downsample twice (Gaussian smooth + halve each time) -> quarter resolution.
    Imgproc.pyrDown (rgbaImage, mPyrDownMat);
    Imgproc.pyrDown (mPyrDownMat, mPyrDownMat);

    // Convert to HSV and threshold against the detection bounds.
    Imgproc.cvtColor (mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);
    Core.inRange (mHsvMat, mLowerBound, mUpperBound, mMask);

    // Dilate the mask to close small gaps before contour extraction.
    Imgproc.dilate (mMask, mDilatedMask, new Mat ());

    List<MatOfPoint> foundContours = new List<MatOfPoint> ();
    Imgproc.findContours (mDilatedMask, foundContours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    // Find the largest contour area.
    double maxArea = 0;
    foreach (MatOfPoint candidate in foundContours) {
        double area = Imgproc.contourArea (candidate);
        if (area > maxArea)
            maxArea = area;
    }

    // Keep contours above the relative area threshold, scaling coordinates
    // by 4 to undo the two pyrDown steps.
    mContours.Clear ();
    foreach (MatOfPoint candidate in foundContours) {
        if (Imgproc.contourArea (candidate) > mMinContourArea * maxArea) {
            Core.multiply (candidate, new Scalar (4, 4), candidate);
            mContours.Add (candidate);
        }
    }
}
以上只是UNITY3D中手部识别的第一部分,剩余部分将在下一篇博客中讲解。敬请期待!
用识别图和大家说再见!