奥比中光3d摄像头活体检测测试

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/mhsszm/article/details/89153292
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <stdio.h>
#include "OpenNI.h"
#include <sys/time.h>
#include <stdlib.h>    
#include <iostream>    
#include <string>   
#include <fstream> 
#include <unistd.h>
#include <vector>
#include "mtcnn.hpp"
#include "utils.hpp"

#include <math.h>
#include "RGBDCamera.h"
#include "RGB-D_calibration_sample_code.h"

using namespace std;

using namespace cv;
using namespace openni;
// ---- File-global camera handles shared by the capture/preview helpers ----
static VideoCapture capture;          // UVC color camera (used when the device has no OpenNI color sensor)
static Device device;                 // OpenNI device (Orbbec Astra)
static VideoStream depth;
static VideoStream color;
static VideoStream ir;
static VideoFrameRef depthFrame;      // last frame read from each stream
static VideoFrameRef colorFrame;
static VideoFrameRef irFrame;

#define CAMERA_PARAMS_INI   "./camera_params.ini"
#define CONFIG_INI          "./config.ini"

// Depth pixels are 16-bit, so the equalization histogram spans 0..0xffff.
const static int MAX_DEPTH_VALUE = 0xffff;

const static int OPENNI_READ_WAIT_TIMEOUT = 500;  // ms, for OpenNI::waitForAnyStream

static int mirrorColor = 0;           // non-zero: horizontally flip the color image (set from argv[3])
// Working images, pre-sized for the expected 640x480 streams.
Mat depthImg(480, 640, CV_8UC3);      // false-color depth visualization
Mat depthRaw(480, 640, CV_16UC1, Scalar(0));  // raw 16-bit depth
Mat irImg(480, 640, CV_8UC3, Scalar(0));
Mat colorImg(480, 640, CV_8UC3);

Mat  CaliDepth(480, 640, CV_8UC3);    // NOTE(review): unused in this file — presumably for RGB-D calibration
float   mat[16] = { 0 };              // 4x4 calibration matrix (filled only by commented-out CalcALLMat code)
static bool quit = false;             // set by keyboardEvent(ESC) to end the main loop
static int openAllStream = 0;         // argv[1]: also open the IR stream when using a UVC color camera
static bool isUvcCamera = true;       // cleared when the device exposes an OpenNI color sensor

int num = 0;                          // frame counter (used mostly by commented-out snapshot code)
string dirname = "./";                // output directory for screenshot()

// MTCNN face detector ("tensorflow" backend) and the boxes from the last detection.
const char *type = "tensorflow";
mtcnn * p_mtcnn = mtcnn_factory::create_detector(type);
std::vector<face_box> face_info;
int save_chop = 0;                    // non-zero: FramFace also writes each face crop to idN.jpg

// A single 3D point in camera space (presumably millimeters — see the
// z < 800 near-range filter in screenshot()).
typedef struct {
	float x;
	float y;
	float z;
} vector3f;

// Dump the near-range points (0 < z < 800, presumably mm) of a point cloud
// as "x y z" text lines to <dirname>/<filenames>.txt.
// Fixes vs. original: iterates point.size() instead of a hard-coded 640*400
// (which could read past the end of a smaller cloud), reports open failures,
// and the success message now names the .txt file actually written (the
// original claimed ".xyz").
void screenshot(vector<vector3f> point,string dirname,string filenames)
{
	ofstream outfile (dirname +"/"+ filenames + ".txt");
	if (!outfile.is_open())
	{
		cout << "failed to open " << dirname << "/" << filenames << ".txt" << endl;
		return;
	}

	for (size_t ii = 0; ii < point.size(); ii++)
	{
		if (point[ii].z != 0 && point[ii].z < 800)
		{
			outfile << point[ii].x << " " << point[ii].y << " " << point[ii].z << std::endl;
		}
	}
	outfile.close();
	cout << dirname << "/" << filenames << ".txt ,save successful!" << endl;
}

// Load the MTCNN model files from ../models into the global detector.
// Returns 0 on success, 1 when the requested detector type is unavailable.
// Fix vs. original: the function is declared int but fell off the end
// without returning on the success path (undefined behavior).
int load_model()
{
	std::string model_dir = "../models";

	if (p_mtcnn == nullptr) {
		// create_detector(type) failed at static-init time; list what is supported.
		std::cerr << type << " is not supported" << std::endl;
		std::cerr << "supported types: ";
		std::vector<std::string> type_list = mtcnn_factory::list();

		for (unsigned int i = 0; i < type_list.size(); i++)
			std::cerr << " " << type_list[i];
		std::cerr << std::endl;
		return 1;
	}
	p_mtcnn->load_model(model_dir);
	return 0;
}


// Run MTCNN face detection on the global colorImg, refreshing face_info.
// Returns 1 when at least one face was found, 0 otherwise.
// Fix vs. original: the "no image" error path returned 1, which the caller
// (waitForFrame) interpreted as "face found" and then proceeded to draw on
// the missing image; an empty frame is now reported as "no face".
int   do_mtcnn()
{
	if (!colorImg.data) {
		std::cerr << "failed to read image file "<< std::endl;
		return 0;
	}

	face_info.clear();

	p_mtcnn->detect(colorImg, face_info);

	return face_info.empty() ? 0 : 1;
}

// Draw every detected face (global face_info) onto rgbImg, shifted by
// (move_x, move_y); when save_chop is set, also write each face crop to
// idN.jpg. `mode` is currently unused except by commented-out debug dumps.
// Fixes vs. original: the crop is clamped to the image so an edge face
// cannot make cv::Mat::operator()(Range, Range) throw, and sprintf is
// replaced by bounded snprintf.
void FramFace(Mat &rgbImg,int mode,int move_x,int move_y)
{
	for(unsigned int i = 0; i < face_info.size(); i++) {
        face_box& box = face_info[i];

        printf("face %d: x0,y0 %2.5f %2.5f  x1,y1 %2.5f  %2.5f conf: %2.5f\n",i,
                box.x0,box.y0,box.x1,box.y1, box.score);
        printf("landmark: ");

        for(unsigned int j = 0; j < 5; j++)
            printf(" (%2.5f %2.5f)",box.landmark.x[j], box.landmark.y[j]);

        printf("\n");

        if (save_chop) {
            // Clamp the crop rectangle to the image bounds.
            int cx0 = box.x0 > 0 ? (int)box.x0 : 0;
            int cy0 = box.y0 > 0 ? (int)box.y0 : 0;
            int cx1 = box.x1 < rgbImg.cols ? (int)box.x1 : rgbImg.cols;
            int cy1 = box.y1 < rgbImg.rows ? (int)box.y1 : rgbImg.rows;
            if (cx1 > cx0 && cy1 > cy0) {
                cv::Mat corp_img=rgbImg(cv::Range(cy0,cy1),cv::Range(cx0,cx1));
                char title[128];
                snprintf(title,sizeof(title),"id%d.jpg",i);
                cv::imwrite(title,corp_img);
            }
        }

        /* draw box */
        cv::rectangle(rgbImg, cv::Point(box.x0 + move_x, box.y0 + move_y), cv::Point(box.x1 + move_x, box.y1 + move_y), cv::Scalar(0, 255, 0), 1);

        /* draw landmarks; thickness is an int parameter — the original passed
           1.8, which was silently truncated to 1, so 1 is written explicitly */
        for (int l = 0; l < 5; l++) {
            cv::circle(rgbImg,cv::Point(box.landmark.x[l],box.landmark.y[l]),1,cv::Scalar(0, 0, 255),1);
        }

    }
}
// Grab one frame from the UVC camera into the global colorImg and mirror it.
// Returns 0 on success, -1 when the capture produced no frame.
// Fix vs. original: the empty check now happens BEFORE cv::flip — flipping
// an empty Mat triggers an OpenCV assertion/abort instead of the intended
// graceful error return.
int waitForColorFrame()
{
	capture >> colorImg;
	if (colorImg.empty()){
		printf("Capture UVC color failed.\n");
		return -1;
	}
	cv::flip(colorImg, colorImg, 1);

	return 0;
}

int waitForOpenNIFrame()
{
	int streamIndex;
	VideoFrameRef frame;

	VideoStream* pStream[] = {&depth, &color, &ir};
	Status rc = OpenNI::waitForAnyStream(pStream, 3, &streamIndex, OPENNI_READ_WAIT_TIMEOUT);
	if (rc != STATUS_OK)
	{
		printf("Wait failed! (timeout is %d ms)\n%s\n", OPENNI_READ_WAIT_TIMEOUT, OpenNI::getExtendedError());
		return 1;
	}


	// printf ("streamIndex = %d\n",streamIndex);
	switch(streamIndex) {
		case 0:
			rc = depth.readFrame(&depthFrame);
			break;
		case 1:
			rc = color.readFrame(&colorFrame);
			break;
		case 2:
			rc = ir.readFrame(&irFrame);
			break;
		default:
			printf("Wait frame error. Stream index:%d\n", streamIndex);
			return 1;
	}

	if (rc != STATUS_OK)
	{
		printf("Read failed!\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	// printf ("depthFrame.isValid() = %d\n",depthFrame.isValid());
	if(depthFrame.isValid()) {
		if (depthFrame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_1_MM && depthFrame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_100_UM)
		{
			printf("Unexpected depthFrame format\n");
			return 3;
		}

		DepthPixel* pDepth = (DepthPixel*)depthFrame.getData();

		// vector3f xyzdata;
		// vector<vector3f> point;
 
		// float x = 0.0, y = 0.0, z = 0.0, depthv = 0.0;
		// float i, j;
		// for (i = 0; i < depthFrame.getHeight(); i++)
		// {
		// 	for (j = 0; j < depthFrame.getWidth(); j++)
		// 	{
		// 		int k = i;
		// 		int m = j;
		// 		depthv = pDepth[k*depthFrame.getWidth() + m];
		// 		CoordinateConverter::convertDepthToWorld(depth, j, i, depthv, &x, &y, &z);
		// 		xyzdata.x = x;
		// 		xyzdata.y = y;
		// 		xyzdata.z = z;
		// 		point.push_back(xyzdata);
		// 	}
		// }

		
		string filenames = std::to_string(num);
		// screenshot(point,dirname,filenames);

		//printf ("pDepth = %s\n",*pDepth);
		depthRaw = Mat(depthFrame.getVideoMode().getResolutionY(), depthFrame.getVideoMode().getResolutionX(), CV_16UC1, (unsigned char*)pDepth);

		float* pDepthHist = NULL;
		if (pDepthHist == NULL) {
			pDepthHist = new float[MAX_DEPTH_VALUE];
		}
		memset(pDepthHist, 0, MAX_DEPTH_VALUE * sizeof(float));


		int numberOfPoints = 0;
		openni::DepthPixel nValue;

		int totalPixels = depthFrame.getVideoMode().getResolutionY() * depthFrame.getVideoMode().getResolutionX();

		for (int i = 0; i < totalPixels; i ++) {
			nValue = pDepth[i];
			if (nValue != 0) {
				pDepthHist[nValue] ++;
				numberOfPoints ++;
			}
		}

		for (int i = 1; i < MAX_DEPTH_VALUE; i ++) {
			pDepthHist[i] += pDepthHist[i - 1];
		}

		for (int i = 1; i < MAX_DEPTH_VALUE; i ++) {
			if (pDepthHist[i] != 0) {
				pDepthHist[i] = (numberOfPoints - pDepthHist[i]) / (float)numberOfPoints;
			}
		}
		// printf ("pDepthHist = %s\n",pDepthHist);
		int height = depthFrame.getVideoMode().getResolutionY();
		int width = depthFrame.getVideoMode().getResolutionX();
		for (int row = 0; row < height; row++) {
			DepthPixel* depthCell = pDepth + row * width;
			uchar * showcell = (uchar *)depthImg.ptr<uchar>(row);
			for (int col = 0; col < width; col++)
			{
				char depthValue = pDepthHist[*depthCell] * 255;
				*showcell++ = 0;
				*showcell++ = depthValue;
				*showcell++ = depthValue;
				depthCell++;
			}
		}
	}

	// // printf ("irFrame.isValid() = %d\n",irFrame.isValid());
	// if(irFrame.isValid()) {
	// 	int height = irFrame.getVideoMode().getResolutionY();
	// 	int width = irFrame.getVideoMode().getResolutionX();
	// 	const openni::RGB888Pixel* pImageRow = (const openni::RGB888Pixel*)irFrame.getData();
	// 	memcpy(irImg.data, pImageRow, height * width * 3);
	// }

	// // printf ("colorFrame.isValid() = %d\n",colorFrame.isValid());
	// if(colorFrame.isValid()) {
	// 	int height = colorFrame.getVideoMode().getResolutionY();
	// 	int width = colorFrame.getVideoMode().getResolutionX();
	// 	const openni::RGB888Pixel* pImageRow = (const openni::RGB888Pixel*)colorFrame.getData();
	// 	memcpy(colorImg.data, pImageRow, height * width * 3);
	// 	cv::cvtColor(colorImg, colorImg, COLOR_RGB2BGR);
	// }

	return 0;
}

// Wall-clock time in milliseconds since the Unix epoch.
long long current_timestamp() {
    struct timeval now;
    gettimeofday(&now, NULL);
    long long ms = (long long)now.tv_sec * 1000LL;
    ms += now.tv_usec / 1000;  // microseconds -> whole milliseconds
    return ms;
}

// Save img as a lossless PNG under ./Capture/, named by stream kind and
// frame number: flag 0 -> Depth_<num>.png, 1 -> Color_<num>.png, else IR.
// NOTE(review): assumes the ./Capture directory already exists — cv::imwrite
// fails (returns false) otherwise; confirm it is created out of band.
// Fix vs. original: strcpy/sprintf into fixed buffers replaced with a
// constant lookup and bounded snprintf.
void captureImage(int flag, Mat& img, int num)
{
	std::vector<int> png_params;
	png_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
	png_params.push_back(0);    // compression level 0: fastest, still lossless
	const char* fileName = (flag == 0) ? "Depth"
	                     : (flag == 1) ? "Color"
	                     : "IR";
	char csName[512];
	snprintf(csName, sizeof(csName), "./Capture/%s_%d.png", fileName, num);
	cv::imwrite(csName, img, png_params);
}


// Heuristic liveness check: a real 3D face has depth relief, so the average
// per-channel standard deviation of the depth visualization inside the face
// box exceeds a threshold (58 — appears empirically tuned for this camera;
// confirm against test data). Annotates depthImg with a labeled box per face.
// Returns 1 when at least one face is judged live, 0 otherwise.
// Fixes vs. original: the function is declared int but never returned a
// value (undefined behavior); degenerate/out-of-image boxes are skipped
// before building the Rect; legacy cvPoint replaced with cv::Point.
int isLiveFace()
{
	int liveFaces = 0;

	for(unsigned int i = 0; i < face_info.size(); i++)
	{
        face_box& box = face_info[i];

        // Clamp the face box to the depth image.
        float x = box.x0  > 0 ? box.x0 : 0;
        float y = box.y0  > 0 ? box.y0 : 0;
        float w = box.x1 - box.x0 > 0 ? box.x1 - box.x0 : box.x0 - box.x1;
        float h = box.y1 - box.y0 > 0 ? box.y1 - box.y0 : box.y0 - box.y1;

        w =  x + w < depthImg.cols ? w : depthImg.cols - x;
        h =  y + h < depthImg.rows ? h : depthImg.rows - y;
        if (w <= 0 || h <= 0)
        {
            continue;   // face box lies outside the depth image
        }

        Mat out = depthImg(Rect(x, y, w, h));
        Mat means, stddev;
		meanStdDev(out, means, stddev);
		double stddev_sum = 0;
		for (int row = 0; row < means.rows; row++)
		{
			stddev_sum = stddev_sum + stddev.at<double>(row);
		}
		double stddev_avg = stddev_sum / means.rows;
		stringstream ss,sss;
		ss << "real_" << stddev_avg;
		sss << "unreal_" << stddev_avg;
		int font = cv::FONT_HERSHEY_COMPLEX;
		if (stddev_avg > 58)
		{
			liveFaces++;
			printf("真人!标准差 = %.3f\n", stddev_avg);
			cv::rectangle(depthImg, cv::Point(box.x0 , box.y0 ), cv::Point(box.x1, box.y1 ), cv::Scalar(0, 255, 0), 1);
			cv::putText(depthImg, ss.str(), cv::Point(box.x0, box.y0),font,1.5, cv::Scalar(255, 0, 0),2);
		}
		else
		{
			printf("假人!标准差 = %.3f\n", stddev_avg);
			cv::rectangle(depthImg, cv::Point(box.x0, box.y0), cv::Point(box.x1, box.y1 ), cv::Scalar(0, 255, 0), 1);
			cv::putText(depthImg, sss.str(), cv::Point(box.x0, box.y0), font, 1.5, cv::Scalar(0, 255, 0),2);
		}
    }

	return liveFaces > 0 ? 1 : 0;
}




// One pipeline iteration: grab a UVC color frame, run MTCNN face detection,
// and — only when a face was found — pump an OpenNI depth frame, run the
// liveness check and build the side-by-side color|depth preview.
// Always returns 0; previewImg is left untouched when no face is detected.
int waitForFrame(Mat& previewImg)
{
	if(isUvcCamera) {
		// NOTE(review): return value ignored — on capture failure colorImg may
		// be stale/empty; only the empty case is caught inside do_mtcnn().
		waitForColorFrame();
	}

	int is_has_face = do_mtcnn();
	if (is_has_face)
	{
		// Draw detection boxes/landmarks on the color image (no offset).
		FramFace(colorImg,0,0,0);		
	}
	else 
		return 0;
	waitForOpenNIFrame();
	static long long lastTimeStamp = 0;
	long long currentTimeStamp = current_timestamp();
	// ms since last snapshot; only used by the disabled capture code below.
	long timepast = currentTimeStamp - lastTimeStamp;
	static int num = 0;  // shadows the file-global `num`; feeds the disabled snapshot numbering
	if (mirrorColor != 0) {
		cv::flip(colorImg, colorImg, 1);
	}

	// printf("timepast = %ld \n",timepast);
	// if (timepast > 100.0) {
	// 	captureImage(0, depthRaw, num);
	// 	captureImage(1, colorImg, num);
	// 	if(irFrame.isValid()) {
	// 		captureImage(2, irImg, num);
	// 	}
	// 	lastTimeStamp = currentTimeStamp;
	// 	num ++;
	// }

	previewImg = colorImg.clone();
	
	
	// Reuse the color-image face boxes on the depth image; assumes depth is
	// registered to color (IMAGE_REGISTRATION_DEPTH_TO_COLOR, set in main) —
	// otherwise the boxes will not line up. TODO confirm.
	FramFace(depthImg,1,0,0);
	// NOTE(review): hconcat requires identical row count and type; assumes the
	// UVC camera delivers 640x480 like depthImg — verify for other cameras.
	hconcat(previewImg, depthImg, previewImg);

	int is_live_face = isLiveFace();
	if (is_has_face && is_live_face)
	{
		/* code */
	}
	if(irFrame.isValid()) {
		hconcat(previewImg, irImg, previewImg);
	}
	return 0;
}

// Handle a key press from the preview window; ESC (27) requests shutdown
// by raising the global quit flag. Always returns 0.
int keyboardEvent(int key)
{
	const bool isEscape = (key == 27);
	if (isEscape) {
		quit = true;
	}
	return 0;
}

// Report whether `file` exists (POSIX access() with mode 0, i.e. F_OK),
// logging the outcome to stdout. Returns true when the file is present.
bool checkFile(char * file)
{
    const bool exists = (access(file, 0) != -1);

    if (exists)
    {
        std::cout << "-- find " << file << std::endl << std::endl;
    }
    else
    {
        std::cout << "-- Didn't find " << file << std::endl << std::endl;
    }
    std::cout << "r   " << exists << std::endl;
    return exists;
}
// Entry point. Usage: prog [openAllStream] [camNum] [mirrorColor]
// Initializes OpenNI, starts the depth stream (plus color/IR where
// available), opens the UVC camera as a fallback color source, enables
// depth-to-color registration, then loops: capture -> detect -> liveness
// check -> display, until ESC is pressed.
int main(int argc, char*argv[])
{
	load_model();
	int camNum = 1;  // NOTE(review): parsed from argv[2] but never used — capture.open hard-codes 1 then 0

	if(argc >= 2) {
		sscanf(argv[1], "%d", &openAllStream);
	}

	if(argc >= 3) {
		sscanf(argv[2], "%d", &camNum);
	}

	if(argc >= 4) {
		sscanf(argv[3], "%d", &mirrorColor); 
	}

	// --- OpenNI: open the first device and start its depth stream ---
	Status rc;
	rc = openni::OpenNI::initialize();
	if(rc != STATUS_OK) {
		printf("init failed:%s\n", OpenNI::getExtendedError());
		return 1;
	}

	rc = device.open(ANY_DEVICE);
	if(rc != STATUS_OK) {
		printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
		return 2;
	}

	if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
	{
		rc = depth.create(device, SENSOR_DEPTH);
		if (rc != STATUS_OK)
		{
			printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
			return 3;
		}
	}
  
	rc = depth.start();
	if (rc != STATUS_OK)
	{
		printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
		return 4;
	}

	// If the device has its own color sensor, use it (and stop treating the
	// camera as UVC); otherwise colorImg will come from cv::VideoCapture below.
	// printf("isUvcCamera = %d \n",isUvcCamera);
	// printf("openAllStream = %d \n",openAllStream);
	//bool hasOpenNIColor = device.hasSensor(openni::SENSOR_COLOR);
	const SensorInfo* colorSensorInfo = device.getSensorInfo(openni::SENSOR_COLOR);
	if (colorSensorInfo != NULL){

		isUvcCamera = false;
		rc = color.create(device, SENSOR_COLOR);
		if (rc != STATUS_OK)
		{
			// NOTE(review): message says "ir" but this is the color stream.
			printf("Couldn't create ir stream\n%s\n", OpenNI::getExtendedError());
			return 3;
		}

		rc = color.start();
		if (rc != STATUS_OK)
		{
			printf("Couldn't start the ir stream\n%s\n", OpenNI::getExtendedError());
			return 4;
		}
	}

	// Optionally open the IR stream alongside the UVC color camera.
	if (isUvcCamera && openAllStream) {


		if (device.getSensorInfo(SENSOR_IR) != NULL)
		{

			rc = ir.create(device, SENSOR_IR);
			if (rc != STATUS_OK)
			{
				printf("Couldn't create ir stream\n%s\n", OpenNI::getExtendedError());
				return 3;
			}
		}

		// NOTE(review): ir.start() is called even when SENSOR_IR was absent
		// and ir.create() never ran — relies on OpenNI returning an error.
		rc = ir.start();
		if (rc != STATUS_OK)
		{
			printf("Couldn't start the ir stream\n%s\n", OpenNI::getExtendedError());
			return 4;
		}

	}

	// UVC color source: try device index 1 (external), fall back to 0.
	if (isUvcCamera) {


		// property id 6 is CV_CAP_PROP_FOURCC (set before open; may be a no-op on some backends)
		capture.set(6, CV_FOURCC('M', 'J', 'P', 'G'));
		if (!capture.open(1))
		{
			capture.open(0);
		}

		if (!capture.isOpened())
		{
			return -1;
		}
	} 

	Status result = STATUS_OK;

    cout << " *********** Astra Viewer v1.1*********" << endl << endl;
    cout << " O - RGBD Over display" << endl;
    cout << " R - toggle register" << endl;
    cout << " C - increase the Color proportion (after 'O')" << endl;
    cout << " D - increase the Depth proportion (after 'O')" << endl;
    cout << " Esc - Exit" << endl << endl;

    int REC_WIDTH = 640;
    int REC_HEIGHT = 480;

    // Resolution selection; Scale is hard-coded to 2 (640x480) since the
    // Windows-only GetPrivateProfileInt call is commented out.
    if (checkFile(CONFIG_INI))
    {
        int Scale = 2;//GetPrivateProfileInt(TEXT("Resolution"), TEXT("RES"), 2, TEXT(CONFIG_INI));

        if (Scale == 2)
        {
            REC_WIDTH = 640;
            REC_HEIGHT = 480;
        }
        if (Scale == 3)
        {
            REC_WIDTH = 1280;
            REC_HEIGHT = 1024;
        }
    }

    cout<<"REC_WIDTH: "<<REC_WIDTH<<"REC_HEIGHT: "<<REC_HEIGHT<<endl;
    // Mat     cvRGBImg(REC_HEIGHT, REC_WIDTH, CV_8UC3);
    // Mat     cvBGRImg(REC_HEIGHT, REC_WIDTH, CV_8UC3);
    // Mat     cvIRImg(REC_HEIGHT, REC_WIDTH, CV_16UC1);
    // Mat     c24bitIR(REC_HEIGHT, REC_WIDTH, CV_8UC3);
    // Mat     cvDepthImg(REC_HEIGHT, REC_WIDTH, CV_16UC1);
    // Mat     registerMat;

    // Mat     CaliDepthHistogram(REC_HEIGHT, REC_WIDTH, CV_16UC1);
    
   
    // Leftovers from the original Astra viewer UI; only IsRegister and
    // isCameraParamsExist are actually consulted below.
    bool    rgbMode         = true;
    bool    isCameraParamsExist = true;
    bool    IsOver          = true;
    bool    IsRegister      = false;

    IplImage    IplColor, IplDepth, IplReg;
    char        temp_alpha[32] = {0};
    char        key = 0;
    CvFont      font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, CV_AA);
    uint16_t alphaC = 5, alphaD = 5;

    cout<<"----------------1111111111111111-------------------"<<endl;
    // CRGBDCamera camera(REC_WIDTH, REC_HEIGHT);
    // camera.InitializeDevice();
    // camera.OpenDevice();
    // camera.InitializeRGBorIRStream(rgbMode, REC_HEIGHT, REC_WIDTH);
    // camera.StartRGBorIRStream();
    // camera.StartDepthStream();  
    

    // cout<<"------------------222222222222222-----------------"<<endl;
    // isCameraParamsExist = checkFile(CAMERA_PARAMS_INI);
    // cout<<"isCameraParamsExist :  "<<isCameraParamsExist<<endl;
    // if (isCameraParamsExist)
    // {
    //     CameraParameter camera;
    //     camera.loadCameraParams(CAMERA_PARAMS_INI);
    //     CalcALLMat(mat, camera);
    // }


    // float *p;
    // for( p = mat; p < (mat+16) ;p++ )
    // {
    //    printf("%f ",*p);

    // }
    // printf("\n\n");

// Enable hardware depth-to-color registration so face boxes found on the
// color image line up with the depth image (isCameraParamsExist is always
// true here, and IsRegister toggles false -> true).
if (isCameraParamsExist)
     {
        IsRegister = !IsRegister;
        if (device.isImageRegistrationModeSupported(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR))
    	{
    		printf("ImageRegistrationModeSupported 222222222222222222222222\n");
        	if (IsRegister)
        	{
           		 device.setImageRegistrationMode(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR);
        	}
       		else
       		{
            device.setImageRegistrationMode(openni::IMAGE_REGISTRATION_OFF);
        	}        
      }
}
	const char* title = "UVC Color";
	cvNamedWindow(title, 1);

	char fpsStr[64] = "30.0";
	// --- Main loop: capture/detect/judge, show preview, ~3 fps pacing ---
	while (!quit)
	{

		Mat previewImg(480, 640, CV_8UC3);
		waitForFrame(previewImg);
		// OpenCV 2.x implicit Mat -> IplImage header conversion (no copy).
		IplImage image = previewImg;
		cvShowImage(title, &image);
		usleep(300000);

		int key = cvWaitKey(10);
		if(key >= 0) {
			keyboardEvent(key);
		}

		// num++;
	}

	// --- Shutdown: release the UVC camera and tear down OpenNI ---
	if (isUvcCamera) {
		capture.release();
	}
	device.close();
	openni::OpenNI::shutdown();

	return 0;
}

先把代码放上来,注释以后慢慢加。基本思想是:先获取摄像头的 IR 和 RGB 图像并进行对齐,在 RGB 图像上做人脸检测;由于两幅图像位置一致,可以把特征点平移到 IR 图像上,再通过算法对 IR 图像上的点进行活体判别。

猜你喜欢

转载自blog.csdn.net/mhsszm/article/details/89153292