Kinect V2.0: getting the 3D coordinates (camera coordinate system) of every pixel in the RGB image

The Kinect 2.0 SDK provides a mapping function with the prototype below, which gives the 3D coordinates, in the camera coordinate system, corresponding to every pixel of the 1920*1080 RGB image.

public:
HRESULT MapColorFrameToCameraSpace(
         UINT depthPointCount,
         const UINT16 *depthFrameData,
         UINT cameraPointCount,
         CameraSpacePoint *cameraSpacePoints
)
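
MapColorFrameToCameraSpace is a method of the ICoordinateMapper interface, not of the sensor itself; the mapper is obtained from the opened sensor with get_CoordinateMapper. A minimal sketch (variable names are illustrative and match the demo below):

ICoordinateMapper   * myMapper = nullptr;
mySensor->get_CoordinateMapper(&myMapper);    //mySensor is an opened IKinectSensor*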

A simple demo follows.

#include <iostream>
#include <opencv2/opencv.hpp>
#include <Kinect.h> 
using namespace std;
using namespace cv;
UINT16 *depthData = new UINT16[424 * 512];//the depth frame is 512*424
CameraSpacePoint* m_pColorCoordinates = new CameraSpacePoint[1920 * 1080];//the RGB frame is 1920*1080
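
//NOTE: ConvertMat is not a Kinect SDK function and is not shown in the original
//post; this is a minimal sketch of the assumed helper, which squeezes the 16-bit
//depth values (millimetres) into an 8-bit grayscale Mat for display:
Mat ConvertMat(const UINT16* pBuffer, int nWidth, int nHeight)
{
    Mat img(nHeight, nWidth, CV_8UC1);
    uchar* p_mat = img.data;
    for (int i = 0; i < nWidth * nHeight; i++)
    {
        UINT16 depth = pBuffer[i];
        //clamp the usable range (up to ~4.5 m) into 0-255
        *p_mat++ = static_cast<uchar>(depth <= 4500 ? depth * 255 / 4500 : 255);
    }
    return img;
}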
int main(void)
{
    IKinectSensor   * mySensor = nullptr;
    GetDefaultKinectSensor(&mySensor);
    mySensor->Open();

    //the coordinate mapper that performs the color-to-camera-space mapping
    ICoordinateMapper   * myMapper = nullptr;
    mySensor->get_CoordinateMapper(&myMapper);

    IColorFrameSource   * myColorSource = nullptr;
    mySensor->get_ColorFrameSource(&myColorSource);

    IColorFrameReader   * myColorReader = nullptr;
    myColorSource->OpenReader(&myColorReader);

    int colorHeight = 0, colorWidth = 0;
    IFrameDescription   * myDescription = nullptr;
    myColorSource->get_FrameDescription(&myDescription);
    myDescription->get_Height(&colorHeight);
    myDescription->get_Width(&colorWidth);

    IColorFrame * myColorFrame = nullptr;
    Mat original(colorHeight, colorWidth, CV_8UC4);//BGRA buffer for the color frame
    //**********************ColorFrame**************************

    IDepthFrameSource   * myDepthSource = nullptr;   
    mySensor->get_DepthFrameSource(&myDepthSource);
    int depthheight = 0, depthwidth = 0;
    IFrameDescription   * myDepthDescription = nullptr;  
    myDepthSource->get_FrameDescription(&myDepthDescription);
    myDepthDescription->get_Height(&depthheight);
    myDepthDescription->get_Width(&depthwidth);
    myDepthDescription->Release();

    IDepthFrameReader   * myDepthReader = nullptr;
    myDepthSource->OpenReader(&myDepthReader);    

    IDepthFrame * myDepthFrame = nullptr;
    while (1) {
        //wait for the next depth frame
        while (myDepthReader->AcquireLatestFrame(&myDepthFrame) != S_OK);
        myDepthFrame->CopyFrameDataToArray(depthheight * depthwidth, depthData);
        Mat depth_show_img = ConvertMat(depthData, 512, 424);
        //for display only
        equalizeHist(depth_show_img, depth_show_img);
        imshow("Show_Depth", depth_show_img);
        waitKey(50);
        //mapping operation: map every color pixel into camera space using the current depth frame
        HRESULT hr = myMapper->MapColorFrameToCameraSpace(512 * 424, depthData, 1920 * 1080, m_pColorCoordinates);
        //pick a fixed color-image pixel (100,100)
        Point static_p1;
        static_p1.x=100;
        static_p1.y=100;
        float static_p1_X = m_pColorCoordinates[static_p1.y * 1920 + static_p1.x].X;
        float static_p1_Y = m_pColorCoordinates[static_p1.y * 1920 + static_p1.x].Y;
        float static_p1_Z = m_pColorCoordinates[static_p1.y * 1920 + static_p1.x].Z;
        //print the 3D point (X,Y,Z), in metres, that color pixel (100,100) maps to in the camera coordinate system
        cout<<"X:"<<static_p1_X<<endl;
        cout<<"Y:"<<static_p1_Y<<endl;
        cout<<"Z:"<<static_p1_Z<<endl;
        //grab the matching color frame into the BGRA Mat (not needed for the mapping
        //itself, but kept so the RGB image is available alongside the 3D coordinates)
        while (myColorReader->AcquireLatestFrame(&myColorFrame) != S_OK);
        myColorFrame->CopyConvertedFrameDataToArray(colorHeight * colorWidth * 4, original.data, ColorImageFormat_Bgra);
        myDepthFrame->Release();
        myColorFrame->Release();
    }
    myMapper->Release();
    myDescription->Release();
    myColorReader->Release();
    myColorSource->Release();
    mySensor->Close();
    mySensor->Release();

    return  0;
}
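
Note that color pixels for which the depth camera has no valid measurement cannot be mapped; for those, the CameraSpacePoint fields come back as -infinity. It is therefore worth validating the result before using it, along these lines (std::isinf is from <cmath>):

        if (static_p1_Z > 0 && !std::isinf(static_p1_Z))
        {
            //the mapped 3D point is valid and can be used
        }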

Reposted from blog.csdn.net/l297969586/article/details/78930661