openpose 代码解读(一)

1.存储骨骼关键点信息的结构体。为了找到存储骨骼关键点数组,研究人员使用VS2017在整个项目文件里搜索了poseKeypoints的引用,通过研究发现所有骨骼、脸部、手部坐标数组都定义在这个Datum结构体中。
该结构体定义位于 D:\projects\jack\openpose\include\openpose\core\datum.hpp文件中

// Excerpt of op::Datum: the per-frame container that carries all detection
// results through the OpenPose pipeline (only keypoint-related members shown).
struct OP_API Datum
    {
    ...
         Array<float> poseKeypoints; // (x,y,score) // 2D body keypoints, one (x,y,score) triple per joint per person
     Array<long long> poseIds; // Tracking ID per detected person
     Array<float> poseScores; // Overall confidence score per detected person
     Array<float> faceKeypoints; // 2D face keypoints, same (x,y,score) layout
     std::array<Array<float>, 2> handKeypoints; // 2D hand keypoints: index 0 = left hand, index 1 = right hand
    ...
    }

以上都是2D数据,并且 poseKeypoints是以 (x,y,score)形式存储的。
2.并且存储有3D坐标,和相机的内参、外参矩阵。

Array<float> poseKeypoints3D; //(x,y,z,score)
Array<float> faceKeypoints3D; //(x,y,z,score)
// 3D hand keypoints: index 0 = left hand, index 1 = right hand, each (x,y,z,score)
std::array<Array<float>, 2> handKeypoints3D;
/**
* 3x4 camera matrix of the camera (equivalent to cameraIntrinsics * cameraExtrinsics).
 */
cv::Mat cameraMatrix;
/**
* 3x4 extrinsic parameters of the camera.
 */
cv::Mat cameraExtrinsics;
/**
* 3x3 intrinsic parameters of the camera.
 */
cv::Mat cameraIntrinsics;

3.将坐标写入 JSON 文件的代码定义在 D:\projects\jack\openpose\include\openpose\filestream\wPeopleJsonSaver.hpp 中,
位于以 TDatums 为模板参数的模板类 WPeopleJsonSaver 的 workConsumer 函数里

// Pairs each keypoint array with the JSON key it is saved under.
// Fix: the original snippet read bare `poseIds`, inconsistent with every
// other entry (and with the OpenPose source), which accesses the member
// through `tDatumPtr->`.
const std::vector<std::pair<Array<float>, std::string>> keypointVector{
                        // Pose IDs (Array<long long> converted to Array<float>)
                        std::make_pair(tDatumPtr->poseIds, "person_id"),
                        // 2D keypoints: (x,y,score) triples
                        std::make_pair(tDatumPtr->poseKeypoints, "pose_keypoints_2d"),
                        std::make_pair(tDatumPtr->faceKeypoints, "face_keypoints_2d"),
                        std::make_pair(tDatumPtr->handKeypoints[0], "hand_left_keypoints_2d"),
                        std::make_pair(tDatumPtr->handKeypoints[1], "hand_right_keypoints_2d"),
                        // 3D keypoints: (x,y,z,score) tuples
                        std::make_pair(tDatumPtr->poseKeypoints3D, "pose_keypoints_3d"),
                        std::make_pair(tDatumPtr->faceKeypoints3D, "face_keypoints_3d"),
                        std::make_pair(tDatumPtr->handKeypoints3D[0], "hand_left_keypoints_3d"),
                        std::make_pair(tDatumPtr->handKeypoints3D[1], "hand_right_keypoints_3d")
                    };
                    // Save keypoints
                    spPeopleJsonSaver->save(
                        keypointVector, tDatumPtr->poseCandidates, fileName, humanReadable);

4.将图片的 Mat 和检测出的人员 ID 绘制在一起的代码位于 C:\Users\jack\Desktop\openpose\src\openpose\gui\guiInfoAdder.cpp 的 addPeopleIds 函数里

// Draws each tracked person's ID next to one of their first two keypoints.
// cvOutputData:  image the IDs are rendered onto (modified in place).
// poseIds:       tracking ID per person; values <= -1 mean "not tracked".
// poseKeypoints: 2D keypoints laid out as [person][part][x,y,score].
// borderMargin:  pixel margin used to offset the label from the keypoint.
void addPeopleIds(
        cv::Mat& cvOutputData, const Array<long long>& poseIds, const Array<float>& poseKeypoints,
        const int borderMargin)
    {
        try
        {
            // Nothing to draw when tracking produced no IDs.
            if (poseIds.empty())
                return;
            // Floats per person: #parts * #channels (x, y, score).
            const auto floatsPerPerson = poseKeypoints.getSize(1)*poseKeypoints.getSize(2);
            const auto scoreThreshold = 0.05f;
            for (auto person = 0u ; person < poseIds.getVolume() ; person++)
            {
                // Skip untracked people.
                if (poseIds[person] <= -1)
                    continue;
                const auto offsetFirst = person * floatsPerPerson;
                const auto offsetSecond = person * floatsPerPerson + poseKeypoints.getSize(2);
                const auto firstVisible = poseKeypoints[offsetFirst+2] > scoreThreshold;
                const auto secondVisible = poseKeypoints[offsetSecond+2] > scoreThreshold;
                // Need at least one of the first two keypoints to anchor the label.
                if (!firstVisible && !secondVisible)
                    continue;
                const auto xA = positiveIntRound(poseKeypoints[offsetFirst]);
                const auto yA = positiveIntRound(poseKeypoints[offsetFirst+1]);
                const auto xB = positiveIntRound(poseKeypoints[offsetSecond]);
                const auto yB = positiveIntRound(poseKeypoints[offsetSecond+1]);
                int x;
                int y;
                if (firstVisible && secondVisible)
                {
                    // Offset proportional to the distance between the two keypoints.
                    const auto keypointRatio = positiveIntRound(
                        0.15f * std::sqrt((xA-xB)*(xA-xB) + (yA-yB)*(yA-yB)));
                    x = xA + 3*keypointRatio;
                    y = yA - 3*keypointRatio;
                }
                else if (firstVisible)
                {
                    x = xA + positiveIntRound(0.25f*borderMargin);
                    y = yA - positiveIntRound(0.25f*borderMargin);
                }
                else // only the second keypoint is visible
                {
                    x = xB + positiveIntRound(0.25f*borderMargin);
                    y = yB - positiveIntRound(0.5f*borderMargin);
                }
                putTextOnCvMat(cvOutputData, std::to_string(poseIds[person]), {x, y}, WHITE_SCALAR, false, cvOutputData.cols);
            }
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    }

5.实时显示"处理帧数/秒"、"当前帧数"、"帮助 h"等文本的代码位于 C:\Users\jack\Desktop\openpose\src\openpose\gui\guiInfoAdder.cpp 的 addInfo 函数

// Overlays runtime info (fps, rendered-element name / help hint, frame number,
// people count, and per-person IDs) onto the output frame.
// cvOutputData:        frame to draw on (modified in place; must be non-empty).
// numberPeople:        count shown in the bottom-right corner.
// id:                  producer frame id, used for the fps estimate.
// elementRenderedName: name of the currently rendered body part/heatmap.
// frameNumber:         value shown in the bottom-left corner.
// poseIds/poseKeypoints: forwarded to addPeopleIds for per-person ID labels.
void GuiInfoAdder::addInfo(cv::Mat& cvOutputData, const int numberPeople, const unsigned long long id,
                               const std::string& elementRenderedName, const unsigned long long frameNumber,
                               const Array<long long>& poseIds, const Array<float>& poseKeypoints)
    {
        try
        {
            // Sanity check
            if (cvOutputData.empty())
                error("Wrong input element (empty cvOutputData).", __LINE__, __FUNCTION__, __FILE__);
            // Size
            // Margin scales with the larger image dimension (2.5% of it).
            const auto borderMargin = positiveIntRound(fastMax(cvOutputData.cols, cvOutputData.rows) * 0.025);
            // Update fps
            updateFps(mLastId, mFps, mFpsCounter, mFpsQueue, id, mNumberGpus);
            // Fps or s/gpu
            char charArrayAux[15];
            std::snprintf(charArrayAux, 15, "%4.1f fps", mFps);
            // Recording inverse: sec/gpu
            // std::snprintf(charArrayAux, 15, "%4.2f s/gpu", (mFps != 0. ? mNumberGpus/mFps : 0.));
            // Right-aligned in the top-right corner (normalizeWidth = true).
            putTextOnCvMat(
                cvOutputData, charArrayAux, {positiveIntRound(cvOutputData.cols - borderMargin), borderMargin},
                WHITE_SCALAR, true, cvOutputData.cols);
            // Part to show
            // Allowing some buffer when changing the part to show (if >= 2 GPUs)
            // I.e. one GPU might return a previous part after the other GPU returns the new desired part, it looks
            // like a mini-bug on screen
            // Difference between Titan X (~110 ms) & 1050 Ti (~290ms)
            if (mNumberGpus == 1 || (elementRenderedName != mLastElementRenderedName
                                     && mLastElementRenderedCounter > 4))
            {
                mLastElementRenderedName = elementRenderedName;
                mLastElementRenderedCounter = 0;
            }
            // Clamp before incrementing so the counter never overflows int.
            mLastElementRenderedCounter = fastMin(mLastElementRenderedCounter, std::numeric_limits<int>::max() - 5);
            mLastElementRenderedCounter++;
            // Add each person ID
            addPeopleIds(cvOutputData, poseIds, poseKeypoints, borderMargin);
            // OpenPose name as well as help or part to show
            // Falls back to the 'h' help hint only when the GUI is enabled.
            putTextOnCvMat(cvOutputData, "OpenPose - " +
                           (!mLastElementRenderedName.empty() ?
                                mLastElementRenderedName : (mGuiEnabled ? "'h' for help" : "")),
                           {borderMargin, borderMargin}, WHITE_SCALAR, false, cvOutputData.cols);
            // Frame number
            putTextOnCvMat(cvOutputData, "Frame: " + std::to_string(frameNumber),
                           {borderMargin, (int)(cvOutputData.rows - borderMargin)}, WHITE_SCALAR, false, cvOutputData.cols);
            // Number people
            putTextOnCvMat(cvOutputData, "People: " + std::to_string(numberPeople),
                           {(int)(cvOutputData.cols - borderMargin), (int)(cvOutputData.rows - borderMargin)},
                           WHITE_SCALAR, true, cvOutputData.cols);
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    }

6.添加文本时调用的是 putTextOnCvMat 函数,该函数位于 C:\Users\jack\Desktop\openpose\src\openpose\utilities\openCv.cpp 中。

// Renders text with a black drop shadow onto cvMat.
// position:       anchor in pixels; when normalizeWidth is true the text is
//                 right-aligned on position.x, otherwise left-aligned.
// color:          foreground text color.
// imageWidth:     used to scale font size/thickness relative to a 1280-px-wide
//                 reference image.
// Fix: the original used cv::Size to hold 2-D pixel coordinates and relied on
// the implicit cv::Size -> cv::Point conversion when calling cv::putText;
// cv::Point is the correct type for an (x, y) text origin.
void putTextOnCvMat(cv::Mat& cvMat, const std::string& textToDisplay, const Point<int>& position,
                        const cv::Scalar& color, const bool normalizeWidth, const int imageWidth)
    {
        try
        {
            const auto font = cv::FONT_HERSHEY_SIMPLEX;
            // Scale everything relative to a 1280-px-wide reference image.
            const auto ratio = imageWidth/1280.;
            // const auto fontScale = 0.75;
            const auto fontScale = 0.8 * ratio;
            const auto fontThickness = std::max(1, positiveIntRound(2*ratio));
            const auto shadowOffset = std::max(1, positiveIntRound(2*ratio));
            int baseline = 0;
            const auto textSize = cv::getTextSize(textToDisplay, font, fontScale, fontThickness, &baseline);
            // Bottom-left origin of the text: right-align when requested, and
            // vertically center the glyphs around the requested y.
            const cv::Point finalPosition{position.x - (normalizeWidth ? textSize.width : 0),
                                          position.y + textSize.height/2};
            // Shadow first (offset black copy), then the text itself on top.
            cv::putText(cvMat, textToDisplay,
                        cv::Point{finalPosition.x + shadowOffset, finalPosition.y + shadowOffset},
                        font, fontScale, cv::Scalar{0,0,0}, fontThickness);
            cv::putText(cvMat, textToDisplay, finalPosition, font, fontScale, color, fontThickness);
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    }

猜你喜欢

转载自blog.csdn.net/u010451780/article/details/111177389
今日推荐