Testing a YOLOv4 Object Detection Model with Qt + OpenCV 4.4 on Ubuntu

1. Download opencv-4.4.0.tar.gz and opencv_contrib-4.4.0.tar.gz from GitHub, extract both, and move the opencv_contrib-4.4.0 folder into the opencv-4.4.0 folder.
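
For example (a minimal sketch, assuming both archives were downloaded under these names to the current directory):

tar -xzf opencv-4.4.0.tar.gz
tar -xzf opencv_contrib-4.4.0.tar.gz
mv opencv_contrib-4.4.0 opencv-4.4.0/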

2. Create a build directory under the opencv-4.4.0 root directory. Here OpenCV 4.4 is built together with its CUDA modules (CUDA and cuDNN must already be installed). The cmake options are shown below; note that WITH_CUDA=ON and OPENCV_DNN_CUDA=ON are required for the CUDA DNN backend, and OPENCV_GENERATE_PKGCONFIG=ON generates the pkg-config file used in step 5.2.

cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D WITH_TBB=ON -D WITH_V4L=ON -D WITH_QT=ON -D WITH_OPENGL=ON -D WITH_CUDA=ON -D OPENCV_DNN_CUDA=ON -D WITH_CUBLAS=1 -D ENABLE_FAST_MATH=1 -D CUDA_FAST_MATH=1 -D CUDA_NVCC_FLAGS="-D_FORCE_INLINES" -D BUILD_TIFF=ON -D INSTALL_C_EXAMPLES=OFF -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON -D OPENCV_GENERATE_PKGCONFIG=ON -D OPENCV_EXTRA_MODULES_PATH=../opencv_contrib-4.4.0/modules ..
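
After cmake finishes, check the configuration summary it prints and confirm that CUDA and cuDNN are listed as detected before starting the build.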

3. Build with make -j4 (or make -j$(nproc) to use all available cores).

4. Install with sudo make install; by default everything is installed under /usr/local/.
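
After installation the headers are under /usr/local/include/opencv4 and the shared libraries under /usr/local/lib; these are the paths used in step 7.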

5. Configure the OpenCV library environment so that Qt can find the libraries.

5.1 Create an opencv.conf file under /etc/ld.so.conf.d/:

sudo gedit /etc/ld.so.conf.d/opencv.conf  

and add the line

/usr/local/lib

Make the configuration take effect:

sudo ldconfig

5.2 Add the PKG_CONFIG_PATH environment variable to /etc/profile:

sudo vim /etc/profile

Append the following at the end:

export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig

Source the profile file to make it take effect:

source /etc/profile
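
To verify that pkg-config can now find OpenCV (this assumes the pkg-config file was generated via OPENCV_GENERATE_PKGCONFIG=ON in step 2):

pkg-config --modversion opencv4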

6. Download the cfg and weights files from https://github.com/AlexeyAB/darknet/wiki/YOLOv4-model-zoo (access may require a proxy; without one the download can be very slow). Alternatively, download the whole project from Baidu Netdisk: link: https://pan.baidu.com/s/1K1KGeVhA669SIhtZUQqQNw password: ts0e

7. Create a new Qt project and add the following include path and library directives to the .pro file:

INCLUDEPATH += /usr/local/include/opencv4
LIBS += /usr/local/lib/libopencv*.so
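
The wildcard links against every installed OpenCV library. If qmake does not expand it on your system, an explicit alternative (a sketch listing only the modules the test code below needs) is:

LIBS += -L/usr/local/lib -lopencv_core -lopencv_imgproc -lopencv_highgui -lopencv_videoio -lopencv_dnn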

The final test code is as follows:

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <fstream>
#include <iostream>

int main()
{
    std::ifstream classNamesFile("../model/OpenCV4YOLO4/coco.names");

    std::vector<std::string> classNamesVec;
    if (classNamesFile.is_open())
    {
        std::string className = "";
        while (std::getline(classNamesFile, className))
            classNamesVec.push_back(className);
    }
    for (size_t i = 0; i < classNamesVec.size(); i++) {
        std::cout << i + 1 << "\t" << classNamesVec[i].c_str() << std::endl;
    }

    cv::String cfg = "../model/OpenCV4YOLO4/yolov4.cfg";
    cv::String weight = "../model/OpenCV4YOLO4/yolov4.weights";

    cv::dnn::Net net = cv::dnn::readNetFromDarknet(cfg, weight);
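
    // Optional addition (not in the original post): if OpenCV was built with
    // CUDA support (step 2), the CUDA DNN backend can be selected explicitly.
    // If CUDA is unavailable at runtime, OpenCV logs a warning and falls back
    // to the default CPU backend.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);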

    int currFrame = 0;

    cv::VideoCapture cap;
    cap.open("../video/Walking_Shibuya_Crossing_at_Night_Binaural_City.mp4");
    //    cv::VideoCapture cap(0); // open camera instead
    cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('M', 'J', 'P', 'G')); // MJPG is mainly relevant for camera capture

    cv::Mat frame;
    //cv::Mat frame = cv::imread("./image/1.jpg"); // read a single image instead
    //cv::imshow("img", frame);
    while (true)
    {
        std::cout<<"read "<<currFrame<<" frame"<<std::endl;
        ++currFrame;
        cap >> frame;
        if (frame.empty())
        {
            break;
        }

        // swapRB=true: Darknet YOLO models expect RGB input while OpenCV frames are BGR
        cv::Mat inputBlob = cv::dnn::blobFromImage(frame, 1.0 / 255, cv::Size(608, 608), cv::Scalar(), true, false);
        net.setInput(inputBlob);

        // query the names of the unconnected output layers (the YOLO detection heads)
        std::vector<cv::String> outNames = net.getUnconnectedOutLayersNames();
        for (size_t i = 0; i < outNames.size(); i++) {
            std::cout << "output layer name : " << outNames[i].c_str() << std::endl;
        }

        std::vector<cv::Mat> outs;
        net.forward(outs, outNames);

        float* data;
        cv::Mat scores;

        std::vector<cv::Rect> boxes;
        std::vector<int> classIds;
        std::vector<float> confidences;
        int centerX, centerY, width, height, left, top;

        float confidenceThreshold = 0.2;
        double confidence;

        cv::Point classIdPoint;

        // parse detections: each output row is
        // [cx, cy, w, h, objectness, class scores...], box values normalized to [0, 1]
        for (size_t i = 0; i < outs.size(); ++i) {
            data = (float*)outs[i].data;
            for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols){
                scores = outs[i].row(j).colRange(5, outs[i].cols);
                minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
                if (confidence > confidenceThreshold){
                    centerX = (int)(data[0] * frame.cols);
                    centerY = (int)(data[1] * frame.rows);
                    width = (int)(data[2] * frame.cols);
                    height = (int)(data[3] * frame.rows);
                    left = centerX - width / 2;
                    top = centerY - height / 2;

                    classIds.push_back(classIdPoint.x);
                    confidences.push_back((float)confidence);
                    boxes.push_back(cv::Rect(left, top, width, height));
                }
            }
        }

        std::vector<int> indices;
        cv::dnn::NMSBoxes(boxes, confidences, 0.3, 0.2, indices); // score threshold 0.3, NMS (IoU) threshold 0.2

        cv::Scalar rectColor, textColor; // box and label colors derived from the detection index
        cv::Rect box, textBox;
        int idx;
        cv::String className;
        cv::Size labelSize;

        for (size_t i = 0; i < indices.size(); ++i) {
            idx = indices[i];
            className = classNamesVec[classIds[idx]];

            labelSize = getTextSize(className, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, 0);
            box = boxes[idx];
            textBox = cv::Rect(cv::Point(box.x - 1, box.y),
                           cv::Point(box.x + labelSize.width, box.y - labelSize.height));
            rectColor = cv::Scalar(idx * 11 % 256, idx * 22 % 256, idx * 33 % 256);
            textColor = cv::Scalar(255 - idx * 11 % 256, 255 - idx * 22 % 256, 255 - idx * 33 % 256);

            rectangle(frame, box, rectColor, 2, 8, 0);
            rectangle(frame, textBox, rectColor, -1, 8, 0);
            putText(frame, className.c_str(), cv::Point(box.x, box.y - 2), cv::FONT_HERSHEY_SIMPLEX, 0.5, textColor, 1, 8);
        }

        cv::imshow("OpenCV - YOLO", frame);
        cv::waitKey(1);
        //        if (waitKey(33) >= 0) break;
    }

    return 0;
}
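
Note that the relative paths in the code (../model/..., ../video/...) are resolved against the program's working directory; when launched from Qt Creator this is the shadow-build directory by default, so adjust the paths or the project's working directory in the run settings to match your layout.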

Detection results: (screenshot omitted)

Reposted from blog.csdn.net/sunlin972913894/article/details/108168380