Wrapping a Darknet YOLOv3 model as a DLL with the OpenCV dnn module, with complete C++ interface test code

OpenCV DLL wrapper

The wrapper has two layers: CMyVisionDetect (MyVisionDetect.h / MyVisionDetect.cpp) runs the YOLOv3 model through the cv::dnn module, and visionDetect.h / visionDetect.cpp expose a single C-style function, MV_SDK_ObjectiveDetect, which is exported from the DLL and called by the test program at the end.

MyVisionDetect.h

#pragma once

/************************************************************************/
/* A thin wrapper over the C++ interface that repackages the commonly       */
/* used functions for convenience                                           */
/************************************************************************/

#ifndef _MY_VISION_DETECT_H_
#define _MY_VISION_DETECT_H_

#include <fstream>
#include <sstream>
#include <iostream>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

//#define SHOW_DEBUG_INFO

using namespace cv;
using namespace dnn;
using namespace std;


typedef struct
{
	Rect roi;
	string species;
	float confidence;

}boxParameters;

class CMyVisionDetect
{
    public:
	CMyVisionDetect()       // initialization is done in the constructor body
	{
		string classesFile = "yolov3.names";
		ifstream ifs(classesFile.c_str());
		string line;
		while (getline(ifs, line)) classes.push_back(line);

		// Give the configuration and weight files for the model
		String modelConfiguration = "yolov3.cfg";
		String modelWeights = "yolov3.weights";

		// Load the network
		net = readNetFromDarknet(modelConfiguration, modelWeights);
		net.setPreferableBackend(DNN_BACKEND_OPENCV);
		net.setPreferableTarget(DNN_TARGET_CPU);
		
	}

	~CMyVisionDetect();

    private:
	// Initialize the parameters
	float confThreshold = 0.5; // Confidence threshold
	float nmsThreshold = 0.4;  // Non-maximum suppression threshold
	int inpWidth = 416;  // Width of network's input image
	int inpHeight = 416; // Height of network's input image
	vector<string> classes;
	Net net;
	// Remove the bounding boxes with low confidence using non-maxima suppression
	void postprocess(Mat& frame, const vector<Mat>& out, vector<boxParameters>& boxsResult);

	// Draw the predicted bounding box
	void drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat& frame);

	// Get the names of the output layers
	vector<String> getOutputsNames(const Net& net);
    public:

	void detectPicture(Mat frame, Mat &result, vector<boxParameters>& boxsResult);

};

#endif
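
For reference, the class can also be used directly, without the DLL layer. A minimal sketch, assuming MyVisionDetect.cpp is compiled into the same executable, the yolov3.cfg / yolov3.weights / yolov3.names files sit in the working directory, and "sample.jpg" is a placeholder test image:

// standalone_test.cpp -- hypothetical direct use of CMyVisionDetect (no DLL involved)
#include "MyVisionDetect.h"

int main()
{
	CMyVisionDetect detector;              // loads names/cfg/weights in the constructor

	Mat frame = imread("sample.jpg");      // placeholder image name
	if (frame.empty()) return -1;

	Mat result;
	vector<boxParameters> boxes;
	detector.detectPicture(frame, result, boxes);

	// Print every detection: class name, confidence and bounding box
	for (size_t i = 0; i < boxes.size(); ++i)
		cout << boxes[i].species << " " << boxes[i].confidence << " "
		     << boxes[i].roi.x << "," << boxes[i].roi.y << ","
		     << boxes[i].roi.width << "," << boxes[i].roi.height << endl;

	imshow("result", result);
	waitKey();
	return 0;
}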

MyVisionDetect.cpp

#include"MyVisionDetect.h"

CMyVisionDetect::~CMyVisionDetect()
{
}

void CMyVisionDetect::detectPicture(Mat frame, Mat &result, vector<boxParameters>& boxsResult)
{
	// Create a 4D blob from the input frame: scale pixel values to [0,1],
	// resize to the network input size and swap the R and B channels.
	Mat blob;
	blobFromImage(frame, blob, 1 / 255.0, cv::Size(inpWidth, inpHeight), Scalar(0, 0, 0), true, false);

	//Sets the input to the network
	net.setInput(blob);


	/*std::vector<String> outNames = net.getUnconnectedOutLayersNames();*/
	// Run the forward pass to get the output of the output layers
	vector<Mat> outs;
	net.forward(outs, getOutputsNames(net));

	// Detection results are returned to the caller through boxsResult

	// Remove the bounding boxes with low confidence
	postprocess(frame, outs, boxsResult);

	// Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
	vector<double> layersTimes;
	double freq = getTickFrequency() / 1000;
	double t = net.getPerfProfile(layersTimes) / freq;
	string label = format("Inference time for a frame : %.2f ms", t);
#ifdef SHOW_DEBUG_INFO
	std::cout << "检测时间:" << label << std::endl;
#endif

	putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 255));

	// Write the frame with the detection boxes
	Mat detectedFrame;
	frame.convertTo(detectedFrame, CV_8U);

#ifdef SHOW_DEBUG_INFO
    // Create a window
	static const string kWinName = "Deep learning object detection in OpenCV";
	namedWindow(kWinName, WINDOW_NORMAL);
	imshow(kWinName, frame);
#endif // SHOW_DEBUG_INFO
	result = frame;
	//return frame;
}

// Remove the bounding boxes with low confidence using non-maxima suppression
void CMyVisionDetect::postprocess(Mat& frame, const vector<Mat>& outs, vector<boxParameters>& boxsResult)
{
	vector<int> classIds;
	vector<float> confidences;
	vector<Rect> boxes;
#ifdef SHOW_DEBUG_INFO
	std::cout << "检测到的box数:" << outs.size() << std::endl;
#endif
	for (size_t i = 0; i < outs.size(); ++i)
	{
		// Scan through all the bounding boxes output from the network and keep only the
		// ones with high confidence scores. Assign the box's class label as the class
		// with the highest score for the box.
		float* data = (float*)outs[i].data;
		for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols)
		{
			Mat scores = outs[i].row(j).colRange(5, outs[i].cols);
			Point classIdPoint;
			double confidence;
			// Get the value and location of the maximum score
			minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
			if (confidence > confThreshold)
			{
				int centerX = (int)(data[0] * frame.cols);
				int centerY = (int)(data[1] * frame.rows);
				int width = (int)(data[2] * frame.cols);
				int height = (int)(data[3] * frame.rows);
				int left = centerX - width / 2;
				int top = centerY - height / 2;

				classIds.push_back(classIdPoint.x);
				confidences.push_back((float)confidence);
				boxes.push_back(Rect(left, top, width, height));
			}
		}
	}
	// Perform non maximum suppression to eliminate redundant overlapping boxes with
	// lower confidences
	vector<int> indices;
	NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);
#ifdef SHOW_DEBUG_INFO
	std::cout << "符合要求的box数:" << indices.size() << std::endl;
#endif
	for (size_t i = 0; i < indices.size(); ++i)
	{
		int idx = indices[i];
		Rect box = boxes[idx];
		drawPred(classIds[idx], confidences[idx], box.x, box.y,
			box.x + box.width, box.y + box.height, frame);
		// Save the boxes that pass the confidence and NMS thresholds
		boxParameters midBox;
		midBox.confidence = confidences[idx];
		midBox.roi = box;
		midBox.species = classes[classIds[idx]];
		boxsResult.push_back(midBox);
	}
}

// Draw the predicted bounding box
void CMyVisionDetect::drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat& frame)
{
	//Draw a rectangle displaying the bounding box
	rectangle(frame, Point(left, top), Point(right, bottom), Scalar(255, 178, 50), 3);

	//Get the label for the class name and its confidence
	string label = format("%.2f", conf);
	if (!classes.empty())
	{
		CV_Assert(classId < (int)classes.size());
		label = classes[classId] + ":" + label;
	}

	//Display the label at the top of the bounding box
	int baseLine;
	Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
	top = max(top, labelSize.height);
	rectangle(frame, Point(left, top - round(1.5*labelSize.height)), Point(left + round(1.5*labelSize.width), top + baseLine), Scalar(255, 255, 255), FILLED);
	putText(frame, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0), 1); // scale/thickness match getTextSize above
}

// Get the names of the output layers
vector<String> CMyVisionDetect::getOutputsNames(const Net& net)
{
	static vector<String> names;
	if (names.empty())
	{
		// Get the indices of the output layers, i.e. the layers with unconnected outputs
		vector<int> outLayers = net.getUnconnectedOutLayers();

		// Get the names of all the layers in the network
		vector<String> layersNames = net.getLayerNames();

		// Map each output layer index to its name (layer indices are 1-based)
		names.resize(outLayers.size());
		for (size_t i = 0; i < outLayers.size(); ++i)
			names[i] = layersNames[outLayers[i] - 1];
	}
	return names;
}

visionDetect.h

#pragma once  // prevent this header from being compiled more than once
//#ifndef __CDLL_H__
//#define __CDLL_H__
#include"MyVisionDetect.h"
typedef unsigned char  byte;
#ifndef _OUT
#define _OUT
#endif
#ifndef _IN
#define _IN
#endif

struct detectParameter
{
	uint inputSize; // size of the input image buffer in bytes
	detectParameter()
	{
		inputSize = 0;
	}
}; 

struct detectedBox
{
	int x;
	int y;
	int width;
	int height;
	double confidence;
	string species;
	detectedBox()
	{
		x = 0;
		y = 0;
		width = 0;
		height = 0;
		confidence = 0;
		species = "";
	}
};
struct detectResult
{
	byte* resultImage; // encoded result image (JPEG bytes)
	uint resultSize;   // size of the result buffer in bytes
	int boxCount;
	detectedBox boxs[64];
	detectResult()
	{
		resultImage = NULL;
		resultSize = 0;
		boxCount = 0;
	}
};
extern "C" _declspec(dllexport) int __stdcall MV_SDK_ObjectiveDetect(byte *inputImage, _IN detectParameter input, _OUT detectResult* output);
	//#endif
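
The test program below links against the import library through #pragma comment(lib, "VisionDetect.lib"). A rough alternative is to load the DLL at run time. A minimal sketch, with these assumptions: the DLL file is named VisionDetect.dll, the function is reachable under its undecorated export name, and the struct definitions are copied so the layouts match visionDetect.h:

// runtime_load.cpp -- hypothetical run-time loading of the detection DLL
#include <Windows.h>
#include <iostream>
#include <string>

typedef unsigned char byte;
typedef unsigned int  uint;

// These layouts must match visionDetect.h exactly
struct detectParameter { uint inputSize = 0; };
struct detectedBox
{
	int x = 0, y = 0, width = 0, height = 0;
	double confidence = 0;
	std::string species;
};
struct detectResult
{
	byte* resultImage = nullptr;
	uint  resultSize = 0;
	int   boxCount = 0;
	detectedBox boxs[64];
};

// Function pointer type matching the exported signature
typedef int(__stdcall *DetectFunc)(byte*, detectParameter, detectResult*);

int main()
{
	HMODULE hDll = LoadLibraryA("VisionDetect.dll");   // DLL file name is an assumption
	if (!hDll) { std::cout << "DLL not found" << std::endl; return -1; }

	// On 32-bit builds a __stdcall export may carry a decorated name
	// (something like _MV_SDK_ObjectiveDetect@12) unless a .def file is used.
	DetectFunc detect = (DetectFunc)GetProcAddress(hDll, "MV_SDK_ObjectiveDetect");
	if (!detect) { FreeLibrary(hDll); std::cout << "export not found" << std::endl; return -1; }

	// ...fill inputImage / input / output and call detect(...) exactly as in main() below...

	FreeLibrary(hDll);
	return 0;
}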

visionDetect.cpp

#include"visionDetect.h"


CMyVisionDetect* m_pcMyVisionDetect = new CMyVisionDetect();

int __stdcall MV_SDK_ObjectiveDetect(byte *inputImage, _IN detectParameter input, _OUT detectResult* output)
{
    
    
	//解码
	vector<byte> buff;
	for (uint i = 0; i < input.inputSize; i++)
	{
		buff.push_back(inputImage[i]);
	}
	Mat srcImage = imdecode(buff, IMREAD_COLOR);
	if (srcImage.empty())
	{
		return -1;
	}
#ifdef SHOW_DEBUG_INFO
	namedWindow("原图", WINDOW_NORMAL);
	imshow("原图", srcImage);
#endif
	Mat result;
	vector<boxParameters> boxsResult;
	m_pcMyVisionDetect->detectPicture(srcImage, result, boxsResult);

#ifdef SHOW_DEBUG_INFO
	std::cout << "最后输出的box数:" << boxsResult.size() << std::endl;
#endif
	// The output array holds at most 64 boxes; clamp to avoid overflowing it
	output->boxCount = (int)boxsResult.size();
	if (output->boxCount > 64) output->boxCount = 64;
	for (int i = 0; i < output->boxCount; ++i)
	{
		// Copy each accepted box into the output structure
		output->boxs[i].confidence = boxsResult[i].confidence;
		output->boxs[i].species = boxsResult[i].species;
		output->boxs[i].x = boxsResult[i].roi.x;
		output->boxs[i].y = boxsResult[i].roi.y;
		output->boxs[i].width = boxsResult[i].roi.width;
		output->boxs[i].height = boxsResult[i].roi.height;
	}

	// Encode the result image as a JPEG buffer
	vector<int> param = vector<int>(2);
	param[0] = IMWRITE_JPEG_QUALITY;
	param[1] = 95;//default(95) 0-100

	vector<byte> inImage;
	imencode(".jpg", result, inImage, param);
	output->resultSize = inImage.size();
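	// Note: this allocates a fresh buffer and overwrites whatever pointer the caller put in
	// output->resultImage; the caller owns the returned buffer and is expected to release it
	// with delete[] (assuming the DLL and the caller share the same C runtime).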
	output->resultImage = new byte[output->resultSize];
	for (uint i = 0; i < output->resultSize; i++)
	{
		output->resultImage[i] = inImage[i];
		//cout << resultImage[i] << endl;
	}

	// (debug) decode the buffer again and display the result:
	/*vector<byte> buff1;
	for (uint i = 0; i<resultSize; i++)
	{
		buff1.push_back(resultImage[i]);
	}
	Mat show = imdecode(buff1, IMREAD_COLOR);
	namedWindow("result", WINDOW_NORMAL);
	imshow("result", show);*/

	//delete m_pcMyVisionDetect;
	//m_pcMyVisionDetect = NULL;

	//cv::waitKey(0);

	return 0;
}

C++ interface call test

#include <fstream>
#include <sstream>
#include <iostream>
#include<Windows.h> 
//#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

//#define SHOW_DEBUG_INFO

using namespace cv;
//using namespace dnn;
using namespace std;
typedef unsigned char  byte;

#ifndef _OUT
#define _OUT
#endif

#ifndef _IN
#define _IN
#endif

struct detectParameter
{
	uint inputSize;
	detectParameter()
	{
		inputSize = 0;
	}
};

struct detectedBox
{
	int x;
	int y;
	int width;
	int height;
	double confidence;
	string species;

	detectedBox()
	{
		x = 0;
		y = 0;
		width = 0;
		height = 0;
		confidence = 0;
		species = "";
	}
};

struct detectResult
{
	byte* resultImage;
	uint resultSize;
	int boxCount;
	detectedBox boxs[64];

	detectResult()
	{
		resultImage = NULL;
		resultSize = 0;
		boxCount = 0;
	}
};

#pragma comment(lib,"VisionDetect.lib")
extern "C" _declspec(dllimport) int __stdcall MV_SDK_ObjectiveDetect(byte *inputImage, _IN detectParameter input, _OUT detectResult* output);     //   加载模型


int main()
{
	Mat tstMat = imread("test.jpg");
	namedWindow("source", WINDOW_NORMAL);
	imshow("source", tstMat);
	// Encode the test image as a JPEG buffer
	vector<byte> inImage;
	vector<int> param = vector<int>(2);
	param[0] = IMWRITE_JPEG_QUALITY;
	param[1] = 95;//default(95) 0-100
	imencode(".jpg", tstMat, inImage, param);
	uint inputSize = inImage.size();
	std::cout << "编码大小:" << inputSize << std::endl;
	byte *inputImage = new byte[inputSize];
	for (uint i = 0; i<inputSize; i++)
	{
		inputImage[i] = inImage[i];
		//cout << inputSize[i] << endl;
	}
	//byte* resultImage=new byte[900000];
	//uint resultSize;

	detectParameter input;
	detectResult* output=new detectResult();

	input.inputSize = inputSize;
	output->resultImage = new byte[900000];
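	// Note: MV_SDK_ObjectiveDetect allocates its own result buffer and overwrites this
	// pointer, so the 900000-byte pre-allocation above is never used and leaks as written.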

	DWORD start_time = GetTickCount();  // start timing

	//detect(inputImage,inputSize, resultImage, resultSize);
	MV_SDK_ObjectiveDetect(inputImage, input, output);

	DWORD end_time = GetTickCount();  // stop timing
	cout << "The run time is:" << (end_time - start_time) << "ms!" << endl;

	std::cout << "输出box数:" << output->boxCount << std::endl;
	for (int i=0;i<output->boxCount;i++)
	{
		std::cout << "Box " << i + 1 << ":" << std::endl;
		std::cout << "  class: " << output->boxs[i].species << std::endl;
		std::cout << "  confidence: " << output->boxs[i].confidence << std::endl;
		std::cout << "  (x,y,width,height)=(" << output->boxs[i].x << "," << output->boxs[i].y << "," << output->boxs[i].width << "," << output->boxs[i].height << ")" << std::endl;
	}
	// Decode the returned JPEG buffer
	std::cout << "Result buffer size: " << output->resultSize << std::endl;
	vector<byte> buff;
	for (uint i = 0; i<output->resultSize; i++)
	{
		buff.push_back(output->resultImage[i]);
	}
	Mat show = imdecode(buff, IMREAD_COLOR);
	namedWindow("结果图", WINDOW_NORMAL);
	imshow("结果图", show);

	imwrite("save.jpg",show);

	waitKey();

	system("pause");
	return 0;
}

Reposted from blog.csdn.net/qq_40118285/article/details/126776534