I previously wrote an article on using the Python version of OpenVINO.
Here we continue with the same model, this time from C++. The code is as follows:
#include <iostream>
#include <chrono>
#include <opencv2/opencv.hpp>
#include <inference_engine.hpp>
using namespace std;
using namespace InferenceEngine;
int main() {
    string modelpath = "C:\\ctdet_coco_dlav0_512\\ctdet_coco_dlav0_512.xml";
    string imagepath = "C:\\123.jpg";

    // read the image and resize it to the network's 512x512 input size
    cv::Mat im = cv::imread(imagepath);
    if (im.empty()) {
        cerr << "failed to load image: " << imagepath << endl;
        return 1;
    }
    cv::Mat image;
    cv::resize(im, image, cv::Size(512, 512));
    cout << "image loaded" << endl;
    // read the network (IR .xml plus the matching .bin)
    Core ie;
    CNNNetwork network = ie.ReadNetwork(modelpath);

    // input info: we will feed U8 NHWC data, which is the layout of an
    // interleaved cv::Mat; the engine's pre-processing converts it to the
    // network's native format (and could even resize for us, thanks to
    // RESIZE_BILINEAR, though we already resized up front)
    InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
    std::string input_name = network.getInputsInfo().begin()->first;
    input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
    input_info->setLayout(Layout::NHWC);
    input_info->setPrecision(Precision::U8);
    cout << input_name << endl;
    // output info: read the results back as FP32
    DataPtr output_info = network.getOutputsInfo().begin()->second;
    std::string output_name = network.getOutputsInfo().begin()->first;
    output_info->setPrecision(Precision::FP32);

    // load the network onto the CPU device
    ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");

    // create an infer request
    InferRequest infer_request = executable_network.CreateInferRequest();
    // wrap the cv::Mat pixels as a U8 blob without copying; NHWC matches
    // OpenCV's interleaved HWC memory layout
    InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8,
                                      { 1, 3, 512, 512 },
                                      InferenceEngine::Layout::NHWC);
    Blob::Ptr imgBlob = InferenceEngine::make_shared_blob<uint8_t>(tDesc, image.data);
    infer_request.SetBlob(input_name, imgBlob);
    // run inference and time it; steady_clock gives wall-clock time, which
    // is what we want for multi-threaded CPU inference
    auto time_start = chrono::steady_clock::now();
    infer_request.Infer();
    auto time_end = chrono::steady_clock::now();
    cout << "infer time is: "
         << chrono::duration<double, milli>(time_end - time_start).count()
         << "ms" << endl;

    // get the output and print the first few values as a sanity check
    const float* output = infer_request.GetBlob(output_name)->buffer()
        .as<PrecisionTrait<Precision::FP32>::value_type*>();
    for (int i = 0; i < 5; i++) {
        cout << output[i] << endl;
    }
    return 0;
}
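Since the listing only prints the first five values, it helps to check the output blob's actual shape before interpreting the numbers. Here is a minimal sketch using the same Inference Engine API; the helper name printOutputShape is mine, and it assumes the same includes and using directives as the listing above:

// Hypothetical helper: print the shape and element count of an output blob.
void printOutputShape(InferRequest& infer_request, const string& output_name) {
    Blob::Ptr outBlob = infer_request.GetBlob(output_name);
    // getDims() returns a SizeVector (std::vector<size_t>) describing the shape
    SizeVector dims = outBlob->getTensorDesc().getDims();
    cout << "output shape: ";
    for (size_t d : dims) cout << d << " ";
    cout << endl;
    cout << "total elements: " << outBlob->size() << endl;
}

Calling printOutputShape(infer_request, output_name) right after Infer() shows how many values the buffer actually holds; if the model exposes several outputs, you can iterate network.getOutputsInfo() and print each one the same way.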
The result: