[Code] OpenVINO API inference, plus converting an ONNX model to .bin and .xml (IR format)

Reference: OpenVINO Python API documentation

# coding:UTF-8
import time
import cv2
import numpy as np

from openvino.runtime import Core


def openvino_inference(model_xml, model_bin, imgpath):
    """Run single-image inference with an OpenVINO IR model on CPU.

    Args:
        model_xml: Path to the IR topology file (.xml).
        model_bin: Path to the IR weights file (.bin).
        imgpath: Path to the input image.

    Returns:
        numpy.ndarray: The raw tensor produced by the model's first output layer.
    """
    # Initialize the inference engine and print the available devices.
    ie = Core()
    for device in ie.available_devices:
        device_name = ie.get_property(device_name=device, name="FULL_DEVICE_NAME")
        print(f"{device}: {device_name}")

    # Load the IR model and compile it for CPU execution.
    model = ie.read_model(model=model_xml, weights=model_bin)
    compiled_model = ie.compile_model(model=model, device_name="CPU")

    # First input layer: report its precision and shape ({n, c, h, w}).
    input_layer = next(iter(compiled_model.inputs))
    print(f"input precision: {input_layer.element_type}")
    print(f"input shape: {input_layer.shape}")

    # First output layer: report its precision and shape.
    output_layer = next(iter(compiled_model.outputs))
    print(f"output precision: {output_layer.element_type}")
    print(f"output shape: {output_layer.shape}")

    # Preprocessing — model specific; adapt for other models:
    # resize to 128x128, center-crop to 112x112, BGR->RGB,
    # then normalize to approximately [-1, 1].
    cvimg = cv2.imread(imgpath)
    resized = cv2.resize(cvimg, (128, 128))

    margin = (128 - 112) // 2  # equal margin on each side of the 112x112 crop
    ccropped = resized[margin:margin + 112, margin:margin + 112]  # center crop
    ccropped = ccropped[..., ::-1]  # BGR -> RGB

    image_mean = np.array([127, 127, 127])
    ccropped = (ccropped - image_mean) / 128
    image = np.transpose(ccropped, [2, 0, 1])  # HWC -> CHW: [112,112,3] -> [3,112,112]
    image = np.expand_dims(image, axis=0)      # add batch dim -> [1,3,112,112]
    input_data = image.astype(np.float32)

    # Run inference and return the first output tensor.
    result = compiled_model([input_data])[output_layer]

    return result



# Driver: run the inference helper on a sample image and show the
# shape of the resulting feature tensor.
imgpath = '0.jpg'  # sample input image

model_xml = '.xml的路径'  # TODO: set to the IR topology file path
model_bin = '.bin的路径'  # TODO: set to the IR weights file path

openvino_feat = openvino_inference(model_xml, model_bin, imgpath)

# print(openvino_feat)
print(openvino_feat.shape)

# -----------------------
#   Export an ONNX model to OpenVINO IR (.xml + .bin)
# -----------------------
# NOTE: `openvino.offline_transformations.serialize` is deprecated;
# `openvino.runtime.serialize` (same package already used above) is the
# supported replacement and writes the same .xml/.bin pair.
from openvino.runtime import serialize

ie = Core()
onnx_model_path = "要转为.bin和.xml格式的.onnx路径"  # TODO: path to the source .onnx file
model_onnx = ie.read_model(model=onnx_model_path)
serialize(model_onnx, xml_path="model/exported_onnx_model.xml", bin_path="model/exported_onnx_model.bin")

Reprinted from blog.csdn.net/weixin_45392674/article/details/126468232