Comparing the inference speed of ONNX Runtime, OpenVINO, and MNN

ONNX Runtime, OpenVINO, and MNN can all be used as CPU inference frameworks, so here is a quick comparison of their inference speed.

The model is the ONNX file from an earlier article, and OpenVINO is used through its Python API.

Note: the MNN and ONNX files used here have not been quantized or otherwise optimized; they were converted directly from the original model and are loaded as-is.
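For reference, a direct ONNX-to-MNN conversion like the one described above can be done with the mnnconvert tool that ships with the MNN pip package (the file names match the script below; treat the exact flags as an assumption if your MNN version differs):

mnnconvert -f ONNX --modelFile ctdet_coco_dlav0_512.onnx --MNNModel ctdet_coco_dlav0_512.mnn --bizCode MNN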

The comparison code is as follows:

from openvino.inference_engine import IECore
import onnxruntime
import MNN
import numpy as np
import cv2
import time

model="ctdet_coco_dlav0_512.onnx"
mnnmodel="ctdet_coco_dlav0_512.mnn"

# OpenVINO: read the network and prepare one preprocessed input batch
ie = IECore()
net = ie.read_network(model=model)
input_blob = next(iter(net.input_info))
out_blob = next(iter(net.outputs))
net.batch_size = 1  # batch size
n, c, h, w = net.input_info[input_blob].input_data.shape
print(n, c, h, w)
images = np.ndarray(shape=(n, c, h, w))
for i in range(n):
    image = cv2.imread("123.jpg")
    if image.shape[:-1] != (h, w):
        image = cv2.resize(image, (w, h))
    image = image.transpose((2, 0, 1))  # HWC -> CHW
    images[i] = image
images = images.astype(np.float32)
exec_net = ie.load_network(network=net, device_name="CPU")

# ONNX Runtime: create a session and bind the same input
session = onnxruntime.InferenceSession(model)
inputs = {session.get_inputs()[0].name: images}  # images is already float32

# MNN: create a session and copy the input into the session's input tensor.
# images is NCHW, so the Caffe dimension type (NCHW layout) is used here.
interpreter = MNN.Interpreter(mnnmodel)
mnn_session = interpreter.createSession()
input_tensor = interpreter.getSessionInput(mnn_session)
tmp_input = MNN.Tensor((1, 3, 512, 512), MNN.Halide_Type_Float,
                       images, MNN.Tensor_DimensionType_Caffe)
input_tensor.copyFrom(tmp_input)

# ONNX Runtime inference
start = time.time()
out = session.run(None, inputs)
print('onnxruntime infer total time is %.4f s' % (time.time() - start))

# OpenVINO inference
start = time.time()
res = exec_net.infer(inputs={input_blob: images})
print('openvino infer total time is %.4f s' % (time.time() - start))

# MNN inference (runSession only executes the graph; no output copy here)
start = time.time()
interpreter.runSession(mnn_session)
print('mnn infer total time is %.4f s' % (time.time() - start))
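
One caveat about the script: session.run and exec_net.infer return their outputs, while interpreter.runSession only executes the graph, so the MNN timing does not include copying the result back to host memory. A minimal sketch of reading the MNN output, assuming the standard MNN Python API (getSessionOutput, copyToHostTensor) and taking the output shape from the session rather than hard-coding it:

output_tensor = interpreter.getSessionOutput(mnn_session)
out_shape = output_tensor.getShape()
# host-side buffer with the same shape and NCHW (Caffe) layout
tmp_output = MNN.Tensor(out_shape, MNN.Halide_Type_Float,
                        np.zeros(out_shape, dtype=np.float32),
                        MNN.Tensor_DimensionType_Caffe)
output_tensor.copyToHostTensor(tmp_output)
mnn_out = np.array(tmp_output.getData()).reshape(out_shape)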

Results:

Conclusion: OpenVINO is indeed noticeably faster, an improvement of more than 30% over ONNX Runtime on this model, while MNN shows no obvious speedup.
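
A single timed call like the one above is sensitive to warm-up and cache effects, so the numbers can vary a lot between runs. A more robust comparison averages several runs after a short warm-up; a minimal sketch reusing session, exec_net, interpreter, and the inputs from the script above (the warm-up and run counts are arbitrary):

def bench(fn, warmup=3, runs=20):
    # a few untimed warm-up iterations, then average the timed ones
    for _ in range(warmup):
        fn()
    start = time.time()
    for _ in range(runs):
        fn()
    return (time.time() - start) / runs

print('onnxruntime avg %.4f s' % bench(lambda: session.run(None, inputs)))
print('openvino avg %.4f s' % bench(lambda: exec_net.infer(inputs={input_blob: images})))
print('mnn avg %.4f s' % bench(lambda: interpreter.runSession(mnn_session)))

For an even stricter comparison, each framework should also be pinned to the same number of CPU threads.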


Reposted from blog.csdn.net/zhou_438/article/details/112860138