ONNX inference code for semantic segmentation (onnx 推理语义分割代码)

import os
import onnxruntime
import numpy as np
import cv2

img_path = './jianzhu.tif'
#device = 'gpu'
device = 'cpu'

def load_model():
    weight_path = "./weights/jianzhu.onnx"
    onnx_model = weight_path
    sess = onnxruntime.InferenceSession(onnx_model, providers=["CPUExecutionProvider"])  # cpu
    sess_gpu = onnxruntime.InferenceSession(onnx_model, providers=["CUDAExecutionProvider"])  # gpu
    return sess_gpu, sess

sess_gpu, sess_cpu = load_model()
if device == 'cpu':
    in_name = [input.name for input in sess_cpu.get_inputs()][0]
    out_name = [output.name for output in sess_cpu.get_outputs()]
    model = {
    
    'predict': sess_cpu, 'in_name': in_name, 'out_name': out_name}
else:
    in_name = [input.name for input in sess_gpu.get_inputs()][0]
    out_name = [output.name for output in sess_gpu.get_outputs()]
    model = {
    
    'predict': sess_gpu, 'in_name': in_name, 'out_name': out_name}

img = cv2.imread(img_path)

img_mean = [92.92403, 96.31168, 91.85767]
img_std = [44.074368, 44.840057, 51.984196]
img = (img - np.array(img_mean).astype(np.float32)) / np.array(img_std).astype(np.float32)
img = np.transpose(img, [2, 0, 1])
img = np.expand_dims(img, axis=0)

output_img = model['predict'].run(model['out_name'], {
    
    model['in_name']: img})[0]
output_img = np.asarray(output_img)
output_img = output_img.squeeze(axis=0)
pred = output_img.squeeze()
pred = pred * 255
pred = pred.astype(np.uint8)
cv2.imwrite("./jianzhu_out.tif",pred)
print("output_img:",pred.shape)

Source: blog.csdn.net/weixin_38353277/article/details/129031002