Notes on Converting .pth to .pb

Environment

pip install torch==1.12.1+cu116 torchvision==0.13.1+cu116 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu116
pip install tensorflow==2.11.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install onnx==1.14.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install onnx-tf==1.10.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install protobuf==3.20.2 -i https://pypi.tuna.tsinghua.edu.cn/simple
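
A quick sanity check (a small sketch, not part of the original post) to confirm the installed versions match the pins above:

from importlib.metadata import version

# Print the installed version of each pinned package
for pkg in ["torch", "torchvision", "onnx", "onnx-tf", "tensorflow", "protobuf"]:
    print(pkg, version(pkg))

import torch
print("CUDA available:", torch.cuda.is_available())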

Process

First convert the .pth model to ONNX, then convert the ONNX model to pb.

import torch
import onnx
from onnx_tf.backend import prepare

input_tensor = torch.randn(1, 3, 32, 32)

########### 2. Load the PyTorch model
model_path = "./vgg16_cifar.pth"
model = torch.load(model_path)  # the model's class definition must be importable (e.g., live in the same directory)
model = model.to('cpu')
model.eval()

# ########### 3. Convert to ONNX first
export_onnx_file = "./vgg16_cifar.onnx"  # output file

input_names = ["input"]
output_names = ["output"]
# Declare dynamic axes: dimension 0 of both input and output is variable and named batch_size
dynamic_axes = {
    'input': {0: 'batch_size'},
    'output': {0: 'batch_size'},
}

torch.onnx.export(model,            # model to export
                  input_tensor,     # example model input
                  export_onnx_file, # output file path
                  verbose=True,     # print a description of the exported model to stdout
                  opset_version=12, # ONNX opset version
                  input_names=input_names,      # names assigned to the graph's input nodes, in order
                  output_names=output_names,    # names assigned to the graph's output nodes, in order
                  dynamic_axes=dynamic_axes,    # dynamic dimensions declared above
                  keep_initializers_as_inputs=True  # also add all initializers (typically the parameters) as graph inputs
                  )


########### 4. Convert ONNX to pb (TensorFlow SavedModel)
save_path = "./vgg16_cifar_tf"  # export_graph writes a SavedModel directory at this path (loaded later with tf.saved_model.load)
onnx_model = onnx.load(export_onnx_file)  # load the ONNX model
# Optional: convert the input layout from NCHW to NHWC
# tf_rep = prepare(onnx_model, strict=True, input_shapes={"input": (1, 32, 32, 3)})
tf_rep = prepare(onnx_model)
tf_rep.export_graph(save_path)
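
Before converting to TensorFlow, it can be worth validating the intermediate ONNX file. The sketch below is not part of the original post; it assumes onnxruntime is installed and that `model` from the script above is still in scope. It checks the exported graph and compares PyTorch and ONNX Runtime outputs on the same random input:

import numpy as np
import onnxruntime as rt

# Structural validation of the exported ONNX graph
onnx.checker.check_model(onnx.load(export_onnx_file))

# Numerical comparison on a random input
dummy = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    torch_out = model(dummy).numpy()

sess = rt.InferenceSession(export_onnx_file, providers=["CPUExecutionProvider"])
onnx_out = sess.run(["output"], {"input": dummy.numpy()})[0]

np.testing.assert_allclose(torch_out, onnx_out, rtol=1e-3, atol=1e-5)
print("PyTorch and ONNX Runtime outputs match within tolerance")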

Checking the conversion results

import torch
import torch.onnx
from PIL import Image
import torchvision.transforms as transforms
import onnxruntime as rt
import numpy as np
import tensorflow as tf

physical_devices = tf.config.list_physical_devices('GPU')
if physical_devices:  # only enable memory growth when a GPU is actually present
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

######################################################################################################################
# Predict with PyTorch
def pth_predict(image, pth_path):
    # Load the model
    model = torch.load(pth_path)  # unlike TF, the model's class definition must be importable (e.g., live in the same directory)
    # print(model)

    # Single-image inference
    model.cpu().eval()  # .eval() switches BatchNorm and dropout layers to inference mode
    parameters = model.parameters()
    for p in parameters:
        numpy_para = p.detach().cpu().numpy()
        print(type(numpy_para), numpy_para.shape)

    with torch.no_grad():  # disable gradient tracking: intermediate results are not stored, which saves memory during inference
        output = torch.squeeze(model(image))
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()

    # Print the result
    print('Pth Predicted:', predict.numpy())
    # return predict.numpy()
    return output.numpy()


# Predict with ONNX Runtime
def onnx_predict(image, onnx_path):
    def softmax(x, axis=None):
        x = x - x.max(axis=axis, keepdims=True)
        y = np.exp(x)
        return y / y.sum(axis=axis, keepdims=True)

    # Load the ONNX model; with onnxruntime-gpu installed, setting providers = ['CUDAExecutionProvider'] runs inference on the GPU
    providers = ['CPUExecutionProvider']
    m = rt.InferenceSession(onnx_path, providers=providers)

    # Run the ONNX model
    output_names = ["output"]
    onnx_pred = m.run(output_names, {"input": image})

    # Print the result
    # print('ONNX Predicted:', softmax(onnx_pred[0][0]))
    # return softmax(onnx_pred[0][0])
    return onnx_pred[0]

# Predict with the TensorFlow SavedModel (pb)
def tf_predict(image, pb_path):
    with tf.device('cpu'):
        # Load the TensorFlow SavedModel
        tf_model = tf.saved_model.load(pb_path)
        tf_model = tf_model.signatures['serving_default']
        # Convert the input to a TensorFlow tensor
        input_tensor = tf.convert_to_tensor(image)
        tf_output = tf_model(input_tensor)['output'].numpy()
        # tf_output = tf.nn.softmax(tf_output, axis=0)

    return tf_output


# Load and preprocess the image
def get_image(image_path):
    # Resize while keeping the aspect ratio; pad the remainder with `value`
    def resize_padding(image, target_length, value=0):
        h, w = image.size  # original size (note: PIL's .size is (width, height), so the names are swapped but used consistently below)
        ih, iw = target_length, target_length  # target size
        scale = min(iw / w, ih / h)  # scaling factor
        nw, nh = int(scale * w), int(scale * h)  # size after scaling
        image_resized = image.resize((nh, nw), Image.LANCZOS)  # resize (ANTIALIAS is deprecated; LANCZOS is the equivalent filter)
        image_paded = Image.new("RGB", (ih, iw), value)
        dw, dh = (iw - nw) // 2, (ih - nh) // 2
        image_paded.paste(image_resized, (dh, dw, nh + dh, nw + dw))  # paste the resized image centered
        return image_paded

    # Transform pipeline
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.5], std=[0.5])])

    # Read the image and preprocess it
    image = resize_padding(Image.open(image_path), 32)
    image = transform(image)
    image = image.reshape(1, 3, 32, 32)

    return image

if __name__ == '__main__':
    pth_path = "vgg16_cifar.pth"
    onnx_path = "vgg16_cifar.onnx"
    pb_path = "vgg16_cifar_tf/"
    image_path = "1.jpg"

    image = get_image(image_path)
    image_numpy = image.numpy()

    pth_result = pth_predict(image, pth_path)
    onnx_result = onnx_predict(image_numpy, onnx_path)
    tf_result = tf_predict(image_numpy, pb_path)
    print(pth_result)
    print(onnx_result)
    print(tf_result)
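    # (Addition, not in the original post) Quantify how closely the three
    # back ends agree; the tolerances are illustrative.
    pth_vec = np.squeeze(pth_result)
    onnx_vec = np.squeeze(onnx_result)
    tf_vec = np.squeeze(tf_result)
    print("max |pth - onnx|:", np.max(np.abs(pth_vec - onnx_vec)))
    print("max |pth - pb|  :", np.max(np.abs(pth_vec - tf_vec)))
    print("allclose (pth vs pb):", np.allclose(pth_vec, tf_vec, rtol=1e-3, atol=1e-5))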

Reprinted from blog.csdn.net/weixin_39379635/article/details/131131076