ONNX获取每层输出值 (Get every layer's output values from an ONNX model)
import collections
import copy

import cv2
import numpy as np
import onnx
import onnxruntime
# Load the ONNX model and expose every intermediate node output as a graph
# output, so onnxruntime returns per-layer activations instead of only the
# final outputs.
model = onnx.load('model.onnx')
# NOTE(review): raw uint8 HWC image with a leading batch axis — most ONNX
# models expect float32 NCHW; confirm the expected layout/dtype of the model.
img = cv2.imread("test.jpg")[None]
# Keep a copy of the original graph outputs (useful to restore the model later).
ori_output = copy.deepcopy(model.graph.output)
# Register every node's outputs as additional graph outputs.
for node in model.graph.node:
    for output in node.output:
        model.graph.output.extend([onnx.ValueInfoProto(name=output)])
ort_session = onnxruntime.InferenceSession(model.SerializeToString())
# BUG FIX: the original fed an undefined name `inputs`; feed the loaded image.
ort_inputs = {ort_session.get_inputs()[0].name: img}
ort_outs = ort_session.run(None, ort_inputs)
# Map every output name to its tensor, preserving the session's output order.
outputs = [x.name for x in ort_session.get_outputs()]
ort_outs = collections.OrderedDict(zip(outputs, ort_outs))
for name, output in ort_outs.items():
    print(name, output)
Pytorch获取每层输出值 (Get every layer's output values from a PyTorch model)
import torch
import torch.utils.hooks as hooks
# BUG FIX: "torchvison" was a typo — the package name is "torchvision".
from torchvision.models.resnet import resnet18
def get_output_param(module, datasets):
    """Forward ``datasets`` through ``module`` and capture every named
    submodule's output.

    Args:
        module: the model (an ``nn.Module``) whose intermediate activations
            are wanted.
        datasets: the input tensor (or batch) forwarded through ``module``.

    Returns:
        dict mapping each named submodule's qualified name to the tensor it
        produced during the forward pass.
    """
    output_param = {}
    handles = []
    for name, layer in module.named_modules():
        # Skip the root module itself (its qualified name is the empty string).
        if name == "":
            continue

        # Bind `name` as a default argument so each hook remembers its own
        # layer. The original peeked into the private `_forward_hooks` dict,
        # whose key layout is a torch implementation detail (integer ids in
        # current releases), and it never removed the hooks it installed.
        def hook(mod, inputs, output, name=name):
            output_param[name] = output.data

        handles.append(layer.register_forward_hook(hook))
    try:
        module(datasets)
    finally:
        # Always detach the hooks so repeated calls don't accumulate them.
        for handle in handles:
            handle.remove()
    return output_param
TensorFlow2.x获取每层输出值 (Get every layer's output values from a TensorFlow 2.x model)
# TensorFlow 2.x: build a single-output submodel per layer and evaluate it.
# NOTE(review): `output_parameters` is overwritten on every `data` iteration,
# so only the last batch's activations survive — confirm that is intended.
for data in datasets:
    for layer in tf_net.layers:
        temp_tf_net = tf.keras.models.Model(inputs=tf_net.input, outputs=layer.output)
        per_layer_output = temp_tf_net(data)
        # BUG FIX: the original stored the undefined name `output`.
        output_parameters[layer.name] = per_layer_output
TFLite 获取每层输出 (Get every layer's output from a TFLite model)
import flatbuffers
from tensorflow.lite.python import schema_py_generated as schema_fb
def OutputsOffset(subgraph, j):
    """Return the flatbuffer byte offset of output-tensor index ``j`` of ``subgraph``."""
    # Vtable slot 8 holds the subgraph's `outputs` vector; 0 means it is absent.
    vec = flatbuffers.number_types.UOffsetTFlags.py_type(subgraph._tab.Offset(8))
    if vec == 0:
        return 0
    base = subgraph._tab.Vector(vec)
    # Each output index is a 4-byte little-endian int32, so entry j sits 4*j
    # bytes past the start of the vector's data.
    return base + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)
# reference: https://github.com/raymond-li/tflite_tensor_outputter/blob/master/tflite_tensor_outputter.py
def buffer_change_output_tensor_to(model_buffer, new_tensor_i):
    """Rewrite the first output-tensor index of the model's first subgraph.

    Returns a new model buffer whose subgraph 0 reports ``new_tensor_i`` as
    its first output, so the interpreter will expose that intermediate tensor.
    """
    root = schema_fb.Model.GetRootAsModel(model_buffer, 0)
    offset = OutputsOffset(root.Subgraphs(0), 0)
    # Flatbuffer scalars are little-endian: serialize the index as 4 bytes.
    index_bytes = (new_tensor_i & 0xFFFFFFFF).to_bytes(4, "little")
    # Splice the new index over the 4 bytes of the original first output.
    return model_buffer[:offset] + index_bytes + model_buffer[offset + 4:]
# Read the model.
with open('net.tflite', 'rb') as f:
    model_buffer = f.read()

# Redirect the graph output to intermediate tensor #20, then run inference
# on the patched buffer to read that tensor's value.
idx = 20
model_buffer = buffer_change_output_tensor_to(model_buffer, idx)
interpreter = tf.lite.Interpreter(model_content=model_buffer)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
input_index = input_details["index"]
output_index = interpreter.get_output_details()[0]["index"]
# BUG FIX: `input_data` was never defined; synthesize a correctly shaped and
# typed dummy input from the interpreter's own input specification.
input_data = np.zeros(input_details["shape"], dtype=input_details["dtype"])
interpreter.set_tensor(input_index, input_data)
interpreter.invoke()
out_val = interpreter.get_tensor(output_index)