Deploying TensorFlow Models with TVM

This article is a translation and write-up of the official TVM tutorial on compiling TensorFlow models, along with notes on the small pitfalls I ran into while reproducing it.


This article shows how to deploy a TensorFlow model with TVM.
Before you begin, the TensorFlow Python package needs to be installed.

Python and TensorFlow Environment

The following example requires Python 3.5 or later.
When I ran it with Python 3.6.5 and TensorFlow 1.14.0, I hit the following error:

Traceback (most recent call last):
  File "from_tensorflow.py", line 129, in <module>
    shape=shape_dict)
  File "/home/$USER/local/tvm/python/tvm/relay/frontend/tensorflow.py", line 2413, in from_tensorflow
    mod, params = g.from_tensorflow(graph, layout, shape, outputs)
  File "/home/$USER/local/tvm/python/tvm/relay/frontend/tensorflow.py", line 2058, in from_tensorflow
    op = self._convert_operator(node.op, inputs, attr, graph)
  File "/home/$USER/local/tvm/python/tvm/relay/frontend/tensorflow.py", line 2376, in _convert_operator
    sym = convert_map[op_name](inputs, attrs, self._params)
  File "/home/$USER/local/tvm/python/tvm/relay/frontend/tensorflow.py", line 562, in _impl
    extras={'method': "BILINEAR"})(inputs, attr)
  File "/home/$USER/local/tvm/python/tvm/relay/frontend/tensorflow.py", line 155, in __call__
    return _get_relay_op(op_name)(*inputs, **new_attrs)
TypeError: resize() got an unexpected keyword argument 'half_pixel_centers'

The error disappeared after downgrading TensorFlow to 1.12.0. The TVM community discussion of this issue: https://discuss.tvm.ai/t/typeerror-when-running-the-from-tensorflow-example/3046
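If you would rather fail fast than hit the traceback above, a small version guard helps. This guard is my own addition, not part of the official tutorial:

import tensorflow as tf
from distutils.version import LooseVersion

# Warn early on TensorFlow versions newer than the one this example was verified with.
if LooseVersion(tf.__version__) > LooseVersion('1.12.0'):
    print('Warning: verified with TensorFlow 1.12.0, found %s; '
          'the Relay import below may fail.' % tf.__version__)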

For TensorFlow installation instructions, see https://www.tensorflow.org/install.
Here, installing via pip is sufficient:

pip3 install tensorflow==1.12.0

# tvm and relay
import tvm
from tvm import relay

# os and numpy
import numpy as np
import os.path

# Tensorflow imports
import tensorflow as tf

# Tensorflow utility functions
import tvm.relay.testing.tf as tf_testing

# Base URL for the model files (dmlc's web-data repository on GitHub)
repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/'

# Test image
img_name = 'elephant-299.jpg'
image_url = os.path.join(repo_base, img_name)

Tutorials

For more details on the various models supported by the TensorFlow frontend, see docs/frontend/tensorflow.md.

model_name = 'classify_image_graph_def-with_shapes.pb'
model_url = os.path.join(repo_base, model_name)

# Image label map
map_proto = 'imagenet_2012_challenge_label_map_proto.pbtxt'
map_proto_url = os.path.join(repo_base, map_proto)

# Human-readable text for the labels
label_map = 'imagenet_synset_to_human_label_map.txt'
label_map_url = os.path.join(repo_base, label_map)

# Target settings
# Use these commented settings instead to build for CUDA.
#target = 'cuda'
#target_host = 'llvm'
#layout = "NCHW"
#ctx = tvm.gpu(0)
target = 'llvm'
target_host = 'llvm'
layout = None
ctx = tvm.cpu(0)
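If you are unsure whether a GPU is visible to TVM, the context can be queried at runtime. A minimal sketch of my own (the tutorial itself hard-codes the target):

# Switch to the commented CUDA settings automatically when a GPU is present.
if tvm.gpu(0).exist:
    target, target_host, layout, ctx = 'cuda', 'llvm', 'NCHW', tvm.gpu(0)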

Download the Required Files

The following code downloads the files listed above:

from tvm.contrib.download import download_testdata  

img_path = download_testdata(image_url, img_name, module='data')  
model_path = download_testdata(model_url, model_name, module=['tf', 'InceptionV1'])  
map_proto_path = download_testdata(map_proto_url, map_proto, module='data')  
label_path = download_testdata(label_map_url, label_map, module='data')  

Import the Model

Create the TensorFlow graph definition from the protobuf file:

with tf.gfile.FastGFile(model_path, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    graph = tf.import_graph_def(graph_def, name='')
    # Call the utility to import the graph definition into the default graph.
    graph_def = tf_testing.ProcessGraphDefParam(graph_def)
    # Add shapes to the graph.
    with tf.Session() as sess:
        graph_def = tf_testing.AddShapesToGraphDef(sess, 'softmax')
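For this InceptionV1 graph the output node is named 'softmax'. For your own model you can inspect the GraphDef to find the right node name; a quick sketch (my own addition):

# Print a few node names/ops to locate the output node passed to AddShapesToGraphDef.
for node in graph_def.node[:5]:
    print(node.name, node.op)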

Decode the Image

Note from the official tutorial

The TensorFlow frontend import does not support preprocessing ops such as JpegDecode; JpegDecode is bypassed (it just returns the source node). We therefore have to supply an already-decoded frame to TVM.

from PIL import Image  
image = Image.open(img_path).resize((299, 299))  

x = np.array(image)  
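Because JpegDecode is bypassed, the decoded frame must already match what the graph expects: a 299x299 RGB image with uint8 values. A quick sanity check of my own:

# PIL decodes JPEGs to uint8 RGB, so shape and dtype should line up with the
# shape_dict and dtype_dict defined in the next step.
assert x.shape == (299, 299, 3)
assert x.dtype == np.uint8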

Import the Graph into Relay

Import the TensorFlow graph definition into the Relay frontend.

Results:
    mod: Relay module for the given TensorFlow protobuf.
    params: params converted from TensorFlow params (tensor protobuf).

shape_dict = {'DecodeJpeg/contents': x.shape}
dtype_dict = {'DecodeJpeg/contents': 'uint8'}
mod, params = relay.frontend.from_tensorflow(graph_def,
                                             layout=layout,
                                             shape=shape_dict)

print("Tensorflow protobuf imported to relay frontend.")  

Relay Build

Compile the graph for the LLVM target with the given input specification.

Results:
    graph: final graph after compilation.
    params: final params after compilation.
    lib: target library that can be deployed on the target with the TVM runtime.

with relay.build_config(opt_level=3):
    graph, lib, params = relay.build(mod,
                                     target=target,
                                     target_host=target_host,
                                     params=params)
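The three build outputs can also be saved to disk so the model is deployable later without recompiling. A minimal sketch of my own (file names are arbitrary):

# Persist the compiled artifacts for later deployment.
lib.export_library('deploy_lib.so')          # compiled operator library
with open('deploy_graph.json', 'w') as fo:
    fo.write(graph)                          # execution graph (JSON string)
with open('deploy_params.params', 'wb') as fo:
    fo.write(relay.save_param_dict(params))  # serialized weights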

Execute the Compiled Model on TVM

Now we can deploy the compiled model on the target.

from tvm.contrib import graph_runtime
dtype = 'uint8'
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('DecodeJpeg/contents', tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
tvm_output = m.get_output(0, tvm.nd.empty((1, 1008), 'float32'))

Process the Output

Convert the model output to human-readable text.

predictions = tvm_output.asnumpy()  
predictions = np.squeeze(predictions)  

# Create a node ID --> English string lookup.
node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path,  
                                    uid_lookup_path=label_path)  

# Print the top 5 predictions from the TVM output.
top_k = predictions.argsort()[-5:][::-1]  
for node_id in top_k:  
    human_string = node_lookup.id_to_string(node_id)  
    score = predictions[node_id]  
    print('%s (score = %.5f)' % (human_string, score))  

Inference on TensorFlow

Run the same model on TensorFlow for comparison.

def create_graph():  
    """Creates a graph from saved GraphDef file and returns a saver."""  
    # Creates the graph from the saved graph_def.pb.
    with tf.gfile.FastGFile(model_path, 'rb') as f:  
        graph_def = tf.GraphDef()  
        graph_def.ParseFromString(f.read())  
        graph = tf.import_graph_def(graph_def, name='')  
        # Call the utility to import the graph definition into the default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)  

def run_inference_on_image(image):  
    """Runs inference on an image.  

    Parameters  
    ----------  
    image: String  
        Image file name.  

    Returns  
    -------  
        Nothing  
    """  
    if not tf.gfile.Exists(image):  
        tf.logging.fatal('File does not exist %s', image)  
    image_data = tf.gfile.FastGFile(image, 'rb').read()  

    # Creates the graph from the saved GraphDef.
    create_graph()  

    with tf.Session() as sess:  
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')  
        predictions = sess.run(softmax_tensor,  
                               {'DecodeJpeg/contents:0': image_data})  

        predictions = np.squeeze(predictions)  

        # Create a node ID --> English string lookup.
        node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path,  
                                            uid_lookup_path=label_path)  

        # Print the top 5 predictions from TensorFlow.
        top_k = predictions.argsort()[-5:][::-1]  
        print ("===== TENSORFLOW RESULTS =======")  
        for node_id in top_k:  
            human_string = node_lookup.id_to_string(node_id)  
            score = predictions[node_id]  
            print('%s (score = %.5f)' % (human_string, score))  

run_inference_on_image(img_path)  

Complete Code

The complete code is listed below.
It is recommended to download the full Python source and the Jupyter notebook from the bottom of the official documentation page: https://docs.tvm.ai/tutorials/frontend/from_tensorflow.html#sphx-glr-download-tutorials-frontend-from-tensorflow-py

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile Tensorflow Models
=========================
This article is an introductory tutorial to deploy tensorflow models with TVM.

For us to begin with, tensorflow python module is required to be installed.

Please refer to https://www.tensorflow.org/install
"""

# tvm, relay
import tvm
from tvm import relay

# os and numpy
import numpy as np
import os.path

# Tensorflow imports
import tensorflow as tf

# Tensorflow utility functions
import tvm.relay.testing.tf as tf_testing

# Base location for model related files.
repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/'

# Test image
img_name = 'elephant-299.jpg'
image_url = os.path.join(repo_base, img_name)

######################################################################
# Tutorials
# ---------
# Please refer docs/frontend/tensorflow.md for more details for various models
# from tensorflow.

model_name = 'classify_image_graph_def-with_shapes.pb'
model_url = os.path.join(repo_base, model_name)

# Image label map
map_proto = 'imagenet_2012_challenge_label_map_proto.pbtxt'
map_proto_url = os.path.join(repo_base, map_proto)

# Human readable text for labels
label_map = 'imagenet_synset_to_human_label_map.txt'
label_map_url = os.path.join(repo_base, label_map)

# Target settings
# Use these commented settings to build for cuda.
#target = 'cuda'
#target_host = 'llvm'
#layout = "NCHW"
#ctx = tvm.gpu(0)
target = 'llvm'
target_host = 'llvm'
layout = None
ctx = tvm.cpu(0)

######################################################################
# Download required files
# -----------------------
# Download files listed above.
from tvm.contrib.download import download_testdata

img_path = download_testdata(image_url, img_name, module='data')
model_path = download_testdata(model_url, model_name, module=['tf', 'InceptionV1'])
map_proto_path = download_testdata(map_proto_url, map_proto, module='data')
label_path = download_testdata(label_map_url, label_map, module='data')

######################################################################
# Import model
# ------------
# Creates tensorflow graph definition from protobuf file.

with tf.gfile.FastGFile(model_path, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    graph = tf.import_graph_def(graph_def, name='')
    # Call the utility to import the graph definition into default graph.
    graph_def = tf_testing.ProcessGraphDefParam(graph_def)
    # Add shapes to the graph.
    with tf.Session() as sess:
        graph_def = tf_testing.AddShapesToGraphDef(sess, 'softmax')

######################################################################
# Decode image
# ------------
# .. note::
#
#   tensorflow frontend import doesn't support preprocessing ops like JpegDecode.
#   JpegDecode is bypassed (just return source node).
#   Hence we supply decoded frame to TVM instead.
#

from PIL import Image
image = Image.open(img_path).resize((299, 299))

x = np.array(image)

######################################################################
# Import the graph to Relay
# -------------------------
# Import tensorflow graph definition to relay frontend.
#
# Results:
#   mod: relay module for the given tensorflow protobuf.
#   params: params converted from tensorflow params (tensor protobuf).
shape_dict = {'DecodeJpeg/contents': x.shape}
dtype_dict = {'DecodeJpeg/contents': 'uint8'}
mod, params = relay.frontend.from_tensorflow(graph_def,
                                             layout=layout,
                                             shape=shape_dict)

print("Tensorflow protobuf imported to relay frontend.")
######################################################################
# Relay Build
# -----------
# Compile the graph to llvm target with given input specification.
#
# Results:
#   graph: Final graph after compilation.
#   params: final params after compilation.
#   lib: target library which can be deployed on target with TVM runtime.

with relay.build_config(opt_level=3):
    graph, lib, params = relay.build(mod,
                                     target=target,
                                     target_host=target_host,
                                     params=params)

######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the compiled model on target.

from tvm.contrib import graph_runtime
dtype = 'uint8'
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('DecodeJpeg/contents', tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
tvm_output = m.get_output(0, tvm.nd.empty((1, 1008), 'float32'))

######################################################################
# Process the output
# ------------------
# Process the model output to human readable text for InceptionV1.
predictions = tvm_output.asnumpy()
predictions = np.squeeze(predictions)

# Creates node ID --> English string lookup.
node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path,
                                    uid_lookup_path=label_path)

# Print top 5 predictions from TVM output.
top_k = predictions.argsort()[-5:][::-1]
for node_id in top_k:
    human_string = node_lookup.id_to_string(node_id)
    score = predictions[node_id]
    print('%s (score = %.5f)' % (human_string, score))

######################################################################
# Inference on tensorflow
# -----------------------
# Run the corresponding model on tensorflow

def create_graph():
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(model_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        graph = tf.import_graph_def(graph_def, name='')
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)

def run_inference_on_image(image):
    """Runs inference on an image.

    Parameters
    ----------
    image: String
        Image file name.

    Returns
    -------
        Nothing
    """
    if not tf.gfile.Exists(image):
        tf.logging.fatal('File does not exist %s', image)
    image_data = tf.gfile.FastGFile(image, 'rb').read()

    # Creates graph from saved GraphDef.
    create_graph()

    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})

        predictions = np.squeeze(predictions)

        # Creates node ID --> English string lookup.
        node_lookup = tf_testing.NodeLookup(label_lookup_path=map_proto_path,
                                            uid_lookup_path=label_path)

        # Print top 5 predictions from tensorflow.
        top_k = predictions.argsort()[-5:][::-1]
        print ("===== TENSORFLOW RESULTS =======")
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            print('%s (score = %.5f)' % (human_string, score))

run_inference_on_image(img_path)


Reposted from blog.csdn.net/weixin_43953703/article/details/94479191