【Tensorflow_C++_API_2】Loading a pre-trained network for image recognition (label_image)

This example shows how to load a pre-trained TensorFlow network and use it to recognize objects in an image. The source code is in the `tensorflow/examples/label_image` directory.

It uses the Google Inception model to classify an image file passed on the command line; by default it runs on a picture of Admiral Grace Hopper.

First, download the TensorFlow `GraphDef` file containing the model definition and weights into the `tensorflow/examples/label_image/data/` directory.
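At its core, the program just loads the downloaded `GraphDef`, creates a `Session` for it, and runs that session on an image tensor. Below is a minimal sketch of that pattern, condensed from the `LoadGraph()` and `Session::Run()` calls in the full listing that follows; it assumes the example's default input node `Mul` and output node `softmax`:

```cpp
#include <memory>
#include <string>
#include <vector>

#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/public/session.h"

// Load a frozen GraphDef from disk, create a Session for it, feed one tensor
// to a named input node, and fetch a named output node.
tensorflow::Status LoadAndRun(const std::string& graph_path,
                              const tensorflow::Tensor& input,
                              std::vector<tensorflow::Tensor>* outputs) {
  tensorflow::GraphDef graph_def;
  // Parse the binary protobuf that holds the model definition and weights.
  TF_RETURN_IF_ERROR(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
                                                 graph_path, &graph_def));
  std::unique_ptr<tensorflow::Session> session(
      tensorflow::NewSession(tensorflow::SessionOptions()));
  TF_RETURN_IF_ERROR(session->Create(graph_def));
  // "Mul" and "softmax" are the default input/output node names used below.
  return session->Run({{"Mul", input}}, {"softmax"}, {}, outputs);
}
```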

```cpp
#include <fstream>
#include <vector>
 
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
 
using tensorflow::Flag;
using tensorflow::Tensor;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;
 
// Takes a file name, and loads a list of labels from it, one per line, and
// returns a vector of the strings. It pads with empty strings so the length
// of the result is a multiple of 16, because our model expects that.
Status ReadLabelsFile(string file_name, std::vector<string>* result,
                      size_t* found_label_count) {
  std::ifstream file(file_name);
  if (!file) {
    return tensorflow::errors::NotFound("Labels file ", file_name,
                                        " not found.");
  }
  result->clear();
  string line;
  while (std::getline(file, line)) {
    result->push_back(line);
  }
  *found_label_count = result->size();
  const int padding = 16;
  while (result->size() % padding) {
    result->emplace_back();
  }
  return Status::OK();
}
 
// Given an image file name, read in the data, try to decode it as an image,
// resize it to the requested size, and then scale the values as desired.
Status ReadTensorFromImageFile(string file_name, const int input_height,
                               const int input_width, const float input_mean,
                               const float input_std,
                               std::vector<Tensor>* out_tensors) {
  // A Scope is a group of related TensorFlow ops that share common properties,
  // such as a common name prefix (see tensorflow/cc/framework/scope.h).
  // Scope::NewRootScope() creates a root scope, which creates a new graph, a
  // new status, and the name maps.
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
 
  string input_name = "file_reader";
  string output_name = "normalized";
  auto file_reader = tensorflow::ops::ReadFile(root.WithOpName(input_name),
                                               file_name);
  // Now try to figure out what kind of file it is and decode it.
  const int wanted_channels = 3;
  tensorflow::Output image_reader;
  if (tensorflow::StringPiece(file_name).ends_with(".png")) {
    image_reader = DecodePng(root.WithOpName("png_reader"), file_reader,
                             DecodePng::Channels(wanted_channels));
  } else if (tensorflow::StringPiece(file_name).ends_with(".gif")) {
    image_reader = DecodeGif(root.WithOpName("gif_reader"), file_reader);
  } else {
    // Assume if it's neither a PNG nor a GIF then it must be a JPEG.
    image_reader = DecodeJpeg(root.WithOpName("jpeg_reader"), file_reader,
                              DecodeJpeg::Channels(wanted_channels));
  }
  
  // Now cast the image data to float so we can do normal math on it.
  auto float_caster =
      Cast(root.WithOpName("float_caster"), image_reader, tensorflow::DT_FLOAT);
  // The convention for image ops in TensorFlow is that all images are expected
  // to be in batches, so that they're four-dimensional arrays with indices of
  // [batch, height, width, channel]. Because we only have a single image, we
  // have to add a batch dimension of 1 to the start with ExpandDims().
  auto dims_expander = ExpandDims(root, float_caster, 0);
  // Bilinearly resize the image to fit the required dimensions.
  auto resized = ResizeBilinear(
      root, dims_expander,
      Const(root.WithOpName("size"), {input_height, input_width}));
  // Subtract the mean and divide by the scale.
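  // That is, the output is (image - input_mean) / input_std, computed
  // elementwise with the scalar mean and std broadcast over the image tensor.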
  Div(root.WithOpName(output_name), Sub(root, resized, {input_mean}),
      {input_std});
 
  // This runs the GraphDef network definition that we've just constructed, and
  // returns the results in the output tensor.
  // ToGraphDef() converts the graph built under this Scope into a GraphDef.
  tensorflow::GraphDef graph;
  TF_RETURN_IF_ERROR(root.ToGraphDef(&graph));
 
  // Create a Session, the interface that actually runs the graph, and run it,
  // specifying which node to fetch the result from and where to put the output.
  std::unique_ptr<tensorflow::Session> session(
      tensorflow::NewSession(tensorflow::SessionOptions()));
  TF_RETURN_IF_ERROR(session->Create(graph));
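  // Session::Run() takes (inputs, fetch names, target node names, outputs).
  // There is nothing to feed here, so we only fetch the "normalized" tensor.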
  TF_RETURN_IF_ERROR(session->Run({}, {output_name}, {}, out_tensors));
  return Status::OK();
}
 
// Reads a model graph definition from disk, and creates a session object you
// can use to run it.
Status LoadGraph(string graph_file_name,
                 std::unique_ptr<tensorflow::Session>* session) {
  tensorflow::GraphDef graph_def;
  // Load the protobuf file that holds the serialized GraphDef straight from disk.
  Status load_graph_status =
      ReadBinaryProto(tensorflow::Env::Default(), graph_file_name, &graph_def);
  if (!load_graph_status.ok()) {
    return tensorflow::errors::NotFound("Failed to load compute graph at '",
                                        graph_file_name, "'");
  }
  // Create a Session from the GraphDef and hand it back to the caller, which
  // can then use it to run the graph.
  session->reset(tensorflow::NewSession(tensorflow::SessionOptions()));
  Status session_create_status = (*session)->Create(graph_def);
  if (!session_create_status.ok()) {
    return session_create_status;
  }
  return Status::OK();
}
 
// Analyzes the output of the Inception graph to retrieve the highest scores and
// their positions in the tensor, which correspond to categories.
Status GetTopLabels(const std::vector<Tensor>& outputs, int how_many_labels,
                    Tensor* indices, Tensor* scores) {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;  // NOLINT(build/namespaces)
 
  string output_name = "top_k";
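  // TopK returns the k largest entries along the last dimension, sorted in
  // descending order, together with their indices in the input tensor.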
  TopK(root.WithOpName(output_name), outputs[0], how_many_labels);
  // This runs the GraphDef network definition that we've just constructed, and
  // returns the results in the output tensors.
  tensorflow::GraphDef graph;
  TF_RETURN_IF_ERROR(root.ToGraphDef(&graph));
 
  std::unique_ptr<tensorflow::Session> session(
      tensorflow::NewSession(tensorflow::SessionOptions()));
  TF_RETURN_IF_ERROR(session->Create(graph));
  // The TopK node returns two outputs, the scores and their original indices,
  // so we have to append :0 and :1 to specify them both.
  std::vector<Tensor> out_tensors;
  TF_RETURN_IF_ERROR(session->Run({}, {output_name + ":0", output_name + ":1"},
                                  {}, &out_tensors));
  *scores = out_tensors[0];
  *indices = out_tensors[1];
  return Status::OK();
}
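
// Note: GetTopLabels() builds and runs a small TensorFlow graph just to compute
// TopK. If all you need is the single best class, a plain host-side scan of the
// output tensor works too. This helper is a sketch of that alternative and is
// not used elsewhere in this example.
int ArgMaxLabel(const tensorflow::Tensor& softmax) {
  auto scores = softmax.flat<float>();  // [1, num_classes], flattened
  int best = 0;
  for (int i = 1; i < scores.size(); ++i) {
    if (scores(i) > scores(best)) best = i;
  }
  return best;
}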
 
// Given the output of a model run and the name of a file containing the labels,
// this prints out the top five highest-scoring values.
Status PrintTopLabels(const std::vector<Tensor>& outputs,
                      string labels_file_name) {
  std::vector<string> labels;
  size_t label_count;
  Status read_labels_status =
      ReadLabelsFile(labels_file_name, &labels, &label_count);
  if (!read_labels_status.ok()) {
    LOG(ERROR) << read_labels_status;
    return read_labels_status;
  }
  const int how_many_labels = std::min(5, static_cast<int>(label_count));
  Tensor indices;
  Tensor scores;
  TF_RETURN_IF_ERROR(GetTopLabels(outputs, how_many_labels, &indices, &scores));
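  // For this model the softmax output has shape [1, num_classes], so the TopK
  // results are shaped [1, how_many_labels]; flat<T>() views them as 1-D.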
  tensorflow::TTypes<float>::Flat scores_flat = scores.flat<float>();
  tensorflow::TTypes<int32>::Flat indices_flat = indices.flat<int32>();
  for (int pos = 0; pos < how_many_labels; ++pos) {
    const int label_index = indices_flat(pos);
    const float score = scores_flat(pos);
    LOG(INFO) << labels[label_index] << " (" << label_index << "): " << score;
  }
  return Status::OK();
}
 
// This is a testing function that returns whether the top label index is the
// one that's expected.
Status CheckTopLabel(const std::vector<Tensor>& outputs, int expected,
                     bool* is_expected) {
  *is_expected = false;
  Tensor indices;
  Tensor scores;
  const int how_many_labels = 1;
  TF_RETURN_IF_ERROR(GetTopLabels(outputs, how_many_labels, &indices, &scores));
  tensorflow::TTypes<int32>::Flat indices_flat = indices.flat<int32>();
  if (indices_flat(0) != expected) {
    LOG(ERROR) << "Expected label #" << expected << " but got #"
               << indices_flat(0);
    *is_expected = false;
  } else {
    *is_expected = true;
  }
  return Status::OK();
}
 
int main(int argc, char* argv[]) {
  // These are the command-line flags the program can understand.
  // They define where the graph and input data is located, and what kind of
  // input the model expects. If you train your own model, or use something
  // other than GoogLeNet you'll need to update these.
  string image = "tensorflow/examples/label_image/data/grace_hopper.jpg";
  string graph =
      "tensorflow/examples/label_image/data/"
      "tensorflow_inception_graph.pb";
  string labels =
      "tensorflow/examples/label_image/data/"
      "imagenet_comp_graph_label_strings.txt";
  int32 input_width = 299;
  int32 input_height = 299;
  int32 input_mean = 128;
  int32 input_std = 128;
  string input_layer = "Mul";
  string output_layer = "softmax";
  bool self_test = false;
  string root_dir = "";
  std::vector<Flag> flag_list = {
      Flag("image", &image, "image to be processed"),
      Flag("graph", &graph, "graph to be executed"),
      Flag("labels", &labels, "name of file containing labels"),
      Flag("input_width", &input_width, "resize image to this width in pixels"),
      Flag("input_height", &input_height,
           "resize image to this height in pixels"),
      Flag("input_mean", &input_mean, "scale pixel values to this mean"),
      Flag("input_std", &input_std, "scale pixel values to this std deviation"),
      Flag("input_layer", &input_layer, "name of input layer"),
      Flag("output_layer", &output_layer, "name of output layer"),
      Flag("self_test", &self_test, "run a self test"),
      Flag("root_dir", &root_dir,
           "interpret image and graph file names relative to this directory"),
  };
  string usage = tensorflow::Flags::Usage(argv[0], flag_list);
  const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
  if (!parse_result) {
    LOG(ERROR) << usage;
    return -1;
  }
 
  // We need to call this to set up global state for TensorFlow.
  tensorflow::port::InitMain(argv[0], &argc, &argv);
  if (argc > 1) {
    LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
    return -1;
  }
 
  // First we load and initialize the model.
  std::unique_ptr<tensorflow::Session> session;
  string graph_path = tensorflow::io::JoinPath(root_dir, graph);
  Status load_graph_status = LoadGraph(graph_path, &session);
  if (!load_graph_status.ok()) {
    LOG(ERROR) << load_graph_status;
    return -1;
  }
 
  // Get the image from disk as a float array of numbers, resized and normalized
  // to the specifications the main graph expects.
  std::vector<Tensor> resized_tensors;
  string image_path = tensorflow::io::JoinPath(root_dir, image);
  Status read_tensor_status =
      ReadTensorFromImageFile(image_path, input_height, input_width, input_mean,
                              input_std, &resized_tensors);
  if (!read_tensor_status.ok()) {
    LOG(ERROR) << read_tensor_status;
    return -1;
  }
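  // ReadTensorFromImageFile() returns a single tensor shaped
  // [1, input_height, input_width, 3] (for a JPEG or PNG input), ready to feed
  // to the network.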
  const Tensor& resized_tensor = resized_tensors[0];
 
  // Actually run the image through the model.
  std::vector<Tensor> outputs;
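  // Feed the preprocessed image to the graph's input node (default "Mul") and
  // fetch the output node (default "softmax"); outputs[0] holds the class scores.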
  Status run_status = session->Run({{input_layer, resized_tensor}},
                                   {output_layer}, {}, &outputs);
  if (!run_status.ok()) {
    LOG(ERROR) << "Running model failed: " << run_status;
    return -1;
  }
 
  // This is for automated testing to make sure we get the expected result with
  // the default settings. We know that label 866 (military uniform) should be
  // the top label for the Admiral Hopper image.
  if (self_test) {
    bool expected_matches;
    Status check_status = CheckTopLabel(outputs, 866, &expected_matches);
    if (!check_status.ok()) {
      LOG(ERROR) << "Running check failed: " << check_status;
      return -1;
    }
    if (!expected_matches) {
      LOG(ERROR) << "Self-test failed!";
      return -1;
    }
  }
 
  // Do something interesting with the results we've generated.
  Status print_status = PrintTopLabels(outputs, labels);
  if (!print_status.ok()) {
    LOG(ERROR) << "Running print failed: " << print_status;
    return -1;
  }
 
  return 0;
}
```
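
If you want to classify many images, the expensive step, loading the graph and creating the session, only has to happen once. The sketch below is not part of the original example: it reuses `ReadTensorFromImageFile()`, `PrintTopLabels()` and a session created by `LoadGraph()` to label a list of files, assuming the defaults from `main()` above (299x299 input, mean/std of 128, nodes `Mul` and `softmax`), and is meant to be appended to the same source file:

```cpp
// Label several images with a single Session. Reuses the helpers defined above.
tensorflow::Status LabelImages(const std::vector<std::string>& image_paths,
                               tensorflow::Session* session,
                               const std::string& labels_path) {
  for (const std::string& path : image_paths) {
    std::vector<tensorflow::Tensor> resized;
    // Defaults from main() above: 299x299 input, mean 128, std 128.
    TF_RETURN_IF_ERROR(
        ReadTensorFromImageFile(path, 299, 299, 128, 128, &resized));
    std::vector<tensorflow::Tensor> outputs;
    // Feed the "Mul" input node and fetch the "softmax" output node.
    TF_RETURN_IF_ERROR(
        session->Run({{"Mul", resized[0]}}, {"softmax"}, {}, &outputs));
    TF_RETURN_IF_ERROR(PrintTopLabels(outputs, labels_path));
  }
  return tensorflow::Status::OK();
}
```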

Reposted from blog.csdn.net/maweifei/article/details/82353134