Object recognition with TensorFlow on Win7

Implementation environment:

1. Win7

2. Python, with TensorFlow installed (the code below requires TensorFlow 1.4 or later)

3. Required libraries: matplotlib, lxml, pillow, Cython

For details, see the official installation guide: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md

4. The object_detection package, downloadable from: https://github.com/tensorflow/models

After downloading, unzip the archive; the object_detection package is inside the research folder.

5. Compiled protos files. The .proto files that ship in object_detection are not compiled; pre-compiled files can be downloaded from: https://github.com/1529591487/Object-Detection

Simply replace the protos folder inside object_detection with the downloaded one. Alternatively, you can compile the .proto files yourself, as sketched below.
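
If you prefer not to use the pre-compiled files, the .proto files can be compiled with protoc instead (this is the route the installation guide linked above describes). A minimal sketch, assuming protoc is installed and on the PATH and that it is run from the models-master/research directory:

import glob
import subprocess

# Compile every .proto in object_detection/protos into a *_pb2.py module.
proto_files = glob.glob('object_detection/protos/*.proto')
subprocess.check_call(['protoc'] + proto_files + ['--python_out=.'])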

 Code:

1.

import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image

# Change this to the location of the object_detection package you downloaded
sys.path.append(r"E:\learning materials\artificial intelligence\models-master\research")
from object_detection.utils import ops as utils_ops

if tf.__version__ < '1.4.0':
  raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')

2.

%matplotlib inline

3.

from object_detection.utils import label_map_util

from object_detection.utils import visualization_utils as vis_util

A warning may appear here, but it does not affect execution.

4.

# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

# This path also needs to be modified
PATH_TO_LABELS = os.path.join(r'E:\learning materials\artificial intelligence\models-master\research\object_detection\data', 'mscoco_label_map.pbtxt')

NUM_CLASSES = 90
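
Any archive from the TensorFlow detection model zoo can be substituted by changing MODEL_NAME; the rest of the code stays the same. The name below is only an illustration, so check the zoo page for the exact archive names:

# Illustrative only - verify the exact archive name on the detection model zoo page.
# MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'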

5.

opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
  file_name = os.path.basename(file.name)
  if 'frozen_inference_graph.pb' in file_name:
    tar_file.extract(file, os.getcwd())
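
A small optional tweak (my addition, not in the original): skip the download when the archive is already on disk, which saves time on re-runs:

# Optional variant: only download the archive if it is not already present.
if not os.path.exists(MODEL_FILE):
  opener = urllib.request.URLopener()
  opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
  file_name = os.path.basename(file.name)
  if 'frozen_inference_graph.pb' in file_name:
    tar_file.extract(file, os.getcwd())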

6.

detection_graph = tf.Graph()
with detection_graph.as_default():
  od_graph_def = tf.GraphDef()
  with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
    serialized_graph = fid.read()
    od_graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(od_graph_def, name='')

7.

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
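
category_index maps numeric class IDs to label dictionaries; the visualization step uses it to turn predicted class IDs into readable names. A quick sanity check (class 1 is 'person' in the COCO label map):

print(category_index[1])  # expected output along the lines of {'id': 1, 'name': 'person'}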

8.

def load_image_into_numpy_array(image):
  (im_width, im_height) = image.size
  return np.array(image.getdata()).reshape(
      (im_height, im_width, 3)).astype(np.uint8)
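
Note that this helper assumes a 3-channel RGB image; a grayscale or RGBA file makes the reshape fail, which is one plausible cause of the failures mentioned at the end of this post. A hedged variant that converts the image first:

def load_image_into_numpy_array(image):
  # Convert first so grayscale / RGBA inputs do not break the (h, w, 3) reshape.
  image = image.convert('RGB')
  (im_width, im_height) = image.size
  return np.array(image.getdata()).reshape(
      (im_height, im_width, 3)).astype(np.uint8)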

9.

def run_inference_for_single_image(image, graph):
  with graph.as_default():
    with tf.Session() as sess:
      # Get handles to input and output tensors
      ops = tf.get_default_graph().get_operations()
      all_tensor_names = {output.name for op in ops for output in op.outputs}
      tensor_dict = {}
      for key in [
          'num_detections', 'detection_boxes', 'detection_scores',
          'detection_classes', 'detection_masks'
      ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
          tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
              tensor_name)
      if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
      image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

      # Run inference
      output_dict = sess.run(tensor_dict,
                             feed_dict={image_tensor: np.expand_dims(image, 0)})

      # all outputs are float32 numpy arrays, so convert types as appropriate
      output_dict['num_detections'] = int(output_dict['num_detections'][0])
      output_dict['detection_classes'] = output_dict[
          'detection_classes'][0].astype(np.uint8)
      output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
      output_dict['detection_scores'] = output_dict['detection_scores'][0]
      if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
  return output_dict
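
One thing to be aware of: run_inference_for_single_image opens a new tf.Session on every call, so looping over many images is slow. A rough sketch of a variant (my own, not from the original tutorial) that reuses a single session; it only fetches the box/score/class/count tensors and skips the mask handling:

def run_inference_for_images(images, graph):
  # images: list of HxWx3 uint8 numpy arrays; returns one dict of numpy arrays per image.
  results = []
  with graph.as_default():
    with tf.Session() as sess:
      image_tensor = graph.get_tensor_by_name('image_tensor:0')
      fetch_names = ['detection_boxes', 'detection_scores',
                     'detection_classes', 'num_detections']
      fetches = {name: graph.get_tensor_by_name(name + ':0') for name in fetch_names}
      for image_np in images:
        results.append(sess.run(
            fetches, feed_dict={image_tensor: np.expand_dims(image_np, 0)}))
  return results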

10.

IMAGE_SIZE = (36, 24)
# Set the image path here
mydir = r'E:\learning materials\artificial intelligence\models-master\research\object_detection\test_images'  # mydir = r'G:\wallpaper'
for filename in os.listdir(mydir):
  if os.path.splitext(filename)[1] == '.jpg':
    filepath = os.path.join(mydir, filename)
    print(filepath)
    image = Image.open(filepath)
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np, detection_graph)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=8)
    fig1 = plt.gcf()
    plt.figure(figsize=IMAGE_SIZE)
    plt.imshow(image_np)
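
The %matplotlib inline magic from step 2 makes these figures render automatically in the notebook. If you run the same loop as a plain Python script instead, display or save each figure explicitly, for example by appending lines like these at the end of the loop body (the '_detected.png' file name is just an illustration):

    plt.savefig(os.path.splitext(filepath)[0] + '_detected.png')
    plt.show()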

Result:

Code reference: https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb

 

Detection fails on some images, and I have not yet figured out why. Feel free to get in touch to discuss.

 
