MobileNet-SSD Intel Movidius SDK 2.0 实现

Movidius SDK 升级过一个大的版本,以便支持多个图在同一个计算棒上运行,但是官方给的例子基本都还是 1.0 的,然后通过一个模拟接口转换过去,这显然有违接口升级的初衷。

Migrating Python Applications from NCAPI v1 to NCAPI v2曾提及转换的要点:

1.0中的操作步骤基本是打开设备,读取图,加载图,加载张量,获取结果

而到了2.0变成了

打开设备,读取某张图,获取图对应的输入输出队列,加载张量,获取结果

这里给出2.0的例子

#coding=utf-8
#CK-Lab yanyu 2018.09.25
#initial demo with mnvc2 api

from mvnc import mvncapi
import cv2,time,os,sys
import numpy as np
from sys import argv
# Name of the OpenCV display window.
cv_window_name = "CKCamera"


# PASCAL VOC class labels, indexed by the class id the SSD network emits.
labels = ('background',
            'aeroplane', 'bicycle', 'bird', 'boat',
            'bottle', 'bus', 'car', 'cat', 'chair',
            'cow', 'diningtable', 'dog', 'horse',
            'motorbike', 'person', 'pottedplant',
            'sheep', 'sofa', 'train', 'tvmonitor')

# the ssd mobilenet image width and height
NETWORK_IMAGE_WIDTH = 300
NETWORK_IMAGE_HEIGHT = 300

# the minimal score for a box to be shown
min_score_percent = 60

# the resize_window arg will modify these if its specified on the commandline
resize_output = False
resize_output_width = 0
resize_output_height = 0

# directory where images fetched by the app are saved
camera_dir="ckcamera"
# running index for saved/captured images
captured_num=0

# Resize and rescale a BGR frame into the form SSD-MobileNet expects:
# 300x300 spatial size, pixel values mapped from 0..255 into -1.0..1.0.
def preprocess_image(source_image):
    scaled = cv2.resize(source_image, (NETWORK_IMAGE_WIDTH, NETWORK_IMAGE_HEIGHT))
    # zero-center, then scale (0.007843 is approximately 1/127.5)
    return (scaled - 127.5) * 0.007843
# Convert the raw SSD output tensor into a list of valid detections.
# output[0] holds the number of boxes; each box is a 7-value record
# starting at offset 7.  Records containing non-finite values or scoring
# at/below min_score_percent are dropped.  Each returned item is
# [left, top, right, bottom, class_id, percent, label] with the box
# coordinates still normalized (0..1) relative to img_src.
def filter_output(img_src,output):
    detections = []
    box_count = int(output[0])
    for idx in range(box_count):
        base = 7 + idx * 7
        record = output[base:base + 7]
        # boxes with non finite (inf, nan, etc) numbers must be ignored
        if not np.all(np.isfinite(record)):
            continue
        score = int(record[2] * 100)
        if score <= min_score_percent:
            continue
        cls = int(record[1])
        detections.append([record[3], record[4], record[5], record[6],
                           cls, score, labels[cls]])
    return detections
# Draw the filtered detections onto a copy of img_src and return the copy.
# objs entries are [left, top, right, bottom, class_id, percent, label]
# with coordinates normalized 0..1, as produced by filter_output.
def show_detections(img_src,objs):
    img_width = img_src.shape[1]
    img_height = img_src.shape[0]
    canvas = img_src.copy()
    for det in objs:
        left = int(det[0] * img_width)
        top = int(det[1] * img_height)
        right = int(det[2] * img_width)
        bottom = int(det[3] * img_height)
        cls_id = det[4]
        percent = det[5]

        caption = labels[int(cls_id)] + " (" + str(percent) + "%)"
        # orange bounding box, 2 px thick
        cv2.rectangle(canvas, (left, top), (right, bottom), (255, 128, 0), 2)

        # shade the label background greener the higher the confidence
        confidence_scale = (percent - min_score_percent) / (100.0 - min_score_percent)
        bg_color = (0, int(confidence_scale * 175), 75)
        text_color = (255, 255, 255)  # white text

        # place the caption just above the box, clamped to the frame top
        text_size = cv2.getTextSize(caption, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
        text_left = left
        text_top = top - text_size[1]
        if text_top < 1:
            text_top = 1
        text_right = text_left + text_size[0]
        text_bottom = text_top + text_size[1]
        cv2.rectangle(canvas, (text_left - 1, text_top - 1),
                      (text_right + 1, text_bottom + 1), bg_color, -1)
        cv2.putText(canvas, caption, (text_left, text_bottom),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 1)

    return canvas

class NCSDetection:
    """SSD-MobileNet detector on an Intel Movidius NCS via NCAPI v2.

    Lifecycle follows the v2 flow: open device, allocate the graph
    together with its input/output FIFO queues, queue inference tensors,
    read results from the output FIFO.
    """
    def __init__(self,graph_path="graph"):
        """Open the first NCS device and load the compiled graph file.

        Raises:
            RuntimeError: when no NCS stick is plugged in.  (The original
                code printed and returned, leaving a half-built object
                that crashed later with AttributeError.)
        """
        device_list = mvncapi.enumerate_devices()
        if len(device_list)==0:
            raise RuntimeError("No device found")
        self.device = mvncapi.Device(device_list[0])
        self.device.open()
        with open(graph_path, mode='rb') as f:
            graph_buffer = f.read()
        self.graph = mvncapi.Graph('detection')
        # allocate_with_fifos loads the graph and creates its I/O queues
        self.input_fifo, self.output_fifo = self.graph.allocate_with_fifos(self.device, graph_buffer)
    def __del__(self):
        self.close()
    def detect(self,img):
        """Run one inference on a BGR image; return filtered detections
        (see filter_output for the per-detection list layout)."""
        input_tensor=preprocess_image(img)
        # last arg is an opaque user object echoed back by read_elem; unused
        self.graph.queue_inference_with_fifo_elem(self.input_fifo, self.output_fifo, input_tensor.astype(np.float32), None)
        output, user_obj = self.output_fifo.read_elem()
        objs=filter_output(img,output)
        return objs

    def close(self):
        """Release FIFOs, graph and device.

        Safe to call multiple times and on a partially-initialized
        instance (missing attributes are skipped); a failure releasing
        one resource no longer aborts cleanup of the rest.
        """
        for attr in ("input_fifo", "output_fifo", "graph"):
            resource = getattr(self, attr, None)
            if resource is not None:
                try:
                    resource.destroy()
                except Exception as e:
                    print(e)
        device = getattr(self, "device", None)
        if device is not None:
            try:
                device.close()
                device.destroy()
            except Exception as e:
                print(e)

def test_one_image(detection,imgpath):
    """Run detection on one image file and display the annotated result.

    Blocks until a key is pressed in the OpenCV window.
    """
    img = cv2.imread(imgpath)
    if img is None:
        # imread returns None (it does not raise) for missing/unreadable
        # files; without this guard detect() fails with a cryptic error
        print("Cannot read image: " + imgpath)
        return
    objs = detection.detect(img)
    show = show_detections(img, objs)
    cv2.imshow("img", show)
    cv2.waitKey()

def test_dir(detection,dir="unittest"):
    """Run test_one_image on every regular file in *dir*.

    Entries are sorted for a deterministic order (os.listdir order is
    arbitrary) and subdirectories are skipped since they are not images.
    """
    for name in sorted(os.listdir(dir)):
        path = os.path.join(dir, name)
        if os.path.isfile(path):
            test_one_image(detection, path)

def test_camera(detection,index=0):
    """Run live detection on camera *index* until the stream ends or the
    user presses 'q' in the display window.

    The capture device is always released, even if detect() raises
    (the original leaked the VideoCapture handle and had no quit path).
    """
    cap = cv2.VideoCapture(index)
    try:
        while True:
            ret, img = cap.read()
            if not ret:
                break
            objs = detection.detect(img)
            show = show_detections(img, objs)
            cv2.imshow("img", show)
            # ~1 ms poll of the GUI event queue; 'q' quits cleanly
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cap.release()

def main():
    """Entry point: build the NCS detector and stream from the default camera."""
    detector = NCSDetection()
    # alternative: test_dir(detector) to run over the "unittest" image folder
    test_camera(detector)

# Run the demo only when executed as a script, not when imported.
if __name__=="__main__":
    main()

猜你喜欢

转载自blog.csdn.net/minstyrain/article/details/82840549
今日推荐