Common tool code for deep learning object detection: Faster R-CNN testing, with detection results drawn at the same time

This Faster R-CNN tools script is modeled on demo.py: it writes out all detection results to per-image txt files and, while detecting, draws the detections on each image being processed.
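
Like the original demo.py, the script is driven by argparse. Assuming it is saved under tools/ (the file name demo_all.py here is only an example), a typical invocation from the repository root would be:

    python tools/demo_all.py --net vgg16 --gpu 0

--net must be one of the keys of the NETS dictionary below, --cpu forces Caffe into CPU mode, and the image and output paths are hard-coded further down and have to be adapted to your own directory layout.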


#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

"""
Draw the test results one by one
"""

import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse

CLASSES = ('__background__',
           'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
           'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
           'basketball-court', 'storage-tank', 'soccer-ball-field',
           'roundabout', 'harbor', 'swimming-pool', 'helicopter')
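# (the tuple above lists the 15 object categories of the DOTA aerial-image dataset,
#  plus the background class that Faster R-CNN expects at index 0)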

NETS = {'vgg16': ('VGG16',
                  'vgg16_faster_rcnn_iter_20000.caffemodel'),
                   #'VGG16_faster_rcnn_final.caffemodel'),
        'vggcp': ('VGGcp',
                  'vggcp_faster_rcnn_iter_30000.caffemodel'),
        'zf': ('ZF',
                  'ZF_faster_rcnn_final.caffemodel')}
                  


def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=2)
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=8, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()

### Draw detections with OpenCV and write the results to txt
def vis_detections_cv(image_name,im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    fname, extension=os.path.splitext(image_name)
    #Path of the per-image detection result txt (opened in append mode: this function is called once per class for the same image)
    fid = open(os.path.join('/home/respectv/soft-nms-dota/data/VOCdevkit2007/testsplit_images_bbox2/', '%s.txt' % fname), 'a')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        bbox_info = '%s %s %f %d %d %d %d\n' % (fname,class_name,score,int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]))
        fid.write(bbox_info)
        cv2.rectangle(im, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 0), 2)
        cv2.putText(im, '{:s}'.format(class_name), (int(bbox[0]), int(bbox[1] - 5)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    fid.close()
    #Path where the image with the drawn detections is saved
    cv2.imwrite(os.path.join('/home/respectv/soft-nms-dota/data/VOCdevkit2007/testsplit_image/demo', '%s.jpg' % fname), im)

def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    im_file = os.path.join('/home/respectv/soft-nms-dota/data/VOCdevkit2007/testsplit_image/images', image_name)
    im = cv2.imread(im_file)
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])
    # Visualize detections for each class
    CONF_THRESH = 0.6
    #CONF_THRESH = 0.8
    NMS_THRESH = 0.25
    #NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1 # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        #vis_detections(im, cls, dets, thresh=CONF_THRESH)
        vis_detections_cv(image_name,im, cls, dets, thresh=CONF_THRESH)

def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
                        choices=NETS.keys(), default='vgg16')
    parser.add_argument('--nms', dest='soft_nms', help='whether to use soft-NMS',
                        default=1, type=int)
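    # Note: the --nms value is stored in args.soft_nms but is not read anywhere else in
    # this listing; to have an effect it would need to be propagated to the test config.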

    args = parser.parse_args()
    return args

if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
                            'faster_rcnn_end2end', 'test.prototxt')
                            #'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS[args.demo_net][1])

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
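    # (the first forward passes are noticeably slower while memory is allocated, so two
    #  dummy passes are run up front and excluded from the per-image timing)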
    im = 128 * np.ones((1024, 1024, 3), dtype=np.uint8)
    #im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _= im_detect(net, im)
    #The path of the image to be detected
    im_dir = '/home/respectv/soft-nms-dota/data/VOCdevkit2007/testsplit_image/images'
    im_names = os.listdir(im_dir)
    image_num = 0
    for im_name in im_names:
        if 'txt' in im_name:
            continue
        image_num += 1
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for %s/%s' % (im_dir,im_name)
        demo(net, im_name)
    #plt.show()
    #cv2.destroyAllWindows()
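
Each txt file written by vis_detections_cv contains one detection per line in the format "image_name class_name score x1 y1 x2 y2" (score as a float, coordinates as integers). Below is a minimal sketch for reading those files back, e.g. to count or post-process the boxes; the helper name load_results and the directory argument are only illustrative:

import os

def load_results(result_dir):
    """Parse per-image result txt files into {image_name: [(cls, score, x1, y1, x2, y2), ...]}."""
    results = {}
    for fname in os.listdir(result_dir):
        if not fname.endswith('.txt'):
            continue
        with open(os.path.join(result_dir, fname)) as f:
            for line in f:
                parts = line.split()
                if len(parts) != 7:
                    continue
                name, cls = parts[0], parts[1]
                score = float(parts[2])
                x1, y1, x2, y2 = [int(v) for v in parts[3:7]]
                results.setdefault(name, []).append((cls, score, x1, y1, x2, y2))
    return results

# Example usage (path matches the output directory hard-coded above):
# dets = load_results('/home/respectv/soft-nms-dota/data/VOCdevkit2007/testsplit_images_bbox2')
# for name, boxes in dets.items():
#     print '%s: %d detections' % (name, len(boxes))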
