Python: rendering detection annotation data (bounding boxes) onto an image

My coding ability really is lacking.

I went back and forth over this code for three days before I finally understood it and could write it out myself.

In fact it is not difficult to understand — I just kept putting off actually reading it.

 
import matplotlib.pyplot as plt
import cv2
import numpy as np
import json

def process_api_return_value(api_str):
    """Parse a concatenated-JSON detection string into boxes and labels.

    The API returns detections as a comma-separated run of JSON objects
    (not a JSON array), e.g. ``'{...},{...}'``.  Each object carries
    ``X1``/``X2``/``Y1``/``Y2`` pixel coordinates, an integer ``label``
    and a float ``score``.

    Args:
        api_str: raw string of concatenated ``{...}`` JSON objects.
            (Renamed from ``str``, which shadowed the builtin.)

    Returns:
        boxes: ``np.ndarray`` of shape (N, 5), rows ``[x1, y1, x2, y2, score]``.
        classes: list of integer class labels, parallel to ``boxes``.
    """
    # Split the raw string into top-level '{...}' chunks by tracking brace
    # depth.  A plain depth counter replaces the original hand-rolled
    # Stack class: depth 0 -> '{' marks a chunk start, depth back to 0
    # on '}' marks its end.
    objects = []
    depth = 0
    begin = 0
    for index, ch in enumerate(api_str):
        if ch == '{':
            if depth == 0:
                begin = index
            depth += 1
        elif ch == '}':
            depth -= 1
            if depth == 0:
                objects.append(api_str[begin:index + 1])

    boxes_list = []
    classes = []
    for obj in objects:
        dic = json.loads(obj)
        classes.append(dic['label'])
        # Row layout expected by vis_one_image: [x1, y1, x2, y2, score].
        boxes_list.append([dic['X1'], dic['Y1'], dic['X2'], dic['Y2'],
                           dic['score']])

    boxes = np.array(boxes_list)
    return boxes, classes

def get_class_string(class_name, score):
    """Return a display label such as ``'pepsi_cola 0.99'``.

    Note: the original chained ``.lstrip('0')`` onto the formatted score,
    intending ``'0.99' -> '.99'``, but the leading space in the format
    string made it a no-op (the first character is always a space), so
    the dead call has been removed.  Output is unchanged.
    """
    return f'{class_name} {score:0.2f}'



def vis_one_image(im, class_names,
                  boxes, classes,
                  thresh=0.9, dpi=100,
                  box_alpha=1.0, show_class=True,
                  filename=None, ext='png'):
    """Draw detection boxes (and optionally class labels) on an image.

    Args:
        im: BGR image as returned by ``cv2.imread`` (converted to RGB here).
        class_names: list mapping integer label -> display name.
        boxes: (N, 5) array, rows ``[x1, y1, x2, y2, score]``.
        classes: list of integer labels parallel to ``boxes``.
        thresh: minimum score for a box to be drawn.
        dpi: figure resolution; also used when saving.
        box_alpha: rectangle edge alpha.
        show_class: whether to draw the class/score text above each box.
        filename: if given, save to ``'<filename>.<ext>'`` instead of showing.
        ext: file extension for the saved figure.
    """
    # Nothing to draw: no boxes at all, or no box passes the threshold.
    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return
    im = im[:, :, ::-1]  # BGR (OpenCV) -> RGB (matplotlib)
    fig = plt.figure(frameon=False)
    # shape[1] is the width in pixels, shape[0] the height.
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Draw the largest boxes first so smaller boxes stay visible on top.
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)  # big to small

    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue  # fixed: was the typo 'contiune' (raised NameError)

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],  # fixed: was 'Bbox [ 2]' (NameError)
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=1.0, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,  # fixed: was 'Bbox [0]'
                get_class_string(class_names[classes[i]], score),
                fontsize=11,
                family='serif',
                # fixed: matplotlib's keyword is lowercase 'bbox', not 'Bbox ='
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

    if filename is not None:
        fig.savefig(filename + '.' + ext, dpi=dpi)
        plt.close('all')
    else:
        plt.imshow(im)
        plt.show()






if __name__ == '__main__':
    # Index 0 is reserved for the background class; the integer labels in
    # the API response index directly into this list.
    class_names = ['__background__',  # always index 0
                   'bai_sui_shan', 'cestbon', 'cocacola', 'jing_tian',
                   'pepsi_cola', 'sprite', 'starbucks_black_tea',
                   'starbucks_matcha', 'starbucks_mocha', 'vita_lemon_tea',
                   'vita_soymilk_blue', 'wanglaoji_green']

    # Load the image the detections refer to (OpenCV reads it as BGR).
    image = cv2.imread("/home/stt/data/wh_t/img/1.jpg")

    # Sample detection payload: comma-separated JSON objects from the API.
    detections = '{"X1":317.0668,"X2":334.80807,"Y1":111.80373,"Y2":138.84392,"label":1,"rotate":0.0,"scale":1.0,"score":0.9973503},{"X1":209.34439,"X2":233.45705,"Y1":98.4779,"Y2":123.20604,"label":4,"rotate":0.0,"scale":1.0,"score":0.98664963},{"X1":120.77178,"X2":154.04027,"Y1":159.96211,"Y2":227.42447,"label":12,"rotate":0.0,"scale":1.0,"score":0.37864015},{"X1":161.31192,"X2":200.91737,"Y1":31.158348,"Y2":51.811104,"label":1,"rotate":0.0,"scale":1.0,"score":0.30787778}'

    parsed_boxes, parsed_classes = process_api_return_value(detections)
    # thresh=0.0 so every detection in the payload is drawn.
    vis_one_image(image, class_names, parsed_boxes, parsed_classes,
                  thresh=0.0, box_alpha=1.0, show_class=True)
 

 

 

Recommended reading

Originally published at www.cnblogs.com/stt-ac/p/11076582.html