YOLOv5 script: automatic label filtering

The first part of the script:

import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
import torch
import cv2
import numpy as np
import torchvision.transforms as transforms

from models.common import DetectMultiBackend
from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
                           coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
                           scale_coords, xywh2xyxy, xyxy2xywh)
from PIL import Image,ImageFont,ImageDraw
import os
import xml.etree.ElementTree as ET
from xml.dom import minidom
os.environ["CUDA_VISIBLE_DEVICES"]='0'
COLORS = np.random.uniform(0, 255, size=(80, 3))

def WriteObjectsToXml(ObjDict, save_path):
    # Build a VOC-style XML annotation from ObjDict and write it into save_path.
    annotation = ET.Element("annotation")
    filename = ET.Element("filename")
    filename.text = ObjDict["filename"]
    size = ET.Element("size")
    width = ET.Element("width")
    width.text = ObjDict["width"]
    height = ET.Element("height")
    height.text = ObjDict["height"]
    size.extend([width, height])
    annotation.extend([filename, size])
    # =======object_start=======
    # ===== loop this block to add the annotation info for each object =====
    # === variables that need to change: name xmin ymin xmax ymax
    counter_obj = 0
    for i, obj in enumerate(ObjDict['object']):
        # cname = obj.find('type').text  # this field is 'type' in 点我科技 annotations and 'name' in the VOC dataset

        object = ET.Element("object")
        name = ET.Element("name")
        name.text = obj['name']

        difficult = ET.Element("difficult")
        difficult.text = obj['difficult']
        bndbox = ET.Element("bndbox")
        xmin = ET.Element("xmin")
        xmin.text = obj['bndbox']['xmin']
        ymin = ET.Element("ymin")
        ymin.text = obj['bndbox']['ymin']
        xmax = ET.Element("xmax")
        xmax.text = obj['bndbox']['xmax']
        ymax = ET.Element("ymax")
        ymax.text = obj['bndbox']['ymax']
        bndbox.extend([xmin, ymin, xmax, ymax])
        # object.extend([name, pose, truncated, difficult,bndbox])
        object.extend([name, difficult, bndbox])
        # =======object_end=======
        annotation.append(object)

    rawtext = ET.tostring(annotation)
    dom = minidom.parseString(rawtext)
    with open(os.path.join(save_path, filename.text.split('.')[0] + '.xml'), "w") as f:
        dom.writexml(f, indent="", addindent="  ", newl="\n", encoding="utf-8")
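
WriteObjectsToXml expects every value in ObjDict to already be a string, since ElementTree element .text fields must be strings. A minimal usage sketch follows; the output folder, file name, image size, class name and box values are all hypothetical examples.

os.makedirs('./Annotations', exist_ok=True)   # hypothetical output folder
example = {
    'filename': 'demo.jpg',                   # hypothetical image name
    'width': '1920', 'height': '1080',
    'object': [{
        'name': 'person', 'difficult': '0',
        'bndbox': {'xmin': '100', 'ymin': '200', 'xmax': '300', 'ymax': '400'},
    }],
}
WriteObjectsToXml(example, './Annotations')   # writes ./Annotations/demo.xml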

def draw_detections_pilonORGImg(path, input_size, Rotation, boxes, colors, names, confidences):
    # Draw detections on the original image, rescaling boxes from the network input size.
    img1 = cv2.imread(path)
    if Rotation:
        img1 = img1.transpose(1, 0, 2)[:, ::-1]
    height,width,_=img1.shape
    net_h,net_w=input_size
    x_scale,y_scale=(width/net_w,height/net_h)
    imgshow = Image.fromarray(img1)
    draw = ImageDraw.Draw(imgshow)

    for box, color, name, confidence in zip(boxes, colors, names, confidences):
        xmin, ymin, xmax, ymax = box
        xmin=xmin*x_scale
        xmax=xmax*x_scale
        ymin=ymin*y_scale
        ymax=ymax*y_scale
        color=tuple(color.astype(np.uint8).tolist())
        draw.rectangle([xmin, ymin,xmax, ymax],outline=color, width=2)
        font = ImageFont.truetype('Arial.Unicode.ttf', 16)
        text = f'{name} {confidence:.2f}'

        text_width, text_height = draw.textsize(text, font=font)
        draw.text((xmin, ymin - text_height), text, font=font, fill=color)

    return np.array(imgshow)
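
The boxes passed to draw_detections_pilonORGImg are assumed to be in the coordinate frame of the resized network input (input_size) and are scaled back to the original image; note that ImageFont.truetype('Arial.Unicode.ttf', 16) requires that font file to be locatable by Pillow. A hypothetical call, assuming a 640x640 network input and lists already produced by parse_detections_yolov5 below:

# Hypothetical call; 'demo.jpg' and the 640x640 input size are illustrative assumptions
annotated = draw_detections_pilonORGImg('demo.jpg', (640, 640), False,
                                        boxes, colors, names, confidences)
cv2.imwrite('demo_annotated.jpg', annotated)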
def parse_detections_yolov5(results, coco_names):
    boxes, colors, names, confidences = [], [], [], []
    for i in range(results.shape[0]):
        # The original post is truncated at this line; the rest of the loop is a sketch
        # assuming the standard YOLOv5 post-NMS row layout (x1, y1, x2, y2, conf, cls).
        xmin, ymin, xmax, ymax = [int(v) for v in results[i][:4]]
        category = int(results[i][5])
        boxes.append((xmin, ymin, xmax, ymax))
        colors.append(COLORS[category])
        names.append(coco_names[category])
        confidences.append(float(results[i][4]))
    return boxes, colors, names, confidences

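A hedged end-to-end sketch of producing the detections that feed these helpers, assuming yolov5s.pt weights, a plain 640x640 resize instead of the repository's letterbox, and illustrative paths and thresholds:

# Inference sketch: weights path, image path and thresholds are illustrative assumptions
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DetectMultiBackend('yolov5s.pt', device=device)
coco_names = model.names                                      # class index -> class name

img0 = cv2.imread('demo.jpg')
img = cv2.resize(img0, (640, 640))[:, :, ::-1]                # BGR -> RGB at the network input size
tensor = torch.from_numpy(img.copy()).permute(2, 0, 1).float().div(255).unsqueeze(0).to(device)

pred = model(tensor)
pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0].cpu().numpy()

boxes, colors, names, confidences = parse_detections_yolov5(pred, coco_names)
# boxes are in the 640x640 frame and can now be handed to draw_detections_pilonORGImg
# or, after scaling back to the original image size, to WriteObjectsToXml.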
Origin blog.csdn.net/m0_47405013/article/details/129383978