Training the YOLO series (YOLOv5 to PPYOLOE) with MMYOLO

This post walks through training the YOLO series of algorithms with the MMYOLO detection toolbox, covering environment setup and the configuration files for the YOLO series models.

MMYOLO repository: https://github.com/open-mmlab/mmyolo

Documentation: https://mmyolo.readthedocs.io/zh_CN/latest/

I. Environment setup

1. Create a virtual environment

conda create --name mmyolo python=3.8 -y

Activate the virtual environment:

conda activate mmyolo

2. Install PyTorch and torchvision

Install the versions that match your own setup (Python and CUDA), for example:

pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 -f https://download.pytorch.org/whl/torch_stable.html

Alternatively, download the wheels manually from https://download.pytorch.org/whl/torch_stable.html and install them with pip.

3. Install MMEngine, MMCV, and MMDetection 3.x

pip install -U openmim
mim install mmengine
mim install 'mmcv>=2.0.0rc1'
mim install "mmdet>=3.0.0rc5,<3.1.0"

4. Clone and install MMYOLO

git clone https://github.com/open-mmlab/mmyolo.git
cd mmyolo
# Install albumentations
pip install -r requirements/albu.txt
# Install MMYOLO
mim install -v -e .
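
A quick way to check the installation is to import the main packages inside the mmyolo environment and print their versions:

# verify that PyTorch, MMCV, MMDetection and MMYOLO import correctly
import torch
import mmcv
import mmdet
import mmyolo

print('torch:', torch.__version__, '| CUDA available:', torch.cuda.is_available())
print('mmcv:', mmcv.__version__)
print('mmdet:', mmdet.__version__)
print('mmyolo:', mmyolo.__version__)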

II. Training the YOLO series algorithms (YOLOv6 and YOLOX as examples)

The training configs for YOLOv5 through YOLOv8 are largely the same; the YOLOX config differs slightly.

1. Build the dataset

Training uses a COCO-format dataset. Annotate the images with labelme, then convert the labelme JSON files to COCO format with the following script:

# -*- coding:utf-8 -*-
# !/usr/bin/env python

import argparse
import json
import matplotlib.pyplot as plt
import skimage.io as io
import cv2
from labelme import utils
import numpy as np
import glob
import PIL.Image
import PIL.ImageDraw  # needed by polygons_to_mask; missing in the original post


class MyEncoder(json.JSONEncoder):
    """JSON encoder that can serialise numpy types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)


class labelme2coco(object):
    def __init__(self, labelme_json=[], save_json_path='./tran.json'):
        '''
        :param labelme_json: list of paths to all the labelme json files
        :param save_json_path: where to write the resulting COCO json
        '''
        self.labelme_json = labelme_json
        self.save_json_path = save_json_path
        self.images = []
        self.categories = []
        self.annotations = []
        # self.data_coco = {}
        self.label = []
        self.annID = 1
        self.height = 0
        self.width = 0

        self.save_json()

    def data_transfer(self):
        for num, json_file in enumerate(self.labelme_json):
            with open(json_file, 'r') as fp:
                data = json.load(fp)  # load the labelme json file
                self.images.append(self.image(data, num))
                for shapes in data['shapes']:
                    label = shapes['label']
                    if label not in self.label:
                        self.categories.append(self.categorie(label))
                        self.label.append(label)
                    # points from a rectangle annotation contain only two corners;
                    # uncomment the two lines below to expand them to four points if needed
                    points = shapes['points']
                    # points.append([points[0][0], points[1][1]])
                    # points.append([points[1][0], points[0][1]])
                    self.annotations.append(self.annotation(points, label, num))
                    self.annID += 1

    def image(self, data, num):
        image = {}
        img = utils.img_b64_to_arr(data['imageData'])  # decode the image data embedded in the json
        # img = io.imread(data['imagePath'])  # or open the image by its path
        # img = cv2.imread(data['imagePath'], 0)
        height, width = img.shape[:2]
        img = None
        image['height'] = height
        image['width'] = width
        image['id'] = num + 1
        # image['file_name'] = data['imagePath'].split('/')[-1]
        # NOTE: the slice below is specific to the original author's file names;
        # in general, prefer the commented-out line above
        image['file_name'] = data['imagePath'][3:14]
        self.height = height
        self.width = width

        return image

    def categorie(self, label):
        categorie = {}
        categorie['supercategory'] = 'Cancer'
        categorie['id'] = len(self.label) + 1  # id 0 is reserved for the background
        categorie['name'] = label
        return categorie

    def annotation(self, points, label, num):
        annotation = {}
        annotation['segmentation'] = [list(np.asarray(points).flatten())]
        annotation['iscrowd'] = 0
        annotation['image_id'] = num + 1
        # annotation['bbox'] = str(self.getbbox(points))  # the original author stored the bbox as a string
        # because dumping a list raised an error; list(map(int, a[1:-1].split(','))) converts it back
        annotation['bbox'] = list(map(float, self.getbbox(points)))
        annotation['area'] = annotation['bbox'][2] * annotation['bbox'][3]
        annotation['category_id'] = self.getcatid(label)  # note: the original code hard-coded this to 1
        annotation['id'] = self.annID
        return annotation

    def getcatid(self, label):
        for categorie in self.categories:
            if label == categorie['name']:
                return categorie['id']
        return 1

    def getbbox(self, points):
        # img = np.zeros([self.height, self.width], np.uint8)
        # cv2.polylines(img, [np.asarray(points)], True, 1, lineType=cv2.LINE_AA)  # draw the boundary
        # cv2.fillPoly(img, [np.asarray(points)], 1)  # fill the polygon interior with 1s
        polygons = points

        mask = self.polygons_to_mask([self.height, self.width], polygons)
        return self.mask2box(mask)

    def mask2box(self, mask):
        '''Recover the bounding box from a mask.
        mask: an [h, w] array of 0s and 1s.
        The pixels equal to 1 belong to the object; the min/max row and column
        indices give the top-left and bottom-right corners of the box.
        '''
        index = np.argwhere(mask == 1)
        rows = index[:, 0]
        clos = index[:, 1]
        # top-left corner
        left_top_r = np.min(rows)  # y
        left_top_c = np.min(clos)  # x

        # bottom-right corner
        right_bottom_r = np.max(rows)
        right_bottom_c = np.max(clos)
        return [left_top_c, left_top_r, right_bottom_c - left_top_c,
                right_bottom_r - left_top_r]  # [x, y, w, h], the COCO bbox format

    def polygons_to_mask(self, img_shape, polygons):
        mask = np.zeros(img_shape, dtype=np.uint8)
        mask = PIL.Image.fromarray(mask)
        xy = list(map(tuple, polygons))
        PIL.ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
        mask = np.array(mask, dtype=bool)
        return mask

    def data2coco(self):
        data_coco = {}
        data_coco['images'] = self.images
        data_coco['categories'] = self.categories
        data_coco['annotations'] = self.annotations
        return data_coco

    def save_json(self):
        self.data_transfer()
        self.data_coco = self.data2coco()
        # write the COCO-format json file
        json.dump(self.data_coco, open(self.save_json_path, 'w'), indent=4, cls=MyEncoder)


labelme_json = glob.glob('./Annotations/*.json')
labelme2coco(labelme_json, './json/test.json')
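
Before moving on, it is worth a quick sanity check of the generated COCO file (./json/test.json written by the script above), e.g. by counting images, categories and annotations:

# quick check of the converted COCO annotations
import json

with open('./json/test.json') as f:
    coco = json.load(f)

print('images:', len(coco['images']))
print('categories:', [c['name'] for c in coco['categories']])
print('annotations:', len(coco['annotations']))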

Alternatively, refer to: https://github.com/open-mmlab/mmyolo/blob/main/docs/en/user_guides/custom_dataset.md
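
The configs below assume everything sits under a single data_root directory, matching the ann_file and data_prefix values used later, roughly laid out as:

20230130/              # data_root
├── annotations/
│   ├── train.json
│   └── val.json
└── images/            # all train/val images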

2. Write the YOLOv6 and YOLOX config files

Create a config file named yolov6.py.

The location is up to you; I used configs/custome/yolov6.py:

_base_ = '/home/work/mmyolo-dev/configs/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py'
max_epochs = 100
data_root = '/home/work/YOLO_presearch/20230130/'  # path to the COCO-format dataset
work_dir = './work_dirs/yolov6'  # directory where checkpoints and logs are saved
#load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth'  # uncomment to fine-tune from the official checkpoint
train_batch_size_per_gpu = 4
train_num_workers = 4  # train_num_workers = nGPU x 4
save_epoch_intervals = 2
# base_lr_default * (your_bs / default_bs)
base_lr = 0.01 / 4
class_name = ('tou','chaxiao','ding','zd','noding')
num_classes = len(class_name)
metainfo = dict(
    classes=class_name, 
    palette=[(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228)])
train_cfg = dict(
    max_epochs=max_epochs,
    val_begin=20,
    val_interval=save_epoch_intervals,
    dynamic_intervals=[(max_epochs - _base_.num_last_epochs, 1)])
model = dict(
    bbox_head=dict(head_module=dict(num_classes=num_classes)),
    train_cfg=dict(
        initial_assigner=dict(num_classes=num_classes),
        assigner=dict(num_classes=num_classes)))
train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        _delete_=True,
        type='RepeatDataset',
        times=1,
        dataset=dict(
            type=_base_.dataset_type,
            data_root=data_root,
            metainfo=metainfo,
            ann_file='annotations/train.json',
            data_prefix=dict(img='images/'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=_base_.train_pipeline)))
val_dataloader = dict(
    dataset=dict(
        metainfo=metainfo,
        data_root=data_root,
        ann_file='annotations/val.json',
        data_prefix=dict(img='images/')))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + 'annotations/val.json')
test_evaluator = val_evaluator
optim_wrapper = dict(optimizer=dict(lr=base_lr))
default_hooks = dict(
    checkpoint=dict(
        type='CheckpointHook',
        interval=save_epoch_intervals,
        max_keep_ckpts=5,
        save_best='auto'),
    param_scheduler=dict(max_epochs=max_epochs),
    logger=dict(type='LoggerHook', interval=10))
custom_hooks = [
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0001,
        update_buffers=True,
        strict_load=False,
        priority=49),
    dict(
        type='mmdet.PipelineSwitchHook',
        switch_epoch=max_epochs - _base_.num_last_epochs,
        switch_pipeline=_base_.train_pipeline_stage2)
]
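
Before launching a full run, it can help to visually confirm that the dataset and pipeline are wired up correctly; MMYOLO ships a dataset browser for this (the script path may differ slightly between versions):

python tools/analysis_tools/browse_dataset.py configs/custome/yolov6.py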

Next, create a config file named yolox.py.

Again the location is up to you; I used configs/custome/yolox.py:

_base_ = '/home/work/mmyolo-dev/configs/yolox/yolox_m_fast_8xb8-300e_coco.py'
max_epochs = 100  # maximum number of training epochs
data_root = '/home/work/YOLO_presearch/20230130/'  # absolute path to the dataset root
work_dir = './work_dirs/yolox'
train_batch_size_per_gpu = 2
train_num_workers = 2 
save_epoch_intervals = 2
base_lr = 0.01 / 4
anchors = [ 
    [(68, 69), (154, 91), (143, 162)],  # P3/8
    [(242, 160), (189, 287), (391, 207)],  # P4/16
    [(353, 337), (539, 341), (443, 432)]  # P5/32
    ]
class_name = ('tou','huan','chaxiao','ding','zd')
num_classes = len(class_name)
metainfo = dict(
   classes=class_name,
   palette=[(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228)]
)
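
The YOLOX snippet above only declares variables; it stops before wiring them into the model and data loaders. What follows is a minimal continuation sketch (my addition, not from the original post), following the same override pattern as the YOLOv6 config above; the key names follow the upstream yolox_m_fast_8xb8-300e_coco.py base config and may need adjusting for your MMYOLO version. Note that YOLOX is anchor-free, so the anchors list above is not consumed by these overrides.

# continuation sketch, assumptions as described above
model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes)))
train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/train.json',
        data_prefix=dict(img='images/')))
val_dataloader = dict(
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/val.json',
        data_prefix=dict(img='images/')))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + 'annotations/val.json')
test_evaluator = val_evaluator
optim_wrapper = dict(optimizer=dict(lr=base_lr))
train_cfg = dict(max_epochs=max_epochs, val_interval=save_epoch_intervals)
default_hooks = dict(
    checkpoint=dict(interval=save_epoch_intervals, max_keep_ckpts=5, save_best='auto'),
    logger=dict(type='LoggerHook', interval=10))
# depending on the base config, the learning-rate schedule and the pipeline/mode
# switch hooks may also need their epoch counts adjusted for the shorter 100-epoch run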

3. Run the training
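
Training is launched with MMYOLO's standard training script, run from the repository root and pointed at the config written above; checkpoints and logs go to the work_dir set in the config (./work_dirs/yolov6 or ./work_dirs/yolox):

# single-GPU training
python tools/train.py configs/custome/yolov6.py

# multi-GPU training, e.g. on 2 GPUs (the YOLOX config is launched the same way)
bash tools/dist_train.sh configs/custome/yolox.py 2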

Reposted from blog.csdn.net/qq_41980080/article/details/129258400