Training YOLOv8 on a custom dataset


https://blog.csdn.net/weixin_42166222/article/details/129391260
https://blog.csdn.net/m0_51530640/article/details/129975257

1. Install Labelme and annotate the dataset

conda create --name=labelme python=3.6
source activate labelme
pip install pyqt5  # pyqt5 can be installed via pip on python3
pip install labelme

# run the following command to launch labelme
labelme
1.1 Dataset preparation

Convert the Labelme annotation JSON files to the YOLO txt dataset format: json2txt_nomalize.py

# -*- coding: utf-8 -*-
import json
import os
import argparse
from tqdm import tqdm
 
 
def convert_label_json(json_dir, save_dir, classes):
    # only process .json files and make sure the output directory exists
    json_paths = [p for p in os.listdir(json_dir) if p.endswith('.json')]
    classes = classes.split(',')
    os.makedirs(save_dir, exist_ok=True)
 
    for json_path in tqdm(json_paths):
        path = os.path.join(json_dir, json_path)
        with open(path, 'r') as load_f:
            json_dict = json.load(load_f)
        h, w = json_dict['imageHeight'], json_dict['imageWidth']
 
        # save txt path
        txt_path = os.path.join(save_dir, json_path.replace('.json', '.txt'))
        with open(txt_path, 'w') as txt_file:
            for shape_dict in json_dict['shapes']:
                label = shape_dict['label']
                label_index = classes.index(label)
                points = shape_dict['points']
 
                # normalize every polygon point to [0, 1] by image width/height
                points_nor_list = []
                for point in points:
                    points_nor_list.append(point[0] / w)
                    points_nor_list.append(point[1] / h)
 
                points_nor_str = ' '.join(str(x) for x in points_nor_list)
                # one line per shape: "<class_index> x1 y1 x2 y2 ..."
                txt_file.write(str(label_index) + ' ' + points_nor_str + '\n')
 
 
if __name__ == "__main__":
    """
    python json2txt_nomalize.py --json-dir my_datasets/color_rings/jsons --save-dir my_datasets/color_rings/txts --classes "cat,dogs"
    """
    parser = argparse.ArgumentParser(description='json convert to txt params')
    parser.add_argument('--json-dir', type=str, default='D:/ultralytics-main/data/json', help='json path dir')
    parser.add_argument('--save-dir', type=str, default='D:/ultralytics-main/data/txt', help='txt save dir')
    parser.add_argument('--classes', type=str, default='ccc,ccc1', help='classes')
    args = parser.parse_args()
    json_dir = args.json_dir
    save_dir = args.save_dir
    classes = args.classes
    convert_label_json(json_dir, save_dir, classes)
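
Each generated .txt file contains one line per labeled shape: the class index followed by the polygon's x/y coordinates normalized by the image width and height (the YOLO segmentation label format). An illustrative example (the values below are made up):

0 0.5312 0.2104 0.5891 0.2346 0.6012 0.3125 0.5440 0.3301
1 0.1200 0.4400 0.1800 0.4700 0.1650 0.5600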
1.2 Split the dataset

split_datasets.py

# split images and label files into train/val/test sets by a given ratio
import shutil
import random
import os
import argparse
 
 
# create the directory if it does not exist
def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path)
 
def main(image_dir, txt_dir, save_dir):
    # create the output directory tree
    mkdir(save_dir)
    images_dir = os.path.join(save_dir, 'images')
    labels_dir = os.path.join(save_dir, 'labels')
 
    img_train_path = os.path.join(images_dir, 'train')
    img_test_path = os.path.join(images_dir, 'test')
    img_val_path = os.path.join(images_dir, 'val')
 
    label_train_path = os.path.join(labels_dir, 'train')
    label_test_path = os.path.join(labels_dir, 'test')
    label_val_path = os.path.join(labels_dir, 'val')
 
    mkdir(images_dir)
    mkdir(labels_dir)
    mkdir(img_train_path)
    mkdir(img_test_path)
    mkdir(img_val_path)
    mkdir(label_train_path)
    mkdir(label_test_path)
    mkdir(label_val_path)
 
    # dataset split ratios: 80% train, 10% val, 10% test; adjust as needed
    train_percent = 0.8
    val_percent = 0.1
    test_percent = 0.1
 
    total_txt = os.listdir(txt_dir)
    num_txt = len(total_txt)
    list_all_txt = range(num_txt)  # indices 0 .. num_txt - 1
 
    num_train = int(num_txt * train_percent)
    num_val = int(num_txt * val_percent)
    num_test = num_txt - num_train - num_val
 
    train = random.sample(list_all_txt, num_train)
    # remove the train indices; the remainder is split between val and test
    val_test = [i for i in list_all_txt if not i in train]
    # draw num_val indices from val_test; whatever is left becomes test
    val = random.sample(val_test, num_val)
 
    print("train: {}, val: {}, test: {}".format(num_train, num_val, num_test))
    for i in list_all_txt:
        name = total_txt[i][:-4]
 
        srcImage = os.path.join(image_dir, name + '.jpg')
        srcLabel = os.path.join(txt_dir, name + '.txt')
 
        if i in train:
            dst_train_Image = os.path.join(img_train_path, name + '.jpg')
            dst_train_Label = os.path.join(label_train_path, name + '.txt')
            shutil.copyfile(srcImage, dst_train_Image)
            shutil.copyfile(srcLabel, dst_train_Label)
        elif i in val:
            dst_val_Image = os.path.join(img_val_path, name + '.jpg')
            dst_val_Label = os.path.join(label_val_path, name + '.txt')
            shutil.copyfile(srcImage, dst_val_Image)
            shutil.copyfile(srcLabel, dst_val_Label)
        else:
            dst_test_Image = os.path.join(img_test_path, name + '.jpg')
            dst_test_Label = os.path.join(label_test_path, name + '.txt')
            shutil.copyfile(srcImage, dst_test_Image)
            shutil.copyfile(srcLabel, dst_test_Label)
 
 
if __name__ == '__main__':
    """
    python split_datasets.py --image-dir my_datasets/color_rings/imgs --txt-dir my_datasets/color_rings/txts --save-dir my_datasets/color_rings/train_data
    """
    parser = argparse.ArgumentParser(description='split datasets to train,val,test params')
    parser.add_argument('--image-dir', type=str, default='D:/ultralytics-main/data', help='image path dir')
    parser.add_argument('--txt-dir', type=str, default='D:/ultralytics-main/data/txt', help='txt path dir')
    parser.add_argument('--save-dir', type=str, default='D:/ultralytics-main/data/split', help='save dir')
    args = parser.parse_args()
    image_dir = args.image_dir
    txt_dir = args.txt_dir
    save_dir = args.save_dir
 
    main(image_dir, txt_dir, save_dir)
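
After the split, the save-dir should contain the following layout (a sketch derived from the script above):

save_dir/
├── images/
│   ├── train/   # *.jpg
│   ├── val/
│   └── test/
└── labels/
    ├── train/   # *.txt
    ├── val/
    └── test/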

2. Install YOLO and prepare the dataset

git clone https://github.com/ultralytics/ultralytics.git
pip install ultralytics

Create a new weights directory to store the pre-trained weights.
In the project root directory, create a data directory and add a new custom.yaml:

train: /home/xxx/data/images/train
val: /home/xxx/data/images/val

# number of classes
nc: 2


# Classes
names:
  0: ccc
  1: ccc1
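
Before launching training, it can help to confirm that the paths in custom.yaml really exist. A minimal sketch (not from the original post; it assumes custom.yaml lives under data/ and uses PyYAML, which ultralytics already depends on):

# check_yaml.py -- hypothetical helper, assumes data/custom.yaml
import os
import yaml

with open("data/custom.yaml", "r") as f:
    cfg = yaml.safe_load(f)

for key in ("train", "val"):
    path = cfg[key]
    print(f"{key}: {path} -> {'OK' if os.path.isdir(path) else 'MISSING'}")

print(f"nc={cfg['nc']}, names={cfg['names']}")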
3. Training and export

Method 1: command line. The following is the official train/predict/val/export CLI provided by YOLOv8:

yolo task=detect    mode=train    model=yolov8n.pt        args...
          classify       predict        yolov8n-cls.yaml  args...
          segment        val            yolov8n-seg.yaml  args...
                         export         yolov8n.pt        format=onnx  args...

# example
yolo task=detect   mode=train     model=weights/yolov8n.pt \
     data=data/custom.yaml        batch=16  epochs=150 imgsz=640 workers=4 device=0
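
For reference, validation, prediction, and export follow the same CLI pattern. A sketch only; the runs/detect/train path and the source folder below are illustrative, so point them at your own run and images:

yolo task=detect mode=val     model=runs/detect/train/weights/best.pt  data=data/custom.yaml
yolo task=detect mode=predict model=runs/detect/train/weights/best.pt  source=data/split/images/test
yolo mode=export              model=runs/detect/train/weights/best.pt  format=onnx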

Method 2: Python API. Create a new demo.py with the following content:

from ultralytics import YOLO

# Load a model
# model = YOLO("yolov8n.yaml")  # build a new model from scratch
model = YOLO("weights/yolov8n.pt")  # load a pretrained model (recommended for training)

# Use the model
results = model.train(data="data/custom.yaml", epochs=20, batch=8)  # train the model
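
The same validation/prediction/export steps can be run through the Python API once training finishes. A minimal sketch (the paths are illustrative, not from the original post):

metrics = model.val()                                                  # evaluate on the val split defined in custom.yaml
results = model.predict(source="data/split/images/test", save=True)   # run inference and save annotated images
onnx_path = model.export(format="onnx")                               # export the trained model to ONNX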

Origin: blog.csdn.net/wsp_1138886114/article/details/131474779