Darknet Framework Study Notes ---- 2

Understanding how darknet trains an object detection model

Locating the source code

Following the blog post darknet训练自己的yolo_v3模型 (training your own yolo_v3 model with darknet), training is started with this command:

./darknet detector train cfg/voc.data cfg/yolov3-voc.cfg darknet53.conv.74

The corresponding source in darknet.c:

    else if (0 == strcmp(argv[1], "detector")){
        run_detector(argc, argv);
    }

The corresponding source in run_detector():

else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);

Reading the source code

The walkthrough below assumes the default GPU build, i.e. code compiled with the GPU macro defined.

void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");

read_data_cfg() parses the datacfg dataset configuration file. option_find_str() then looks up the training-set index file name (key train) and assigns it to the train_images string, and looks up the backup directory name (key backup) and assigns it to backup_directory; the last argument of each call is the default used when the key is missing.

Taking cfg/voc.data as an example, its contents (the stock file shipped with the darknet repository) are as follows:
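
    classes= 20
    train  = /home/pjreddie/data/voc/train.txt
    valid  = /home/pjreddie/data/voc/2007_test.txt
    names = data/voc.names
    backup = backup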

This specifies the training-set index file as /home/pjreddie/data/voc/train.txt and the backup directory as backup.

    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    network **nets = calloc(ngpus, sizeof(network));

time(0) returns the current calendar time (seconds since the epoch); srand(time(0)) uses it as the seed for the pseudo-random number generation algorithm behind rand(), so every run produces a different random sequence.
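
A minimal standalone illustration of this seeding behavior (plain C, not darknet code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        srand(time(0));      /* time-based seed: a different sequence per run */
        printf("%d\n", rand());
        srand(42);           /* fixed seed: the same sequence on every run    */
        printf("%d\n", rand());
        return 0;
    }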

basecfg() extracts the base name of the network configuration file, not its contents: for cfg/yolov3-voc.cfg the base string becomes "yolov3-voc", which is later used to name the saved weight files. calloc() then allocates zero-initialized storage for ngpus entries, one network per GPU, and hands the pointer to nets (the quoted code sizes each slot as sizeof(network) even though only pointers are stored there, a harmless over-allocation present in upstream darknet).
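
For reference, basecfg() in darknet's src/utils.c is roughly the following (copy_string is a darknet helper that returns a heap-allocated copy of its argument):

    char *basecfg(char *cfgfile)
    {
        char *c = cfgfile;
        char *next;
        while((next = strchr(c, '/')))    /* skip past the last '/'          */
        {
            c = next + 1;
        }
        c = copy_string(c);               /* duplicate e.g. "yolov3-voc.cfg" */
        next = strchr(c, '.');
        if (next) *next = 0;              /* cut the extension               */
        return c;
    }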

    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];

The network is loaded from cfgfile (network configuration) and weightfile (network weights). With multiple GPUs, one copy of the network is loaded per GPU, and the learning rate is multiplied by the GPU count. The original author was unsure why; it matches the usual linear-scaling rule for data-parallel training: with ngpus GPUs the effective batch size grows by a factor of ngpus, so the learning rate is scaled up proportionally.

The imgs computed just below is the number of images consumed per training iteration. Note that darknet's cfg parser stores the per-subdivision size in net->batch, so net->batch * net->subdivisions recovers the batch size written in the cfg file, and multiplying by ngpus accounts for all GPUs.

    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    data train, buffer;

    layer l = net->layers[net->n - 1];

    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;

This block fills in the arguments for the data-loading threads: which image paths to draw from, how many images per load (args.n = imgs), the size of the full path list (args.m), the number of classes, the jitter used for augmentation, the maximum number of boxes per image, the destination buffer (args.d = &buffer), and the data type DETECTION_DATA. Many of these touch detection-specific operations that could each fill their own post, so they are skipped here.
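
For orientation, here is an abridged view of the load_args struct from include/darknet.h, annotated with how the fields above are used (the comments are mine, not from the source):

    typedef struct load_args{
        int threads;      /* loader threads to spawn (64 here)                  */
        char **paths;     /* pool of training image paths                       */
        int n;            /* images per load: batch * subdivisions * ngpus      */
        int m;            /* size of the path pool (images are sampled from it) */
        int w, h;         /* network input width/height                         */
        int num_boxes;    /* max ground-truth boxes kept per image              */
        int classes;      /* number of object classes                           */
        int coords;       /* coordinates predicted per box                      */
        float jitter;     /* strength of random crop/translation augmentation   */
        data *d;          /* destination buffer the loader fills (&buffer)      */
        data_type type;   /* DETECTION_DATA selects the detection data loader   */
        /* ...remaining augmentation fields (angle, saturation, ...) omitted... */
    } load_args;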

    pthread_t load_thread = load_data(args);
    double time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            #pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);

        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }

This is the body of training, a load/train pipeline. load_data(args) spawns loader threads (args.threads = 64) that read and augment args.n images in the background and assemble them into one batch inside buffer; pthread_join() waits for that load to finish, the result is handed to the data struct variable train, and the next load is started immediately so it overlaps with training.

When the last layer has random set, every 10th loop iteration re-rolls the input resolution to a random multiple of 32 between 320 and 608 (multi-scale training), pinned to 608 within the final 200 batches; the pending batch is discarded, a new load is started at the new size, and every network copy is resized with resize_network().
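
The load/train overlap is a classic double-buffering pattern. A minimal standalone sketch of the same idiom (fill_batch and data_t are hypothetical stand-ins for darknet's load_data() and data):

    #include <pthread.h>

    typedef struct { float *X, *y; } data_t;   /* stand-in for darknet's data  */

    static data_t buffer;                      /* the loader thread fills this */

    static void *fill_batch(void *arg)         /* stand-in for load_data()     */
    {
        /* read, decode and augment the next batch of images into buffer */
        (void)arg;
        return 0;
    }

    int main(void)
    {
        pthread_t loader;
        pthread_create(&loader, 0, fill_batch, 0);      /* prefetch batch 0     */
        for(int step = 0; step < 1000; ++step){
            pthread_join(loader, 0);                    /* wait for the prefetch */
            data_t train = buffer;                      /* take the ready batch  */
            pthread_create(&loader, 0, fill_batch, 0);  /* prefetch the next one */
            (void)train; /* train_network(net, train) would run here,
                            overlapping with the loader filling the next batch  */
        }
        pthread_join(loader, 0);                        /* reap the last prefetch */
        return 0;
    }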

Training is counted in batches: get_current_batch() derives the current batch number from the number of images seen so far, and the loop runs until it reaches net->max_batches.
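
get_current_batch() in src/network.c is essentially:

    size_t get_current_batch(network *net)
    {
        /* net->seen counts images; one batch = batch * subdivisions images */
        size_t batch_num = (*net->seen)/(net->batch*net->subdivisions);
        return batch_num;
    }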

Every 100 batches, the current weights are saved to a rolling checkpoint named after base with the .backup suffix (e.g. yolov3-voc.backup), overwriting the previous one.

In addition, every 100 batches during the first 1000 batches, and every 10000 batches thereafter (the i%10000==0 branch), the weights are saved to a numbered file with the .weights suffix, such as yolov3-voc_900.weights, so intermediate snapshots are kept.

When the batch count reaches net->max_batches the loop exits and training stops; the remainder of train_detector(), not quoted above, saves the final weights.
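
That tail of train_detector() in examples/detector.c is roughly:

    #ifdef GPU
        if(ngpus != 1) sync_nets(nets, ngpus, 0);
    #endif
        char buff[256];
        sprintf(buff, "%s/%s_final.weights", backup_directory, base);
        save_weights(net, buff);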

All weight files produced during training therefore end up under the backup directory.


Reposted from blog.csdn.net/Jeff_zjf/article/details/102671661