darknet yolo v3 route layer

The route layer concatenates the outputs of its input layers. In the darknet network structure, the input layers are expected to have the same width and height; if they do not match, the route layer's output w, h and c are all set to 0.

For example, with input layer 1: 26*26*256 and input layer 2: 26*26*128, the route layer output is 26*26*(256+128).
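As a minimal sketch of that rule (not darknet code; the shape struct and route_shape name are made up for illustration), the output shape of a route over two inputs can be computed like this:

// Hypothetical helper illustrating the rule above: channels are concatenated
// only when the two inputs share the same spatial size, otherwise the output
// shape is zeroed out, exactly as parse_route does below.
typedef struct { int w, h, c; } shape;

static shape route_shape(shape a, shape b)
{
    shape out = {0, 0, 0};                      // mismatch -> 0*0*0
    if (a.w == b.w && a.h == b.h) {
        out.w = a.w;
        out.h = a.h;
        out.c = a.c + b.c;                      // 26*26*256 + 26*26*128 -> 26*26*384
    }
    return out;
}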

route_layer parse_route(list *options, size_params params, network *net)
{
    char *l = option_find(options, "layers");
    if(!l) error("Route Layer must specify input layers");   // check before calling strlen on a possibly NULL pointer
    int len = strlen(l);
    int n = 1;
    int i;
	
    for(i = 0; i < len; ++i){							// count how many layers feed this route layer
        if (l[i] == ',') ++n;
    }

    int *layers = calloc(n, sizeof(int));				// absolute indices of the input layers
    int *sizes = calloc(n, sizeof(int));				// flattened output size of each input layer
    for(i = 0; i < n; ++i){
        int index = atoi(l);
        l = strchr(l, ',')+1;
        if(index < 0)
            index = params.index + index;				// negative index: counted back from the current layer
        layers[i] = index;								// non-negative (absolute) index: used as-is
        sizes[i] = net->layers[index].outputs;			// flattened output size of that input layer
    }
    int batch = params.batch;

    route_layer layer = make_route_layer(batch, n, layers, sizes);

    convolutional_layer first = net->layers[layers[0]];	// first layer feeding the route
    layer.out_w = first.out_w;
    layer.out_h = first.out_h;
    layer.out_c = first.out_c;
    for(i = 1; i < n; ++i){								// remaining layers feeding the route
        int index = layers[i];
        convolutional_layer next = net->layers[index];	// if its feature map matches the first layer's size,
        if(next.out_w == first.out_w && next.out_h == first.out_h){
            layer.out_c += next.out_c;					// accumulate its output channels
        }else{
            layer.out_h = layer.out_w = layer.out_c = 0;// otherwise zero out both the size and the channel count
        }
    }

    return layer;
}
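The part worth pulling out of parse_route is how the comma-separated layers string is split and how negative (relative) indices are resolved against the current layer index. Below is a standalone sketch of just that logic; the buffer contents and the current index of 20 are made-up example values, not something read from a real cfg:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Standalone sketch of parse_route's index resolution: a negative entry in
// "layers" is taken relative to the current layer, a non-negative one is
// used as an absolute layer index.
int main(void)
{
    char buf[] = "-1, 8";            // e.g. the second [route] in yolov3-tiny.cfg
    int current = 20;                // hypothetical index of the [route] layer itself

    int n = 1;
    for (size_t i = 0; i < strlen(buf); ++i) if (buf[i] == ',') ++n;

    char *l = buf;
    for (int i = 0; i < n; ++i) {
        int index = atoi(l);         // atoi stops at the comma
        char *comma = strchr(l, ',');
        if (comma) l = comma + 1;    // darknet advances unconditionally; guard it here
        if (index < 0) index = current + index;
        printf("input %d -> layer %d\n", i, index);   // prints 19, then 8
    }
    return 0;
}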
#include "route_layer.h"
#include "cuda.h"
#include "blas.h"

#include <stdio.h>

route_layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
{
    fprintf(stderr,"route ");
    route_layer l = {0};
    l.type = ROUTE;
    l.batch = batch;
    l.n = n;										// number of input layers
    l.input_layers = input_layers;
    l.input_sizes = input_sizes;
    int i;
    int outputs = 0;
    for(i = 0; i < n; ++i){
        fprintf(stderr," %d", input_layers[i]);
        outputs += input_sizes[i];					// sum the input layers' flattened sizes to get the route output size
    }
    fprintf(stderr, "\n");
    l.outputs = outputs;
    l.inputs = outputs;
    l.delta =  calloc(outputs*batch, sizeof(float));
    l.output = calloc(outputs*batch, sizeof(float));

    l.forward = forward_route_layer;
    l.backward = backward_route_layer;
    #ifdef GPU
    l.forward_gpu = forward_route_layer_gpu;
    l.backward_gpu = backward_route_layer_gpu;

    l.delta_gpu =  cuda_make_array(l.delta, outputs*batch);
    l.output_gpu = cuda_make_array(l.output, outputs*batch);
    #endif
    return l;
}

void resize_route_layer(route_layer *l, network *net)
{
    int i;
    layer first = net->layers[l->input_layers[0]];
    l->out_w = first.out_w;
    l->out_h = first.out_h;
    l->out_c = first.out_c;
    l->outputs = first.outputs;
    l->input_sizes[0] = first.outputs;
    for(i = 1; i < l->n; ++i){
        int index = l->input_layers[i];
        layer next = net->layers[index];
        l->outputs += next.outputs;
        l->input_sizes[i] = next.outputs;
        if(next.out_w == first.out_w && next.out_h == first.out_h){
            l->out_c += next.out_c;
        }else{
            printf("%d %d, %d %d\n", next.out_w, next.out_h, first.out_w, first.out_h);
            l->out_h = l->out_w = l->out_c = 0;
        }
    }
    l->inputs = l->outputs;
    l->delta =  realloc(l->delta, l->outputs*l->batch*sizeof(float));
    l->output = realloc(l->output, l->outputs*l->batch*sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu  = cuda_make_array(l->output, l->outputs*l->batch);
    l->delta_gpu   = cuda_make_array(l->delta,  l->outputs*l->batch);
#endif
    
}

// Flattens each input layer in turn and copies it into its slot in l.output.
// Example: input layer 1: 26*26*256, input layer 2: 26*26*128 -> route output: 26*26*(256+128)
void forward_route_layer(const route_layer l, network net)
{
    int i, j;
    int offset = 0;
    for(i = 0; i < l.n; ++i){
        int index = l.input_layers[i];
        float *input = net.layers[index].output;
        int input_size = l.input_sizes[i];
        for(j = 0; j < l.batch; ++j){
            // source: net.layers[index].output    destination: l.output
            copy_cpu(input_size, input + j*input_size, 1, l.output + offset + j*l.outputs, 1);
        }
        offset += input_size;
    }
}
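The indexing that is easy to misread here is the per-batch offset: for every batch sample j, each input's slice is copied into that sample's own row of l.output at a running channel offset. A minimal self-contained sketch of the same copy pattern, with tiny made-up sizes and memcpy standing in for copy_cpu:

#include <stdio.h>
#include <string.h>

// Mimics forward_route_layer's indexing for batch=2 and two inputs of size 3 and 2.
// Each output row (one per batch sample) ends up as [input0 | input1].
int main(void)
{
    float in0[] = {1, 2, 3,   4, 5, 6};      // 2 samples * 3 values
    float in1[] = {7, 8,      9, 10};        // 2 samples * 2 values
    float *inputs[]     = {in0, in1};
    int    input_size[] = {3, 2};

    int batch = 2, n = 2, outputs = 5;       // outputs = 3 + 2
    float out[2 * 5];

    int offset = 0;
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < batch; ++j)
            memcpy(out + offset + j * outputs,
                   inputs[i] + j * input_size[i],
                   input_size[i] * sizeof(float));
        offset += input_size[i];
    }

    for (int k = 0; k < batch * outputs; ++k) printf("%g ", out[k]);
    printf("\n");                            // 1 2 3 7 8 4 5 6 9 10
    return 0;
}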

void backward_route_layer(const route_layer l, network net)
{
    int i, j;
    int offset = 0;
    for(i = 0; i < l.n; ++i){
        int index = l.input_layers[i];
        float *delta = net.layers[index].delta;
        int input_size = l.input_sizes[i];
        for(j = 0; j < l.batch; ++j){
            axpy_cpu(input_size, 1, l.delta + offset + j*l.outputs, 1, delta + j*input_size, 1);
        }
        offset += input_size;
    }
}
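Unlike the forward pass, the backward pass uses axpy_cpu, i.e. y += alpha*x, rather than a copy: the route's gradient slice is added into the source layer's delta, so a layer whose output feeds several consumers accumulates gradients from all of them. A minimal sketch of that accumulation (plain C, no darknet headers):

// axpy-style accumulation as used in backward_route_layer: the source layer's
// delta may already hold gradients from other consumers, so the route's slice
// is added on top instead of overwriting it.
static void axpy(int n, float alpha, const float *x, float *y)
{
    for (int i = 0; i < n; ++i) y[i] += alpha * x[i];
}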

#ifdef GPU
void forward_route_layer_gpu(const route_layer l, network net)
{
    int i, j;
    int offset = 0;
    for(i = 0; i < l.n; ++i){
        int index = l.input_layers[i];
        float *input = net.layers[index].output_gpu;
        int input_size = l.input_sizes[i];
        for(j = 0; j < l.batch; ++j){
            copy_gpu(input_size, input + j*input_size, 1, l.output_gpu + offset + j*l.outputs, 1);
        }
        offset += input_size;
    }
}

void backward_route_layer_gpu(const route_layer l, network net)
{
    int i, j;
    int offset = 0;
    for(i = 0; i < l.n; ++i){
        int index = l.input_layers[i];
        float *delta = net.layers[index].delta_gpu;
        int input_size = l.input_sizes[i];
        for(j = 0; j < l.batch; ++j){
            axpy_gpu(input_size, 1, l.delta_gpu + offset + j*l.outputs, 1, delta + j*input_size, 1);
        }
        offset += input_size;
    }
}
#endif
For reference, the layer listing from the tiny-YOLOv3 cfg, one layer per line with its output size:

	[net] batch=1 subdivisions=1 width=416 height=416 channels=3 momentum=0.9 decay=0.0005 angle=0 saturation=1.5 exposure=1.5 hue=.1
	learning_rate=0.001 burn_in=1000 max_batches=500200 policy=steps steps=400000,450000 scales=.1,.1
0	[convolutional] batch_normalize=1 filters=16   size=3 stride=1 pad=1 activation=leaky	416*416*16
1	[maxpool] size=2 stride=2	208*208*16
2	[convolutional] batch_normalize=1 filters=32   size=3 stride=1 pad=1 activation=leaky	208*208*32
3	[maxpool] size=2 stride=2	104*104*32
4	[convolutional] batch_normalize=1 filters=64   size=3 stride=1 pad=1 activation=leaky	104*104*64
5	[maxpool] size=2 stride=2	52*52*64
6	[convolutional] batch_normalize=1 filters=128  size=3 stride=1 pad=1 activation=leaky	52*52*128
7	[maxpool] size=2 stride=2	26*26*128
8	[convolutional] batch_normalize=1 filters=256  size=3 stride=1 pad=1 activation=leaky	26*26*256
9	[maxpool] size=2 stride=2	13*13*256
10	[convolutional] batch_normalize=1 filters=512  size=3 stride=1 pad=1 activation=leaky	13*13*512
11	[maxpool] size=2 stride=1	13*13*512
12	[convolutional] batch_normalize=1 filters=1024 size=3 stride=1 pad=1 activation=leaky	13*13*1024
13	[convolutional] batch_normalize=1 filters=256  size=1 stride=1 pad=1 activation=leaky	13*13*256
14	[convolutional] batch_normalize=1 filters=512  size=3 stride=1 pad=1 activation=leaky	13*13*512
15	[convolutional] size=1 stride=1 pad=1 filters=255 activation=linear	13*13*255
16	[yolo] mask=3,4,5 anchors=10,14, 23,27, 37,58, 81,82, 135,169, 344,319
	classes=80 num=6 jitter=.3 ignore_thresh=.7 truth_thresh=1 random=1
17	[route] layers = -4	13*13*256	resolves to layer 13 (the [yolo] layer does count when indexing)
18	[convolutional] batch_normalize=1 filters=128  size=1 stride=1 pad=1 activation=leaky	13*13*128
19	[upsample] stride=2	26*26*128
20	[route] layers = -1, 8	26*26*(128+256)	concatenates layers 19 and 8
21	[convolutional] batch_normalize=1 filters=256  size=3 stride=1 pad=1 activation=leaky	26*26*256
22	[convolutional] size=1 stride=1 pad=1 filters=255 activation=linear	26*26*255
23	[yolo] mask=1,2,3 anchors=10,14, 23,27, 37,58, 81,82, 135,169, 344,319
	classes=80 num=6 jitter=.3 ignore_thresh=.7 truth_thresh=1 random=1
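Worked through with parse_route's arithmetic from above: the first [route] sits at index 17, so layers = -4 resolves to 17 + (-4) = 13, the 1*1 convolution with 256 filters, and the route simply forwards its 13*13*256 output. The second [route] sits at index 20 with layers = -1, 8, which resolve to 19 (the upsample, 26*26*128) and 8 (26*26*256); both are 26*26, so their channels concatenate to 26*26*384, which is what layer 21 convolves over.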

Reposted from blog.csdn.net/wzw12315/article/details/81113793