基于caffe框架复现yolov3目标检测

网上有很多基于pytorch、tensorflow等框架的实现,但是使用caffe复现的几乎没有,或许是因为caffe框架逐渐没落了吧?没办法,只能自己动手、丰衣足食了!过程有点麻烦。

补充说明一下复现平台:Jetson-TX2、Ubuntu16.04 LTS

1 模型转换(模型已经上传百度云,在github上有链接,嫌麻烦的话,可以跳过该部分)

     可以借助一个模型转换的工具:https://github.com/marvis/pytorch-caffe-darknet-convert;(需要安装pytorch,安装自行百度解决)

但是这个github上介绍的是基于yolo与yolov2的,有以下几点需要注意:

    1)、这两个weights的存储方式与yolov3的存储方式有点不同;

    2)、yolov3上有upsample层在之前的版本上没有;

因此针对这些不同点我重新写了一个darknet2caffe.py的文件

[python]  view plain  copy
  1. <code class="language-html">import sys  
  2. sys.path.append('/home/ss/caffe/python')  
  3. import caffe  
  4. import numpy as np  
  5. from collections import OrderedDict  
  6. from cfg import *  
  7. from prototxt import *  
  8.   
  9. def darknet2caffe(cfgfile, weightfile, protofile, caffemodel):  
  10.     net_info = cfg2prototxt(cfgfile)  
  11.     save_prototxt(net_info , protofile, region=False)  
  12.   
  13.     net = caffe.Net(protofile, caffe.TEST)  
  14.     params = net.params  
  15.   
  16.     blocks = parse_cfg(cfgfile)  
  17.   
  18.     #Open the weights file  
  19.     fp = open(weightfile, "rb")  
  20.   
  21.     #The first 4 values are header information   
  22.     # 1. Major version number  
  23.     # 2. Minor Version Number  
  24.     # 3. Subversion number   
  25.     # 4. IMages seen   
  26.     header = np.fromfile(fp, dtype = np.int32, count = 5)  
  27.   
  28.     #fp = open(weightfile, 'rb')  
  29.     #header = np.fromfile(fp, count=5dtype=np.int32)  
  30.     #header = np.ndarray(shape=(5,),dtype='int32',buffer=fp.read(20))  
  31.     #print(header)  
  32.     buf = np.fromfile(fp, dtype = np.float32)  
  33.     #print(buf)  
  34.     fp.close()  
  35.   
  36.     layers = []  
  37.     layer_id = 1  
  38.     start = 0  
  39.     for block in blocks:  
  40.         if start >= buf.size:  
  41.             break  
  42.   
  43.         if block['type'] == 'net':  
  44.             continue  
  45.         elif block['type'] == 'convolutional':  
  46.             batch_normalize = int(block['batch_normalize'])  
  47.             if block.has_key('name'):  
  48.                 conv_layer_name = block['name']  
  49.                 bn_layer_name = '%s-bn' % block['name']  
  50.                 scale_layer_name = '%s-scale' % block['name']  
  51.             else:  
  52.                 conv_layer_name = 'layer%d-conv' % layer_id  
  53.                 bn_layer_name = 'layer%d-bn' % layer_id  
  54.                 scale_layer_name = 'layer%d-scale' % layer_id  
  55.   
  56.             if batch_normalize:  
  57.                 start = load_conv_bn2caffe(buf, start, params[conv_layer_name], params[bn_layer_name], params[scale_layer_name])  
  58.             else:  
  59.                 start = load_conv2caffe(buf, start, params[conv_layer_name])  
  60.             layer_id = layer_id+1  
  61.         elif block['type'] == 'connected':  
  62.             if block.has_key('name'):  
  63.                 fc_layer_name = block['name']  
  64.             else:  
  65.                 fc_layer_name = 'layer%d-fc' % layer_id  
  66.             start = load_fc2caffe(buf, start, params[fc_layer_name])  
  67.             layer_id = layer_id+1  
  68.         elif block['type'] == 'maxpool':  
  69.             layer_id = layer_id+1  
  70.         elif block['type'] == 'avgpool':  
  71.             layer_id = layer_id+1  
  72.         elif block['type'] == 'region':  
  73.             layer_id = layer_id + 1  
  74.         elif block['type'] == 'route':  
  75.             layer_id = layer_id + 1  
  76.         elif block['type'] == 'shortcut':  
  77.             layer_id = layer_id + 1  
  78.         elif block['type'] == 'softmax':  
  79.             layer_id = layer_id + 1  
  80.         elif block['type'] == 'cost':  
  81.             layer_id = layer_id + 1  
  82.     elif block['type'] == 'upsample':  
  83.         layer_id = layer_id + 1  
  84.         else:  
  85.             print('unknow layer type %s ' % block['type'])  
  86.             layer_id = layer_id + 1  
  87.     print('save prototxt to %s' % protofile)  
  88.     save_prototxt(net_info , protofile, region=True)  
  89.     print('save caffemodel to %s' % caffemodel)  
  90.     net.save(caffemodel)  
  91.   
  92. def load_conv2caffe(buf, start, conv_param):  
  93.     weight = conv_param[0].data  
  94.     bias = conv_param[1].data  
  95.     conv_param[1].data[...] = np.reshape(buf[start:start+bias.size], bias.shape);   start = start + bias.size  
  96.     conv_param[0].data[...] = np.reshape(buf[start:start+weight.size], weight.shape); start = start + weight.size  
  97.     return start  
  98.   
  99. def load_fc2caffe(buf, start, fc_param):  
  100.     weight = fc_param[0].data  
  101.     bias = fc_param[1].data  
  102.     fc_param[1].data[...] = np.reshape(buf[start:start+bias.size], bias.shape);   start = start + bias.size  
  103.     fc_param[0].data[...] = np.reshape(buf[start:start+weight.size], weight.shape); start = start + weight.size  
  104.     return start  
  105.   
  106.   
  107. def load_conv_bn2caffe(buf, start, conv_param, bn_param, scale_param):  
  108.     conv_weight = conv_param[0].data  
  109.     running_mean = bn_param[0].data  
  110.     running_var = bn_param[1].data  
  111.     scale_weight = scale_param[0].data  
  112.     scale_bias = scale_param[1].data  
  113.   
  114.       
  115.      
  116.     scale_param[1].data[...] = np.reshape(buf[start:start+scale_bias.size], scale_bias.shape); start = start + scale_bias.size  
  117.     #print scale_bias.size  
  118.     #print scale_bias  
  119.   
  120.     scale_param[0].data[...] = np.reshape(buf[start:start+scale_weight.size], scale_weight.shape); start = start + scale_weight.size  
  121.     #print scale_weight.size  
  122.   
  123.     bn_param[0].data[...] = np.reshape(buf[start:start+running_mean.size], running_mean.shape); start = start + running_mean.size  
  124.     #print running_mean.size  
  125.   
  126.     bn_param[1].data[...] = np.reshape(buf[start:start+running_var.size], running_var.shape); start = start + running_var.size  
  127.     #print running_var.size  
  128.   
  129.     bn_param[2].data[...] = np.array([1.0])  
  130.     conv_param[0].data[...] = np.reshape(buf[start:start+conv_weight.size], conv_weight.shape); start = start + conv_weight.size  
  131.     #print conv_weight.size  
  132.   
  133.     return start  
  134.   
  135. def cfg2prototxt(cfgfile):  
  136.     blocks = parse_cfg(cfgfile)  
  137.   
  138.     layers = []  
  139.     props = OrderedDict()   
  140.     bottom = 'data'  
  141.     layer_id = 1  
  142.     topnames = dict()  
  143.     for block in blocks:  
  144.         if block['type'] == 'net':  
  145.             props['name'] = 'Darkent2Caffe'  
  146.             props['input'] = 'data'  
  147.             props['input_dim'] = ['1']  
  148.             props['input_dim'].append(block['channels'])  
  149.             props['input_dim'].append(block['height'])  
  150.             props['input_dim'].append(block['width'])  
  151.             continue  
  152.         elif block['type'] == 'convolutional':  
  153.             conv_layer = OrderedDict()  
  154.             conv_layer['bottom'] = bottom  
  155.             if block.has_key('name'):  
  156.                 conv_layer['top'] = block['name']  
  157.                 conv_layer['name'] = block['name']  
  158.             else:  
  159.                 conv_layer['top'] = 'layer%d-conv' % layer_id  
  160.                 conv_layer['name'] = 'layer%d-conv' % layer_id  
  161.             conv_layer['type'] = 'Convolution'  
  162.             convolution_param = OrderedDict()  
  163.             convolution_param['num_output'] = block['filters']  
  164.             convolution_param['kernel_size'] = block['size']  
  165.             if block['pad'] == '1':  
  166.                 convolution_param['pad'] = str(int(convolution_param['kernel_size'])/2)  
  167.             convolution_param['stride'] = block['stride']  
  168.             if block['batch_normalize'] == '1':  
  169.                 convolution_param['bias_term'] = 'false'  
  170.             else:  
  171.                 convolution_param['bias_term'] = 'true'  
  172.             conv_layer['convolution_param'] = convolution_param  
  173.             layers.append(conv_layer)  
  174.             bottom = conv_layer['top']  
  175.   
  176.             if block['batch_normalize'] == '1':  
  177.                 bn_layer = OrderedDict()  
  178.                 bn_layer['bottom'] = bottom  
  179.                 bn_layer['top'] = bottom  
  180.                 if block.has_key('name'):  
  181.                     bn_layer['name'] = '%s-bn' % block['name']  
  182.                 else:  
  183.                     bn_layer['name'] = 'layer%d-bn' % layer_id  
  184.                 bn_layer['type'] = 'BatchNorm'  
  185.                 batch_norm_param = OrderedDict()  
  186.                 batch_norm_param['use_global_stats'] = 'true'  
  187.                 bn_layer['batch_norm_param'] = batch_norm_param  
  188.                 layers.append(bn_layer)  
  189.   
  190.                 scale_layer = OrderedDict()  
  191.                 scale_layer['bottom'] = bottom  
  192.                 scale_layer['top'] = bottom  
  193.                 if block.has_key('name'):  
  194.                     scale_layer['name'] = '%s-scale' % block['name']  
  195.                 else:  
  196.                     scale_layer['name'] = 'layer%d-scale' % layer_id  
  197.                 scale_layer['type'] = 'Scale'  
  198.                 scale_param = OrderedDict()  
  199.                 scale_param['bias_term'] = 'true'  
  200.                 scale_layer['scale_param'] = scale_param  
  201.                 layers.append(scale_layer)  
  202.   
  203.             if block['activation'] != 'linear':  
  204.                 relu_layer = OrderedDict()  
  205.                 relu_layer['bottom'] = bottom  
  206.                 relu_layer['top'] = bottom  
  207.                 if block.has_key('name'):  
  208.                     relu_layer['name'] = '%s-act' % block['name']  
  209.                 else:  
  210.                     relu_layer['name'] = 'layer%d-act' % layer_id  
  211.                 relu_layer['type'] = 'ReLU'  
  212.                 if block['activation'] == 'leaky':  
  213.                     relu_param = OrderedDict()  
  214.                     relu_param['negative_slope'] = '0.1'  
  215.                     relu_layer['relu_param'] = relu_param  
  216.                 layers.append(relu_layer)  
  217.             topnames[layer_id] = bottom  
  218.             layer_id = layer_id+1  
  219.         elif block['type'] == 'maxpool':  
  220.             max_layer = OrderedDict()  
  221.             max_layer['bottom'] = bottom  
  222.             if block.has_key('name'):  
  223.                 max_layer['top'] = block['name']  
  224.                 max_layer['name'] = block['name']  
  225.             else:  
  226.                 max_layer['top'] = 'layer%d-maxpool' % layer_id  
  227.                 max_layer['name'] = 'layer%d-maxpool' % layer_id  
  228.             max_layer['type'] = 'Pooling'  
  229.             pooling_param = OrderedDict()  
  230.             pooling_param['kernel_size'] = block['size']  
  231.             pooling_param['stride'] = block['stride']  
  232.             pooling_param['pool'] = 'MAX'  
  233.             if block.has_key('pad') and int(block['pad']) == 1:  
  234.                 pooling_param['pad'] = str((int(block['size'])-1)/2)  
  235.             max_layer['pooling_param'] = pooling_param  
  236.             layers.append(max_layer)  
  237.             bottom = max_layer['top']  
  238.             topnames[layer_id] = bottom  
  239.             layer_id = layer_id+1  
  240.         elif block['type'] == 'avgpool':  
  241.             avg_layer = OrderedDict()  
  242.             avg_layer['bottom'] = bottom  
  243.             if block.has_key('name'):  
  244.                 avg_layer['top'] = block['name']  
  245.                 avg_layer['name'] = block['name']  
  246.             else:  
  247.                 avg_layer['top'] = 'layer%d-avgpool' % layer_id  
  248.                 avg_layer['name'] = 'layer%d-avgpool' % layer_id  
  249.             avg_layer['type'] = 'Pooling'  
  250.             pooling_param = OrderedDict()  
  251.             pooling_param['kernel_size'] = 7  
  252.             pooling_param['stride'] = 1  
  253.             pooling_param['pool'] = 'AVE'  
  254.             avg_layer['pooling_param'] = pooling_param  
  255.             layers.append(avg_layer)  
  256.             bottom = avg_layer['top']  
  257.             topnames[layer_id] = bottom  
  258.             layer_id = layer_id+1  
  259.         elif block['type'] == 'region':  
  260.             if True:  
  261.                 region_layer = OrderedDict()  
  262.                 region_layer['bottom'] = bottom  
  263.                 if block.has_key('name'):  
  264.                     region_layer['top'] = block['name']  
  265.                     region_layer['name'] = block['name']  
  266.                 else:  
  267.                     region_layer['top'] = 'layer%d-region' % layer_id  
  268.                     region_layer['name'] = 'layer%d-region' % layer_id  
  269.                 region_layer['type'] = 'Region'  
  270.                 region_param = OrderedDict()  
  271.                 region_param['anchors'] = block['anchors'].strip()  
  272.                 region_param['classes'] = block['classes']  
  273.                 region_param['num'] = block['num']  
  274.                 region_layer['region_param'] = region_param  
  275.                 layers.append(region_layer)  
  276.                 bottom = region_layer['top']  
  277.             topnames[layer_id] = bottom  
  278.             layer_id = layer_id + 1  
  279.   
  280.         elif block['type'] == 'route':  
  281.             route_layer = OrderedDict()  
  282.         layer_name = str(block['layers']).split(',')  
  283.         #print(layer_name[0])  
  284.         bottom_layer_size = len(str(block['layers']).split(','))  
  285.         #print(bottom_layer_size)  
  286.         if(1 == bottom_layer_size):  
  287.                 prev_layer_id = layer_id + int(block['layers'])  
  288.                 bottom = topnames[prev_layer_id]  
  289.                 #topnames[layer_id] = bottom  
  290.         route_layer['bottom'] = bottom  
  291.         if(2 == bottom_layer_size):  
  292.         prev_layer_id1 = layer_id + int(layer_name[0])  
  293.         #print(prev_layer_id1)  
  294.         prev_layer_id2 = int(layer_name[1]) + 1  
  295.         print(topnames)  
  296.         bottom1 = topnames[prev_layer_id1]  
  297.         bottom2 = topnames[prev_layer_id2]  
  298.         route_layer['bottom'] = [bottom1, bottom2]  
  299.         if block.has_key('name'):  
  300.                 route_layer['top'] = block['name']  
  301.                 route_layer['name'] = block['name']  
  302.             else:  
  303.                 route_layer['top'] = 'layer%d-route' % layer_id  
  304.                 route_layer['name'] = 'layer%d-route' % layer_id  
  305.         route_layer['type'] = 'Concat'  
  306.         print(route_layer)  
  307.         layers.append(route_layer)  
  308.         bottom = route_layer['top']  
  309.         print(layer_id)  
  310.             topnames[layer_id] = bottom  
  311.         layer_id = layer_id + 1  
  312.   
  313.     elif block['type'] == 'upsample':  
  314.         upsample_layer = OrderedDict()  
  315.         print(block['stride'])  
  316.         upsample_layer['bottom'] = bottom  
  317.         if block.has_key('name'):  
  318.                 upsample_layer['top'] = block['name']  
  319.                 upsample_layer['name'] = block['name']  
  320.             else:  
  321.                 upsample_layer['top'] = 'layer%d-upsample' % layer_id  
  322.                 upsample_layer['name'] = 'layer%d-upsample' % layer_id  
  323.         upsample_layer['type'] = 'Upsample'  
  324.         upsample_param = OrderedDict()  
  325.         upsample_param['scale'] = block['stride']  
  326.         upsample_layer['upsample_param'] = upsample_param  
  327.         print(upsample_layer)  
  328.         layers.append(upsample_layer)  
  329.         bottom = upsample_layer['top']  
  330.         print('upsample:',layer_id)  
  331.             topnames[layer_id] = bottom  
  332.         layer_id = layer_id + 1  
  333.   
  334.         elif block['type'] == 'shortcut':  
  335.             prev_layer_id1 = layer_id + int(block['from'])  
  336.             prev_layer_id2 = layer_id - 1  
  337.             bottom1 = topnames[prev_layer_id1]  
  338.             bottom2topnames[prev_layer_id2]  
  339.             shortcut_layer = OrderedDict()  
  340.             shortcut_layer['bottom'] = [bottom1, bottom2]  
  341.             if block.has_key('name'):  
  342.                 shortcut_layer['top'] = block['name']  
  343.                 shortcut_layer['name'] = block['name']  
  344.             else:  
  345.                 shortcut_layer['top'] = 'layer%d-shortcut' % layer_id  
  346.                 shortcut_layer['name'] = 'layer%d-shortcut' % layer_id  
  347.             shortcut_layer['type'] = 'Eltwise'  
  348.             eltwise_param = OrderedDict()  
  349.             eltwise_param['operation'] = 'SUM'  
  350.             shortcut_layer['eltwise_param'] = eltwise_param  
  351.             layers.append(shortcut_layer)  
  352.             bottom = shortcut_layer['top']  
  353.    
  354.             if block['activation'] != 'linear':  
  355.                 relu_layer = OrderedDict()  
  356.                 relu_layer['bottom'] = bottom  
  357.                 relu_layer['top'] = bottom  
  358.                 if block.has_key('name'):  
  359.                     relu_layer['name'] = '%s-act' % block['name']  
  360.                 else:  
  361.                     relu_layer['name'] = 'layer%d-act' % layer_id  
  362.                 relu_layer['type'] = 'ReLU'  
  363.                 if block['activation'] == 'leaky':  
  364.                     relu_param = OrderedDict()  
  365.                     relu_param['negative_slope'] = '0.1'  
  366.                     relu_layer['relu_param'] = relu_param  
  367.                 layers.append(relu_layer)  
  368.             topnames[layer_id] = bottom  
  369.             layer_id = layer_id + 1             
  370.               
  371.         elif block['type'] == 'connected':  
  372.             fc_layer = OrderedDict()  
  373.             fc_layer['bottom'] = bottom  
  374.             if block.has_key('name'):  
  375.                 fc_layer['top'] = block['name']  
  376.                 fc_layer['name'] = block['name']  
  377.             else:  
  378.                 fc_layer['top'] = 'layer%d-fc' % layer_id  
  379.                 fc_layer['name'] = 'layer%d-fc' % layer_id  
  380.             fc_layer['type'] = 'InnerProduct'  
  381.             fc_param = OrderedDict()  
  382.             fc_param['num_output'] = int(block['output'])  
  383.             fc_layer['inner_product_param'] = fc_param  
  384.             layers.append(fc_layer)  
  385.             bottom = fc_layer['top']  
  386.   
  387.             if block['activation'] != 'linear':  
  388.                 relu_layer = OrderedDict()  
  389.                 relu_layer['bottom'] = bottom  
  390.                 relu_layer['top'] = bottom  
  391.                 if block.has_key('name'):  
  392.                     relu_layer['name'] = '%s-act' % block['name']  
  393.                 else:  
  394.                     relu_layer['name'] = 'layer%d-act' % layer_id  
  395.                 relu_layer['type'] = 'ReLU'  
  396.                 if block['activation'] == 'leaky':  
  397.                     relu_param = OrderedDict()  
  398.                     relu_param['negative_slope'] = '0.1'  
  399.                     relu_layer['relu_param'] = relu_param  
  400.                 layers.append(relu_layer)  
  401.             topnames[layer_id] = bottom  
  402.             layer_id = layer_id+1  
  403.         else:  
  404.             print('unknow layer type %s ' % block['type'])  
  405.             topnames[layer_id] = bottom  
  406.             layer_id = layer_id + 1  
  407.   
  408.     net_info = OrderedDict()  
  409.     net_info['props'] = props  
  410.     net_info['layers'] = layers  
  411.     return net_info  
  412.   
  413. if __name__ == '__main__':  
  414.     import sys  
  415.     if len(sys.argv) != 5:  
  416.         print('try:')  
  417.         print('python darknet2caffe.py tiny-yolo-voc.cfg tiny-yolo-voc.weights tiny-yolo-voc.prototxt tiny-yolo-voc.caffemodel')  
  418.         print('')  
  419.         print('please add name field for each block to avoid generated name')  
  420.         exit()  
  421.   
  422.     cfgfile = sys.argv[1]  
  423.     #net_info = cfg2prototxt(cfgfile)  
  424.     #print_prototxt(net_info)  
  425.     #save_prototxt(net_info, 'tmp.prototxt')  
  426.     weightfile = sys.argv[2]  
  427.     protofile = sys.argv[3]  
  428.     caffemodel = sys.argv[4]  
  429.     darknet2caffe(cfgfile, weightfile, protofile, caffemodel)</code>  

转换之后,就会得到yolov3.prototxt与yolov3.caffemodel。

2 搭建caffe环境

    首先caffe环境搭建自行百度解决,其次需要了解Yolov3里面有shortcut、route、upsample、yolo等这些层是caffe不支持的,但是shortcut可以用eltwise替换,route可以用concat替换,yolo只能自己写,upsample可以添加。这里添加upsample这一层的代码,代码地址:链接:https://pan.baidu.com/s/13GpoYoqKSCeFX0m0ves_fQ 密码:bwrd

    添加过程:把upsample_layer.hpp 放在include/caffe/layers下面;

                     把upsample_layer.cpp与upsample_layer.cu放在src/caffe/layers下面;

    往自己的caffe.proto里面添加UpsampleParameter相关参数;可以参考我上传的caffe.proto;

然后重新编译;编译成功之后就可以加载yolov3.prototxt与yolov3.caffemodel运行;

代码下载地址:https://github.com/ChenYingpeng/caffe-yolov3

补充说明:Windows下也可以,只要搭建了相应的环境;代码都是基于c/c++的,可以直接编译使用,自己已经测试过;

猜你喜欢

转载自blog.csdn.net/maweifei/article/details/81066578