一、prototxt
car.prototxt:原本的训练文件(A)
deploy.prototxt:GoogLeNet Caffe示例文件(B)
Inception_v1_float.prototxt:Inception v1 DNNDK示例文件(C)
1、data层(三者存在差异):
A: layer { name: "data" type:"MemoryData" top: "data" top: "label" memory_data_param { batch_size: 1 channels:3 height: 224 width: 224 } transform_param { crop_size: 224 mirror: false } } B: layer { name: "data" type: "Input" top: "data" input_param { shape: { dim: 10 dim: 3 dim: 224 dim: 224 } } } C: layer { name: "data" type: "ImageData" top: "data" top: "label" include { phase: TRAIN } transform_param { mirror: false mean_value: 104 mean_value: 117 mean_value: 123 } image_data_param { source: "./data/imagenet_256/calibration.txt" root_folder: "./data/imagenet_256/calibration_images/" batch_size: 10 shuffle: false new_height: 224 new_width: 224 } }
2、convolution_param中weight_filler的std属性(A无,B有):
3、inception_4a/output之后(A比B多出几层):
layer { name: "loss1/ave_pool" type: "Pooling" bottom: "inception_4a/output" top: "loss1/ave_pool" pooling_param { pool: AVE kernel_size: 5 stride: 3 } } layer { name: "loss1/conv" type: "Convolution" bottom: "loss1/ave_pool" top: "loss1/conv" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } convolution_param { num_output: 128 kernel_size: 1 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0.2 } } } layer { name: "loss1/relu_conv" type: "ReLU" bottom: "loss1/conv" top: "loss1/conv" } layer { name: "loss1/fc" type: "InnerProduct" bottom: "loss1/conv" top: "loss1/fc" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 1024 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0.2 } } } layer { name: "loss1/relu_fc" type: "ReLU" bottom: "loss1/fc" top: "loss1/fc" } layer { name: "loss1/drop_fc" type: "Dropout" bottom: "loss1/fc" top: "loss1/fc" dropout_param { dropout_ratio: 0.7 } } layer { name: "model_loss1/classifier" type: "InnerProduct" bottom: "loss1/fc" top: "model_loss1/classifier" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 1232 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } }
4、inception_4d/output之后(A比B多出几层):
layer { name: "loss2/ave_pool" type: "Pooling" bottom: "inception_4d/output" top: "loss2/ave_pool" pooling_param { pool: AVE kernel_size: 5 stride: 3 } } layer { name: "loss2/conv" type: "Convolution" bottom: "loss2/ave_pool" top: "loss2/conv" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } convolution_param { num_output: 128 kernel_size: 1 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0.2 } } } layer { name: "loss2/relu_conv" type: "ReLU" bottom: "loss2/conv" top: "loss2/conv" } layer { name: "loss2/fc" type: "InnerProduct" bottom: "loss2/conv" top: "loss2/fc" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 1024 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0.2 } } } layer { name: "loss2/relu_fc" type: "ReLU" bottom: "loss2/fc" top: "loss2/fc" } layer { name: "loss2/drop_fc" type: "Dropout" bottom: "loss2/fc" top: "loss2/fc" dropout_param { dropout_ratio: 0.7 } } layer { name: "model_loss2/classifier" type: "InnerProduct" bottom: "loss2/fc" top: "model_loss2/classifier" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 1232 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } }
5、pool5/drop_7x7_s1之后(A与B存在差异):
A: layer { name: "fc/hash" type: "InnerProduct" bottom: "pool5/7x7_s1" top: "fc/hash" param { lr_mult: 10 decay_mult: 10 } param { lr_mult: 20 decay_mult: 0 } inner_product_param { num_output: 1024 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } } layer { name: "fc_hash/relu" type: "ReLU" bottom: "fc/hash" top: "fc_hash/relu" } B: layer { name: "loss3/classifier" type: "InnerProduct" bottom: "pool5/7x7_s1" top: "loss3/classifier" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { num_output: 1000 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } } layer { name: "prob" type: "Softmax" bottom: "loss3/classifier" top: "prob" }