1. Data layers
# LMDB data (transform_param scale 0.00390625 = 1/256 rescales 8-bit pixel values to roughly [0, 1))
layer {
name: "left_eye"
type: "Data"
top: "data_left_eye"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
}
data_param {
source: "left_eye_regression/lmdb/train_data_npd"
batch_size: 64
backend: LMDB
}
}
layer {
name: "left_eye"
type: "Data"
top: "label_left_eye"
include {
phase: TRAIN
}
data_param {
source: "left_eye_regression/lmdb/train_label_npd"
batch_size: 64
backend: LMDB
}
}
layer {
name: "left_eye"
type: "Data"
top: "data_left_eye"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
data_param {
source: "left_eye_regression/lmdb/test_data_npd"
batch_size: 64
backend: LMDB
}
}
layer {
name: "left_eye"
type: "Data"
top: "label_left_eye"
include {
phase: TEST
}
data_param {
source: "left_eye_regression/lmdb/test_label_npd"
batch_size: 64
backend: LMDB
}
}
# HDF5 data
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
hdf5_data_param {
source: "examples/hdf5_classification/data/train.txt"
batch_size: 10
}
}
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TEST
}
hdf5_data_param {
source: "examples/hdf5_classification/data/test.txt"
batch_size: 10
}
}
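The source of an HDF5Data layer is not an HDF5 file itself but a plain text file listing one HDF5 file path per line; each listed file must contain datasets whose names match the tops ("data" and "label" here). Illustrative contents of such a list file (the file names below are placeholders, not the actual contents of train.txt):
examples/hdf5_classification/data/train_part0.h5
examples/hdf5_classification/data/train_part1.h5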
2. Other layers
# convolution layer
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
kernel_size: 3
stride: 1
# pad: 2                     # optional: zero-padding around the input
weight_filler {
type: "xavier"               # alternative: type: "gaussian" with std: 0.0001
}
bias_filler {
type: "constant"
}
}
}
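As a quick geometry check (the 28x28 input size below is only an illustration, not something fixed by the prototxt): Caffe computes the spatial output size of a convolution as
output = (input + 2 * pad - kernel_size) / stride + 1   (integer division)
so a 28x28 input with kernel_size 3, stride 1 and the optional pad 2 gives (28 + 4 - 3) / 1 + 1 = 30, i.e. this layer would produce a 32 x 30 x 30 blob per image.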
# pooling layer
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX                    # or AVE
kernel_size: 2
stride: 2
}
}
# activation layers
layer {
name: "relu1"
type: "ReLU"
bottom: "pool1"
top: "pool1"
}
layer {
name: "prelu"
type: "PReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "tanh4"
type: "TanH"
bottom: "ip1"
top: "ip1"
}
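The activation layers above are written with bottom == top (e.g. ReLU reads and writes "pool1"), so they run in place and overwrite their input blob instead of allocating a new one.
# fully connected (InnerProduct) layer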
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 256
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
# local response normalization (LRN) layer
layer {
name: "norm2"
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 3
alpha: 5e-05
beta: 0.75
norm_region: WITHIN_CHANNEL
}
}
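For reference, the LRN layer divides each input value by (1 + (alpha / n) * sum_i x_i^2)^beta, where the sum runs over the local region and n is its size; with norm_region: WITHIN_CHANNEL the region is a local_size x local_size spatial window inside each channel, and alpha, beta are the parameters set above.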
# dropout layer (dropout_ratio: probability of zeroing each unit during TRAIN; at TEST the layer passes data through unchanged)
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7-conv"
top: "fc7-conv"
dropout_param {
dropout_ratio: 0.5
}
}
# sigmoid layer
layer {
name: "Sigmoid1"
type: "Sigmoid"
bottom: "pool1"
top: "Sigmoid1"
}
# Softmax layer
layer {
name: "prob"
type: "Softmax"
bottom: "ip1"
top: "prob"
}
# Softmax loss (combines a Softmax layer with multinomial logistic loss; numerically more stable than a separate Softmax followed by a loss layer)
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip1"
bottom: "label"
top: "loss"
}
# accuracy (TEST phase only; reports classification accuracy and produces no gradient)
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip1"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
3. Special layers
# Slice layer: splits a blob along one dimension. axis: the dimension to split; slice_point: where to cut (number of slice_point entries = number of tops - 1).
If slice_point is omitted, the bottom is split evenly among the tops (here label1 and label2).
slice_point: 10 assigns indices [0, 10) along the axis to label1 and the remainder to label2.
layer {
name: "slice"
type: "Slice"
bottom: "label"
top: "label1"
top: "label2"
slice_param {
axis: 1
slice_point: 10
}
}
# label1 gets [0, 10), label2 gets [10, 20), label3 gets the rest (e.g. a 64x30 bottom yields three 64x10 tops).
layer {
name: "slice"
type: "Slice"
bottom: "label"
top: "label1"
top: "label2"
top: "label3"
slice_param {
axis: 1
slice_point: 10
slice_point: 20
}
}
# Concat layer: concatenates its bottoms along axis 1 (channels); here the four eye/nose/mouth patches are stacked into the 12-channel input used by the MemoryData layer in section 5.
layer {
name: "data_all"
type: "Concat"
bottom: "data_left_eye"
bottom: "data_right_eye"
bottom: "data_nose"
bottom: "data_mouth"
top: "data_all"
concat_param {
axis: 1
}
}
layer {
name: "label_all"
type: "Concat"
bottom: "label_left_eye"
bottom: "label_right_eye"
bottom: "label_nose"
bottom: "label_mouth"
top: "label_all"
concat_param {
axis: 1
}
}
# Reshape layer
layer {
name: "reshape"
type: "Reshape"
bottom: "input"
top: "output"
reshape_param {
shape {
dim: 0 # copy the dimension from below
dim: 2
dim: 3
dim: -1 # infer it from the other dimensions
}
}
}
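As a worked example (the bottom shape is illustrative): a bottom of shape 32x3x4x6 reshaped with the spec above becomes 32x2x3x12, since dim: 0 copies the batch dimension, dims 2 and 3 are set explicitly, and dim: -1 absorbs the remaining 3*4*6 / (2*3) = 12 elements; the total number of elements must stay unchanged.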
# batch normalization (BatchNorm + Scale): the three BatchNorm parameters (running mean, running variance, moving-average factor) are updated by running averages rather than SGD, hence lr_mult: 0; the following Scale layer with bias_term: true supplies the learnable scale and shift.
layer {
name: "BatchNorm1"
type: "BatchNorm"
bottom: "conv1"
top: "conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "scale1"
type: "Scale"
bottom: "conv1"
top: "conv1"
scale_param {
bias_term: true
}
}
4. Loss layers
Common Caffe losses (these are the legacy upper-case type names; the current equivalents and an extra example follow the list):
MULTINOMIAL_LOGISTIC_LOSS   multinomial logistic loss
SIGMOID_CROSS_ENTROPY_LOSS  sigmoid cross-entropy loss
SOFTMAX_LOSS                softmax loss
EUCLIDEAN_LOSS              squared-error (Euclidean) loss
HINGE_LOSS                  hinge loss (SVM)
INFOGAIN_LOSS               information-gain loss
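In current prototxt syntax these correspond to the layer types MultinomialLogisticLoss, SigmoidCrossEntropyLoss, SoftmaxWithLoss, EuclideanLoss, HingeLoss and InfogainLoss. As an illustration (the blob names ip2/label are placeholders), a sigmoid cross-entropy loss is declared just like the other loss layers in this section:
layer {
name: "loss"
type: "SigmoidCrossEntropyLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}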
# Euclidean (squared-error) loss
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
# multiple losses: loss_weight sets each loss's contribution to the total objective (here total = 0.4 * loss1 + 0.6 * loss2)
layer {
name: "loss1"
type: "EuclideanLoss"
bottom: "out1"
bottom: "label1"
top: "loss1"
loss_weight: 0.4
}
layer {
name: "loss2"
type: "EuclideanLoss"
bottom: "out2"
bottom: "label2"
top: "loss2"
loss_weight: 0.6
}
5. Test-time data layer (MemoryData)
layer
{
name: "data"
type: "MemoryData"
top: "data_all"
top: "label"
memory_data_param
{
batch_size: 1
channels: 12
height: 36
width: 48
}
transform_param
{
scale: 0.00390625
}
}
Building the test-time input: merge the four image patches channel-wise into a single 12-channel Mat.
// som.points: detected facial landmarks; Scale, img and checkxy() come from
// the surrounding program (not shown here).
Mat mats[4];
vector<Rect> rect4;
// k = 0..3 -> presumably left eye, right eye, nose, mouth (the mouth center
// is taken as the midpoint of landmark points 3 and 4).
for (int k = 0; k < 4; k++)
{
float x, y;
if (k == 3) {
x = (som.points[k].x + som.points[k + 1].x) / 2.0 * Scale;
y = (som.points[k].y + som.points[k + 1].y) / 2.0 * Scale;
}
else
{
x = som.points[k].x * Scale;
y = som.points[k].y * Scale;
}
// shift from the landmark to the patch's top-left corner: the 48-px-wide
// patch is centered horizontally (x - 24); the vertical offset differs per part.
x = x - 24;
if (k == 2)
{
y = y - 12;
}
else if (k == 3)
{
y = y - 20;
}
else
{
y = y - 24;
}
checkxy(x, y); // presumably keeps the 48x36 patch inside the image bounds
Rect rect(x, y, 48, 36);
rect4.push_back(rect);
Mat roi = img(rect);
//imshow("test", roi);
//waitKey();
mats[k] = roi;
}
vector<Mat> dd;
Mat all_data(36, 48, CV_8UC(12)); // 36x48, 12 channels = 4 patches x 3 channels each
// merge the mats array channel-wise; 4 is the number of Mats to merge
merge(mats, 4, all_data);
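To actually run the network on this merged patch, the 12-channel Mat can be handed to the MemoryData layer through Caffe's C++ API (Caffe must be built with OpenCV for AddMatVector to be available). The sketch below is illustrative only: the deploy.prototxt / model.caffemodel file names and the output blob name "prob" are assumptions, the layer name "data" matches the MemoryData layer declared in section 5, and the header path varies slightly between Caffe versions.

#include <vector>
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <caffe/caffe.hpp>
#include <caffe/layers/memory_data_layer.hpp>  // older Caffe versions declare it elsewhere

// Feed one merged 36x48, 12-channel patch through the net and return a pointer
// to the output blob's data (valid while the net is alive).
const float* RunOnMergedPatch(caffe::Net<float>& net, const cv::Mat& all_data) {
  // The MemoryData layer was declared with name: "data" in section 5.
  boost::shared_ptr<caffe::MemoryDataLayer<float> > input_layer =
      boost::dynamic_pointer_cast<caffe::MemoryDataLayer<float> >(
          net.layer_by_name("data"));

  // batch_size is 1 in the prototxt, so pass exactly one Mat; the label is a
  // dummy value because only the forward pass matters at test time.
  std::vector<cv::Mat> samples(1, all_data);
  std::vector<int> dummy_labels(1, 0);
  input_layer->AddMatVector(samples, dummy_labels);

  net.Forward();
  return net.blob_by_name("prob")->cpu_data();  // assumed output blob name
}

// Usage (illustrative file names):
//   caffe::Net<float> net("deploy.prototxt", caffe::TEST);
//   net.CopyTrainedLayersFrom("model.caffemodel");
//   const float* pred = RunOnMergedPatch(net, all_data);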