Caffe Python API Notes


Contents

1. Importing Packages

2. Defining Layers

3. Generating prototxt Files

4. Generating the Solver File

5. Model Training

6. Accessing Layer Outputs

7. Accessing Network Parameters via net.params

8. Converting a Binary Mean File to a Python Mean File

9. Image Preprocessing

10. Custom Function: Visualizing Parameters/Convolution Outputs

11. Custom: Visualizing Loss & Accuracy During Training


1. Importing Packages

import sys
import os
caffe_root = './'  # path to the Caffe root directory
sys.path.insert(0, caffe_root + 'python')    # add the Caffe Python bindings to the Python path

import caffe
from caffe import layers as L
from caffe import params as P

2. Defining Layers

When creating a layer through the Python interface, the arguments must conform to the layer definition in caffe.proto.

  • lmdb/leveldb Data layer
L.Data( source=lmdb,
        backend=P.Data.LMDB,
        batch_size=batch_size, ntop=2,
        transform_param=dict( crop_size=227,
                              mean_value=[104, 117, 123],
                              mirror=True   )     )

# Example
data, label = L.Data( batch_size=batch_size,
                      backend=P.Data.LMDB,
                      source='mnist/mnist_test_lmdb',
                      transform_param=dict(scale=1./255),
                      ntop=2 )  # ntop=2 requests two top blobs (data and label)
  • HDF5 Data layer

The prototxt definition, followed by its Python equivalents:
layer {
  name: "image"
  type: "HDF5Data"
  top: "image"
  include {
    phase: TRAIN
  }
  hdf5_data_param {
    source: "./training_data_paths.txt"
    batch_size: 64
  }
}


image = L.HDF5Data( hdf5_data_param={ 'source': './training_data_paths.txt',  
                                      'batch_size': 64                         },
                    include={'phase': caffe.TRAIN    }                              )
                     
                     
data, label = L.HDF5Data( batch_size=batch_size, 
                          source=source_path, 
ntop=2, # number of top blobs to return
                          include={'phase': caffe.TRAIN}        ) 

data, label = L.HDF5Data( batch_size=batch_size, 
                          source=source_path,
                          ntop=2, 
                          include={'phase': caffe.TEST}        )
  • ImageData layer

Suitable for data sources where a txt file lists one image per line (see the sketch of the list format after the code below).

L.ImageData(source=list_path,
            batch_size=batch_size,
            new_width=48,
            new_height=48,
            ntop=2,
            transform_param=dict(crop_size=40, mirror=True)      )
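
A minimal sketch of such a list file (hypothetical image paths and integer labels):

# images.txt (hypothetical): one "<path> <label>" pair per line
/data/train/cat_0001.jpg 0
/data/train/dog_0001.jpg 1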
  • Convolution layer

L.Convolution(  bottom, 
                kernel_size=ks, 
                stride=stride,
                num_output=nout, 
                pad=pad, 
                group=group                   )


# Example
conv1 = L.Convolution( data_blob, 
                       kernel_size=5, 
                       num_output=20, 
                       weight_filler=dict(type='xavier')        )
  • LRN layer
L.LRN(  bottom, 
        local_size=5, 
        alpha=1e-4, 
        beta=0.75        )
  • ReLU layer
L.ReLU( bottom, 
        in_place=True        )

# Example
relu1 = L.ReLU(data_blob, in_place=True)
  • Pooling layer

L.Pooling( bottom,
            pool=P.Pooling.MAX, 
            kernel_size=ks, 
            stride=stride            )

# Example
pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
  • Fully connected (InnerProduct) layer
L.InnerProduct( bottom, 
                num_output=nout                )
  • Dropout layer
L.Dropout(  bottom, 
            in_place=True         )
  • Loss layer
L.SoftmaxWithLoss(  bottom, 
                    label                 )

# Example
loss =  L.SoftmaxWithLoss(score_blob, label)
  • Accuracy layer
L.Accuracy( bottom,
            label            )
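
Mirroring the loss example above, a usage sketch:

# Example
acc = L.Accuracy(score_blob, label)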

3. Generating prototxt Files

caffe.NetSpec is a class defined in caffe/net_spec.py:

 """A NetSpec contains a set of Tops (assigned directly as attributes).
    Calling NetSpec.to_proto generates a NetParameter containing all of the
    layers needed to produce all of the assigned Tops, using the assigned
    names.
    NetSpec包括一系列的被赋值成key的Tops,调用NetSpec.to_proto生成NetParameter
    NetParameter包括生成所有tops的layer定义,NetParameter使用names做key
    
 """
def __init__(self):
        super(NetSpec, self).__setattr__('tops', OrderedDict())
    
    #方便使用.操作符调用
    def __setattr__(self, name, value):
        self.tops[name] = value

    def __getattr__(self, name):
        return self.tops[name]
    
    #方便使用[]访问字典的值
    def __setitem__(self, key, value):
        self.__setattr__(key, value)

    def __getitem__(self, item):
        return self.__getattr__(item)

    def to_proto(self):
        names = {v: k for k, v in six.iteritems(self.tops)}
        autonames = Counter()
        layers = OrderedDict()
        for name, top in six.iteritems(self.tops):
            top._to_proto(layers, names, autonames)
        net = caffe_pb2.NetParameter()
        net.layer.extend(layers.values())
        return net

A typical use of NetSpec is to build a network inside a function and serialize it with to_proto(), as in this LeNet example:

from caffe import layers as L, params as P

# Arguments: lmdb        path to the data files
#            batch_size  batch size
def lenet(lmdb, batch_size):
    # our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()
    
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.fc1 =   L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.score = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
    n.loss =  L.SoftmaxWithLoss(n.score, n.label)
    
    # return the generated proto
    return n.to_proto()

# str(proto_object) returns the network definition as a string
with open('mnist/lenet_auto_train.prototxt', 'w') as f:
    f.write(str(lenet('mnist/mnist_train_lmdb', 64)))
    
with open('mnist/lenet_auto_test.prototxt', 'w') as f:
    f.write(str(lenet('mnist/mnist_test_lmdb', 100)))
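
As an optional sanity check, the generated definition can be printed instead of written to disk (a sketch):

# sketch: inspect the generated NetParameter as text
print str(lenet('mnist/mnist_train_lmdb', 64))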

4. Generating the Solver File

from caffe.proto import caffe_pb2

# create the solver proto object
s = caffe_pb2.SolverParameter()

s.train_net = 'train.prototxt'          # training net definition
s.test_net.append('val.prototxt')       # test net definition; multiple test nets are allowed
s.test_interval = 782                   # test every 782 training iterations
s.test_iter.append(313)                 # number of forward passes per test
s.max_iter = 78200                      # maximum number of training iterations

s.base_lr = 0.001                       # base learning rate
s.momentum = 0.9                        # momentum coefficient
s.weight_decay = 5e-4                   # weight-decay coefficient
s.lr_policy = 'step'                    # learning-rate decay policy
s.stepsize = 26067                      # only used by the 'step' policy
s.gamma = 0.1                           # learning-rate decay factor
s.display = 782                         # logging interval
s.snapshot = 7820
s.snapshot_prefix = 'snapshot'
s.type = 'SGD'                          # optimization algorithm
s.solver_mode = caffe_pb2.SolverParameter.GPU

# path where the solver file is saved
path = '/home/xxx/data/'
solver_file = path + 'solver.prototxt'
with open(solver_file, 'w') as f:
    f.write(str(s))

5. Model Training

# training setup
caffe.set_device(gpu_id) # defaults to device 0 if not set
caffe.set_mode_gpu()    # use the GPU
caffe.set_mode_cpu()    # use the CPU

# two common ways to load a solver
# 1. force SGD regardless of the solver type in the prototxt
solver = caffe.SGDSolver('/home/xxx/data/solver.prototxt')
# 2. read solver_type from the solver prototxt (defaults to SGD)
solver = caffe.get_solver('/home/xxx/data/solver.prototxt')

# training the model
# 1.1 forward pass
solver.net.forward()  # train net
# 1.2 backward pass: compute the gradients
solver.net.backward()
# 1.3 run one test pass
solver.test_nets[0].forward()  # test net (there can be more than one)
# 1.4 save the parameters
solver.net.save('mymodel.caffemodel')

# 2. one complete training step: forward + backward + parameter update
solver.step(1)
# 3. train the full model according to the solver settings
solver.solve()
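
Once weights have been saved, they can be loaded into a deploy-time net for inference; a minimal sketch (deploy.prototxt is a hypothetical deploy definition matching the trained net):

# sketch: load saved weights for inference
net = caffe.Net('deploy.prototxt',       # hypothetical deploy net definition
                'mymodel.caffemodel',    # weights saved above
                caffe.TEST)              # inference mode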

6. Accessing Layer Outputs

The output of each layer is stored in net.blobs, an OrderedDict keyed by blob name; a typical blob shape is (batch_size, channels, height, width).

# blobs are keyed by blob name
# the value is a numpy array of shape (batch_size, channels, H, W)
feat = net.blobs['conv1'].data[0, :36]

# iterate over every layer in the net and print the output shape
for layer_name, blob  in net.blobs.iteritems():
    print layer_name, '\t', str(blob.data.shape)

'''
Output:
    data	(50, 3, 227, 227)
    conv1	(50, 96, 55, 55)
    pool1	(50, 96, 27, 27)
    norm1	(50, 96, 27, 27)
    conv2	(50, 256, 27, 27)
    pool2	(50, 256, 13, 13)
    norm2	(50, 256, 13, 13)
    conv3	(50, 384, 13, 13)
    conv4	(50, 384, 13, 13)
    conv5	(50, 256, 13, 13)
    pool5	(50, 256, 6, 6)
    fc6		(50, 4096)
    fc7		(50, 4096)
    fc8		(50, 1000)
    prob	(50, 1000)
'''
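
Alongside data, every blob also carries its backpropagated gradients in the diff field, with the same shape:

# gradients for a blob mirror its data shape
grad = net.blobs['conv1'].diff[0]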

7. Accessing Network Parameters via net.params

Parameter values are stored in net.params, an OrderedDict keyed by layer name: index 0 holds the weights and index 1 the biases. A typical weight shape is (output_channels, input_channels, filter_height, filter_width); a typical bias shape is (output_channels,).

'''
 the parameters are a list of [weights, biases]
 params are keyed by layer name
 the value is a list: index 0 is the weights, shape (out_channels, in_channels, h, w)
                      index 1 is the biases, shape (out_channels,)
'''
filters = net.params['conv1'][0].data

# iterate over the network parameters
for layer_name, param in net.params.iteritems():
    print layer_name + '\t' + str(param[0].data.shape), str(param[1].data.shape)

'''
Output:
    conv1	(96, 3, 11, 11) (96,)
    conv2	(256, 48, 5, 5) (256,)
    conv3	(384, 256, 3, 3) (384,)
    conv4	(384, 192, 3, 3) (384,)
    conv5	(256, 192, 3, 3) (256,)
    fc6	(4096, 9216) (4096,)
    fc7	(4096, 4096) (4096,)
    fc8	(1000, 4096) (1000,)
'''
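
Building on the same loop, a small sketch that counts the total number of learnable parameters (assumes every layer in net.params has both weights and biases):

# total learnable parameters (weights + biases) across all layers
n_params = sum(p[0].data.size + p[1].data.size for p in net.params.values())
print n_params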

8. Converting a Binary Mean File to a Python Mean File

import numpy as np

# convert a binary mean file to a numpy (.npy) mean file
def convert_mean(binMean, npyMean):
    # create a BlobProto object
    blob = caffe.proto.caffe_pb2.BlobProto()
    bin_mean = open(binMean, 'rb').read()
    # parse the binary data into the BlobProto
    blob.ParseFromString(bin_mean)
    # convert the BlobProto to a numpy array
    arr = np.array(caffe.io.blobproto_to_array(blob))
    npy_mean = arr[0]
    np.save(npyMean, npy_mean)

# call the function to convert the mean file
binMean='examples/cifar10/mean.binaryproto'
npyMean='examples/cifar10/mean.npy'
convert_mean(binMean,npyMean)
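
The saved .npy mean is what feeds set_mean in the next section; a minimal sketch of deriving the per-channel mean mu used there (assuming the mean image has shape (C, H, W)):

# load the mean image and average over pixels to get per-channel means
mu = np.load(npyMean)        # shape (C, H, W)
mu = mu.mean(1).mean(1)      # shape (C,)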

9. Image Preprocessing

# create a transformer for the input blob named 'data'
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
# move the channel axis to the front: (H, W, C) -> (C, H, W)
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', mu)            # subtract the per-channel mean
transformer.set_raw_scale('data', 255)      # rescale from [0, 1] to [0, 255]
transformer.set_channel_swap('data', (2,1,0))  # swap channels from RGB to BGR

# load an image
# caffe.io.load_image returns values in [0, 1]; cv2.imread returns values in [0, 255]
image = caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')
# preprocess the image, including rescaling the values to [0, 255]
transformed_image = transformer.preprocess('data', image)
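
With the input preprocessed, running the net is a matter of copying it into the data blob; a sketch, assuming a CaffeNet-style net with a 'prob' top as in section 6:

# copy the preprocessed image into the input blob and run a forward pass
net.blobs['data'].data[...] = transformed_image
output = net.forward()
prob = output['prob'][0]   # predicted class probabilities for the first image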

10. Custom Function: Visualizing Parameters/Convolution Outputs

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import caffe
%matplotlib inline

plt.rcParams['figure.figsize'] = (8, 8)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

def show_data(data, padsize=1, padval=0):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
    and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""
    # normalize data to [0, 1]
    data -= data.min()
    data /= data.max()

    # number of images per row/column of the output grid, from data.shape[0]
    n = int(np.ceil(np.sqrt(data.shape[0])))
    # padding = ((padding of the image-count axis), (height padding), (width padding), ...)
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))

    # tile the padded images into an n x n grid: reshape to (n, n, H, W, ...) and transpose to (n, H, n, W, ...)
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    # then merge the grid axes: (n, H, n, W) -> (n*H, n*W)
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.figure()
    plt.imshow(data, cmap='gray')
    plt.axis('off')

# Example: show the output data and the weights (filters) of the first convolution layer
print net.blobs['conv1'].data[0].shape
show_data(net.blobs['conv1'].data[0])
print net.params['conv1'][0].data.shape
show_data(net.params['conv1'][0].data.reshape(32*3,5,5))

11. Custom: Visualizing Loss & Accuracy During Training

import numpy as np
import matplotlib.pyplot as plt
import caffe
caffe.set_device(0)
caffe.set_mode_gpu()
# use SGDSolver, i.e. stochastic gradient descent
solver = caffe.SGDSolver('/home/xxx/mnist/solver.prototxt')
  
# equivalent to max_iter in the solver file: maximum number of solver steps
niter = 10000

# collect the train loss every 100 iterations
display = 100

# each test pass runs 100 forward iterations
test_iter = 100

# test every 500 training iterations
test_interval = 500

# initialization (np.zeros needs an integer length)
train_loss = np.zeros(int(np.ceil(niter * 1.0 / display)))
test_loss = np.zeros(int(np.ceil(niter * 1.0 / test_interval)))
test_acc = np.zeros(int(np.ceil(niter * 1.0 / test_interval)))

# accumulators
_train_loss = 0; _test_loss = 0; _accuracy = 0
# training loop
for it in range(niter):
    # one full solver step (forward + backward + update)
    solver.step(1)
    # accumulate the train loss
    _train_loss += solver.net.blobs['SoftmaxWithLoss1'].data
    if it % display == 0:
        # average train loss over the display window
        train_loss[it // display] = _train_loss / display
        _train_loss = 0

    if it % test_interval == 0:
        for test_it in range(test_iter):
            # one test forward pass
            solver.test_nets[0].forward()
            # accumulate the test loss
            _test_loss += solver.test_nets[0].blobs['SoftmaxWithLoss1'].data
            # accumulate the test accuracy
            _accuracy += solver.test_nets[0].blobs['Accuracy1'].data
        # average test loss
        test_loss[it // test_interval] = _test_loss / test_iter
        # average test accuracy
        test_acc[it // test_interval] = _accuracy / test_iter
        _test_loss = 0
        _accuracy = 0
  
# plot the train loss, test loss and test accuracy curves
print '\nplot the train loss and test accuracy\n'
_, ax1 = plt.subplots()
ax2 = ax1.twinx()

# train loss -> green
ax1.plot(display * np.arange(len(train_loss)), train_loss, 'g')
# test loss -> yellow
ax1.plot(test_interval * np.arange(len(test_loss)), test_loss, 'y')
# test accuracy -> red
ax2.plot(test_interval * np.arange(len(test_acc)), test_acc, 'r')
  
ax1.set_xlabel('iteration')  
ax1.set_ylabel('loss')  
ax2.set_ylabel('accuracy')  
plt.show()

