Training and Testing Your Own Dataset with MXNet

Using MXNet to train an image classifier.
1. Prepare the data:
(1) Create a root directory, then create one sub-folder per class and put each class's images into the corresponding sub-folder:
--root:
----class1
----class2
......
----classn
First, generate the .lst files for the training and validation sets with the following command:

python ~/mxnet/tools/im2rec.py --list True --recursive True --train-ratio 0.9 myData /home/xxx/root/

--list: must be set to True when you want to generate .lst files; it tells im2rec that this run only produces the list files (by default it generates .rec files);

--recursive: recursively walk all sub-folders of your dataset; set it to True;

--train-ratio: splits the whole dataset into a training set (train) and a validation set (val); this parameter decides how much data goes into each;

--test-ratio: same idea, but splits off a test set instead;

--exts: the file extensions of your data (image files); by default im2rec only picks up two image formats: .jpg and .jpeg.
After this command finishes you will find two new files: myData_train.lst and myData_val.lst.
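
Each line of a .lst file is tab-separated: an integer index, the label (written as a float), and the image path relative to the root directory. As an optional check you can print the first few lines; the paths and labels shown in the comment are only illustrative:

# Peek at the generated list file.
with open('myData_train.lst') as f:
    for line in list(f)[:3]:
        print(line.rstrip())
# Expected format (example values): 0 <tab> 0.000000 <tab> class1/img_0001.jpg
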
(2) Generate the .rec files:

python ~/mxnet/tools/im2rec.py --num-thread 4 --pass-through 1 myData /home/xxx/root/

--pass-through: set it to 1 to skip the image transformation step; otherwise im2rec may fail with an "unknown array type" error;

myData is the prefix of the .lst files generated in the previous step; it is reused here to produce the .rec files.

After this command finishes you will see two more files: myData_train.rec and myData_val.rec
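
To sanity-check the packed file you can read one record back with mx.recordio. This is a minimal optional sketch; it assumes the myData prefix used above and needs OpenCV installed for decoding:

import mxnet as mx

rec = mx.recordio.MXRecordIO('myData_train.rec', 'r')
item = rec.read()                            # first record
header, img = mx.recordio.unpack_img(item)   # header.label is the class label, img is a BGR numpy array
print(header.label, img.shape)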

2. Load the training and validation data:

def get_iterators(batch_size, data_shape=(3, 224, 224)):
    train = mx.io.ImageRecordIter(
        path_imgrec         = './myData_train.rec',
        data_name           = 'data',
        label_name          = 'softmax_label',
        batch_size          = batch_size,
        data_shape          = data_shape,
        shuffle             = True,
        rand_crop           = True,
        rand_mirror         = True)
    val = mx.io.ImageRecordIter(
        path_imgrec         = './myData_val.rec',
        data_name           = 'data',
        label_name          = 'softmax_label',
        batch_size          = batch_size,
        data_shape          = data_shape,
        rand_crop           = False,
        rand_mirror         = False)
    return (train, val)
train, val = get_iterators(128, (3, 128, 128))  # specify the batch size and the input image size
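
As an optional check, you can pull one batch from the iterator and confirm the shapes before training:

# Fetch one batch; data should be (batch_size, 3, 128, 128) and label (batch_size,).
batch = train.next()
print(batch.data[0].shape, batch.label[0].shape)
train.reset()  # rewind so training starts from the first batch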

3. Define the network architecture:
Here ResNet is used as an example:

'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu

Implemented the following paper:

Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
import mxnet as mx
import numpy as np

def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
    """Return ResNet Unit symbol for building ResNet
    Parameters
    ----------
    data : str
        Input data
    num_filter : int
        Number of output channels
    bnf : int
        Bottle neck channels factor with regard to num_filter
    stride : tuple
        Stride used in convolution
    dim_match : Boolean
        True means channel number between input and output is the same, otherwise means differ
    name : str
        Base name of the operators
    workspace : int
        Workspace used in convolution operator
    """
    if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        weight = mx.symbol.Variable(name=name + '_conv1_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv1 = mx.sym.Convolution(data=act1, weight=weight, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        weight = mx.symbol.Variable(name=name + '_conv2_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv2 = mx.sym.Convolution(data=act2, weight=weight, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
        act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
        weight = mx.symbol.Variable(name=name + '_conv3_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv3 = mx.sym.Convolution(data=act3, weight=weight, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
                                   workspace=workspace, name=name + '_conv3')
        if dim_match:
            shortcut = data
        else:
            weight = mx.symbol.Variable(name=name + '_sc_weight', dtype=np.float32)
            weight = mx.symbol.Cast(data=weight, dtype=np.float16)
            shortcut = mx.sym.Convolution(data=act1, weight=weight, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                            workspace=workspace, name=name+'_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv3 + shortcut
    else:
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        weight = mx.symbol.Variable(name=name + '_conv1_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv1 = mx.sym.Convolution(data=act1, weight=weight, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
                                      no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        weight = mx.symbol.Variable(name=name + '_conv2_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv2 = mx.sym.Convolution(data=act2, weight=weight, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
                                      no_bias=True, workspace=workspace, name=name + '_conv2')
        if dim_match:
            shortcut = data
        else:
            weight = mx.symbol.Variable(name=name + '_sc_weight', dtype=np.float32)
            weight = mx.symbol.Cast(data=weight, dtype=np.float16)
            shortcut = mx.sym.Convolution(data=act1, weight=weight, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                            workspace=workspace, name=name+'_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv2 + shortcut

def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
    """Return ResNet symbol of
    Parameters
    ----------
    units : list
        Number of units in each stage
    num_stages : int
        Number of stage
    filter_list : list
        Channel size of each stage
    num_classes : int
        Output size of symbol
    dataset : str
        Dataset type, only cifar10 and imagenet are supported
    workspace : int
        Workspace used in convolution operator
    """
    num_unit = len(units)
    assert(num_unit == num_stages)
    data = mx.sym.Variable(name='data')
    data = mx.symbol.Cast(data=data, dtype=np.float16)
    data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
    (nchannel, height, width) = image_shape
    weight = mx.symbol.Variable(name='conv0_weight', dtype=np.float32)
    weight = mx.symbol.Cast(data=weight, dtype=np.float16)
    if height <= 32:            # such as cifar10
        body = mx.sym.Convolution(data=data, weight=weight, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
                                  no_bias=True, name="conv0", workspace=workspace)
    else:                       # often expected to be 224 such as imagenet
        body = mx.sym.Convolution(data=data, weight=weight, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
                                  no_bias=True, name="conv0", workspace=workspace)
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
        body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
        body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')

    for i in range(num_stages):
        body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
                             name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
                             memonger=memonger)
        for j in range(units[i]-1):
            body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
                                 bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
    bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
    relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    # Although kernel is not used here when global_pool=True, we should put one
    pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
    flat = mx.symbol.Flatten(data=pool1)
    weight = mx.symbol.Variable(name='fc1_weight', dtype=np.float32)
    bias = mx.symbol.Variable(name='fc1_bias', dtype=np.float32)
    weight = mx.symbol.Cast(data=weight, dtype=np.float16)
    bias = mx.symbol.Cast(data=bias, dtype=np.float16)
    fc1 = mx.symbol.FullyConnected(data=flat, weight=weight, bias=bias, num_hidden=num_classes, name='fc1')
    fc1 = mx.symbol.Cast(data=fc1, dtype=np.float32)
    return mx.symbol.SoftmaxOutput(data=fc1, name='softmax')

def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, **kwargs):
    """
    Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
    Original author Wei Wu
    """
    image_shape = [int(l) for l in image_shape.split(',')]
    (nchannel, height, width) = image_shape
    if height <= 28:
        num_stages = 3
        if (num_layers-2) % 9 == 0 and num_layers >= 164:
            per_unit = [(num_layers-2)//9]
            filter_list = [16, 64, 128, 256]
            bottle_neck = True
        elif (num_layers-2) % 6 == 0 and num_layers < 164:
            per_unit = [(num_layers-2)//6]
            filter_list = [16, 16, 32, 64]
            bottle_neck = False
        else:
            raise ValueError("no experiments done on num_layers {}, you can do it youself".format(num_layers))
        units = per_unit * num_stages
    else:
        if num_layers >= 50:
            filter_list = [64, 256, 512, 1024, 2048]
            bottle_neck = True
        else:
            filter_list = [64, 64, 128, 256, 512]
            bottle_neck = False
        num_stages = 4
        if num_layers == 18:
            units = [2, 2, 2, 2]
        elif num_layers == 34:
            units = [3, 4, 6, 3]
        elif num_layers == 50:
            units = [3, 4, 6, 3]
        elif num_layers == 101:
            units = [3, 4, 23, 3]
        elif num_layers == 152:
            units = [3, 8, 36, 3]
        elif num_layers == 200:
            units = [3, 24, 36, 3]
        elif num_layers == 269:
            units = [3, 30, 48, 8]
        else:
            raise ValueError("no experiments done on num_layers {}, you can do it youself".format(num_layers))

    return resnet(units       = units,
                  num_stages  = num_stages,
                  filter_list = filter_list,
                  num_classes = num_classes,
                  image_shape = image_shape,
                  bottle_neck = bottle_neck,
                  workspace   = conv_workspace)
resnet18 = get_symbol(2, 18, '3,128,128')  # 2 classes, ResNet-18, input shape 3x128x128
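
Before training it can help to confirm the symbol's output shape for a single 128x128 image (an optional check):

# The output should be one probability per class, i.e. (1, 2) for 2 classes.
arg_shapes, out_shapes, aux_shapes = resnet18.infer_shape(data=(1, 3, 128, 128))
print(out_shapes)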

4. Train and save the model:

# construct a callback function to save checkpoints
model_prefix = 'resnet18'
checkpoint = mx.callback.do_checkpoint(model_prefix)

mod = mx.mod.Module(symbol=resnet18, context=mx.gpu(0))
mod.fit(train,
        eval_data=val,
        optimizer='sgd',
        optimizer_params={'learning_rate':0.001, 'momentum': 0.9},
        eval_metric='acc',
        num_epoch=30, 
        epoch_end_callback=checkpoint)
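
Each epoch the checkpoint callback writes resnet18-symbol.json plus resnet18-<epoch>.params (e.g. resnet18-0030.params after 30 epochs). As an optional final check you can score the trained module on the validation iterator:

# Optional: evaluate the final model on the validation set.
val.reset()
print(mod.score(val, ['acc']))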

5. Load the model for testing:

sym, arg_params, aux_params = mx.model.load_checkpoint('resnet18', 30)
mod = mx.mod.Module(symbol=sym, context=mx.gpu(0), label_names=None)
mod.bind(for_training=False, data_shapes=[('data', (1,3,128,128))], 
         label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True)
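
A quick way to confirm the checkpoint loaded correctly is to push a dummy batch through the bound module. This is a minimal sketch, assuming the 1x3x128x128 input shape bound above:

# Forward an all-zero image and check the output shape (expected: (1, 2)).
dummy = mx.io.DataBatch(data=[mx.nd.zeros((1, 3, 128, 128))])
mod.forward(dummy, is_train=False)
print(mod.get_outputs()[0].shape)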

6. Run prediction on test data:

%matplotlib inline
import matplotlib.pyplot as plt
import cv2
import numpy as np
# define a simple data batch
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])

def get_image(path, show=False):
    # read the image and convert OpenCV's BGR order to RGB
    img = cv2.imread(path)
    if img is None:
        return None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if show:
        plt.imshow(img)
        plt.axis('off')
    # convert into format (batch, channel, height, width)
    img = cv2.resize(img, (128, 128))
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = img[np.newaxis, :]
    return img

def predict(path):
    img = get_image(path, show=True)
    # compute the predicted probabilities
    mod.forward(Batch([mx.nd.array(img)]))
    prob = mod.get_outputs()[0].asnumpy()
    # print the classes sorted by probability (at most the top 5)
    prob = np.squeeze(prob)
    a = np.argsort(prob)[::-1]
    for i in a[0:5]:
        print('class %d, probability %f' % (i, prob[i]))
predict('/home/test_images/cat.jpg')
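
The printed class indices follow the label order that im2rec assigned when building the .lst file (normally the sorted sub-folder order). If you keep a list of class names in that order, a small helper like the hypothetical one below maps a prediction back to a name; class1/class2 are placeholders for your actual folder names:

class_names = ['class1', 'class2']  # placeholder names, in im2rec's label order

def predict_class(path):
    img = get_image(path)
    mod.forward(Batch([mx.nd.array(img)]))
    prob = np.squeeze(mod.get_outputs()[0].asnumpy())
    top = int(np.argmax(prob))
    print('predicted: %s (probability %f)' % (class_names[top], prob[top]))

predict_class('/home/test_images/cat.jpg')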

Reprinted from blog.csdn.net/Scythe666/article/details/83058528