An Introduction to Common Neural Networks (MLP, CNN, RNN, GAN, SSD) and Their MXNet Implementations

Preface

This post uses Python 3; the code can use an Nvidia GPU for hardware acceleration. The installation of MXNet, TensorFlow, and the various modules is not covered here.

I. MXNet

Compared with TensorFlow and other deep learning frameworks, MXNet is readable and easy to learn, parallelizes efficiently, and is economical with memory. Its drawback is that relatively few projects use this open-source framework, which makes it harder to exchange code and learn from others.

1) Basic code

# Import required modules
import numpy as np
import mxnet as mx
from mxnet import nd, autograd, gluon
import gluonbook as gb

# Basic operations
mx.gpu(0)    # the GPU in slot 0
mx.random.seed(1)    # set the random seed
mx.init.Normal(0.02)    # normal-distribution parameter initializer
mx.init.Xavier()    # Xavier parameter initializer
nd.log(X)    # logarithm
nd.sum(X)    # sum (returns nan if any nan is present)
nd.nansum(X)    # sum ignoring nan
nd.squeeze(X)    # remove dimensions of size 1
nd.tanh(Z)    # tanh
nd.relu(Z)    # relu
nd.sigmoid(Z)    # sigmoid
nd.softmax(Z)    # softmax
nd.log_softmax(Z)    # log-softmax
nd.dot(W,X)    # matrix product
nd.norm(X)    # L2 norm
nd.argmax(Z,axis=1)    # indices of the maximum along axis 1
nd.random_normal(shape=(2,3,5))    # matrix of normally distributed random numbers
nd.zeros(shape=(2,3,5))     # matrix of zeros
nd.transpose(X,(1,0,2))    # permute dimensions
nd.reshape(X,(2,3,5))    # reshape
nd.tile(X,(1,2,3))    # tile (repeat) an array
nd.one_hot(X,10)    # one-hot encode non-negative integer indices

# NDArray methods
W1 = nd.array([1],ctx=mx.gpu(0))    # create an NDArray (here on GPU 0)
W1.asscalar()    # convert a one-element NDArray to a Python scalar
W1.asnumpy()    # convert an NDArray to a NumPy array
W1.expand_dims(axis=0)    # insert a dimension at the given position
W1.as_in_context(mx.gpu(0))    # move the data to the GPU
W1.attach_grad()    # allocate gradient storage for this parameter
W1.grad    # the gradient (populated during training; attach_grad must be called before training starts)

# Convolutional-network layers
net = gluon.nn.Sequential()
net.add(gluon.nn.Conv2D(channels=6, kernel_size=(5,3), strides=2, padding=1, activation='relu'),    # convolutional layer
        gluon.nn.BatchNorm(),    # batch normalization layer
        gluon.nn.MaxPool2D(pool_size=2, strides=2, padding=0),    # pooling layer
        gluon.nn.GlobalAvgPool2D(),    # global average pooling layer
        gluon.nn.Dense(10,activation='relu'),    # fully connected layer
        gluon.nn.Dropout(0.5),    # dropout layer
        gluon.nn.Flatten())    # flatten layer
net.initialize(force_reinit=True, init=mx.init.Xavier())    # initialization

# Model training
with autograd.record():    # start recording for automatic differentiation
    y_hat = net(X)
    loss = gluon.loss.SoftmaxCrossEntropyLoss()(y_hat, y)    # compute the loss
loss.backward()    # backpropagate the loss to compute gradients
gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}).step(batch_size=64)    # update the parameters

# gluonbook utilities
gb.try_gpu()    # return a GPU context if one is available, otherwise the CPU

2) Multi-layer perceptron (MLP implementation)

When an MLP (Multi-Layer Perceptron) is used on images, each image matrix has to be reshaped into a vector.
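
As a minimal illustration (separate from the Boston housing regression below, whose inputs are already vectors), a batch of images can be flattened like this:

# Sketch: flatten a batch of images so an MLP can consume it
from mxnet import nd
X = nd.random_normal(shape=(64, 1, 28, 28))    # a batch of 64 single-channel 28x28 images
X_flat = X.reshape((X.shape[0], -1))    # each image becomes a 784-dimensional vector
print(X_flat.shape)    # (64, 784)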

# Import required modules
import numpy as np
import mxnet as mx
from mxnet import nd, autograd, gluon
import gluonbook as gb
from sklearn.datasets import load_boston
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split

# Load data
ctx = gb.try_gpu()
boston = load_boston()
X_train,X_test,y_train,y_test = train_test_split(boston.data,boston.target,test_size=0.1)
X_train = nd.array(scale(X_train),ctx=ctx)
X_test = nd.array(scale(X_test),ctx=ctx)
y_train = nd.array(scale(y_train.reshape((-1,1))),ctx=ctx)
y_test = nd.array(scale(y_test.reshape((-1,1))),ctx=ctx)

# Define parameters
iters = 10000
lr = 0.1
num_inputs = X_train.shape[1]
num_hiddens = 25
num_outputs = y_train.shape[1]
W1 = nd.random_normal(shape=(num_inputs,num_hiddens),ctx=ctx)
b1 = nd.random_normal(shape=num_hiddens,ctx=ctx)
W2 = nd.random_normal(shape=(num_hiddens,num_outputs),ctx=ctx)
b2 = nd.random_normal(shape=num_outputs,ctx=ctx)

# Model training
params = [W1, b1, W2, b2]
for param in params:
    param.attach_grad()
for i in range(iters):
    with autograd.record():
        Z1 = nd.tanh(nd.dot(X_train,W1)+b1)
        Z2 = nd.sigmoid(nd.dot(Z1,W2)+b2)
        train_loss = nd.sqrt(nd.mean(nd.square(Z2-y_train)))
    train_loss.backward()
    for param in params:
        param[:] = param - lr * param.grad
    if i % 100 == 0:
        Z1 = nd.tanh(nd.dot(X_test,W1)+b1)
        Z2 = nd.sigmoid(nd.dot(Z1,W2)+b2)
        test_loss = nd.sqrt(nd.mean(nd.square(Z2-y_test)))
        print('epoch: %d, train loss: %.6f, test loss: %.6f'%(i,train_loss.asscalar(),test_loss.asscalar()))

3) Convolutional neural networks (LeNet/AlexNet/VGG/NiN/GoogLeNet/ResNet implementations)

Keeping the same basic architecture and adjusting parameters such as the number of convolutional layers, the number of channels, and the kernel sizes yields CNN (Convolutional Neural Network) models from simple to complex, such as LeNet, AlexNet, VGG, NiN, GoogLeNet, and ResNet.

LeNet has essentially this basic structure. AlexNet builds on LeNet by adding convolutional layers, enlarging the channel counts and kernel sizes, and switching to the ReLU activation, making it more refined and more complex. Compared with AlexNet, VGG adds more convolutional layers and groups each pooling layer together with the one or more identical convolutional layers that precede it into a block; stacking such blocks and attaching the same fully connected layers as AlexNet gives VGG. NiN departs from the first three, which extract spatial features and then emit the classification result through fully connected layers: building on VGG, it changes the repeated convolutions inside each block to 1x1 kernels and replaces the fully connected layers with a global pooling layer that outputs the result directly. GoogLeNet adopts NiN's network-in-network idea and improves on it, running several parallel branches inside each block and classifying with a single fully connected layer after global pooling. ResNet introduces residual blocks, which let gradient updates bypass intermediate layers and reach the blocks closer to the input directly, so it converges faster.
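
A convenient way to compare these architectures in practice is to push a dummy batch through a network and print every block's output shape. The sketch below assumes that net has already been built and initialized with the model function defined in the code that follows (the input sizes mirror the resize values used when loading the data: 28 for LeNet, 224 for AlexNet/VGG/NiN, 96 for GoogLeNet/ResNet):

# Sketch: inspect per-block output shapes; assumes net = model('alexnet') has been initialized
X = nd.random.uniform(shape=(1, 1, 224, 224))    # use (1, 1, 28, 28) for LeNet, (1, 1, 96, 96) for GoogLeNet/ResNet
for blk in net:
    X = blk(X)
    print(blk.name, 'output shape:', X.shape)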

# Import required modules
import sys, time
import mxnet as mx
from mxnet import autograd, nd, gluon, init
from mxnet.gluon import nn
import gluonbook as gb

# Build the network architecture
def model(model_type):
	if model_type == 'lenet':
		net = nn.Sequential()
		net.add(nn.Conv2D(channels=6, kernel_size=5, activation='sigmoid'),
		        nn.MaxPool2D(pool_size=2, strides=2),
		        nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),
		        nn.MaxPool2D(pool_size=2, strides=2),
		        nn.Dense(120, activation='sigmoid'),
		        nn.Dense(84, activation='sigmoid'), 
		        nn.Dense(10))
	elif model_type == 'alexnet':
		net = nn.Sequential()
		net.add(nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),
		        nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),
		        nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nn.Dense(4096, activation="relu"), nn.Dropout(0.5),
		        nn.Dense(4096, activation="relu"), nn.Dropout(0.5),
		        nn.Dense(10))
	elif model_type == 'vgg':
		def vgg_block(num_convs, num_channels):
		    blk = nn.Sequential()
		    for _ in range(num_convs):
		        blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))
		    blk.add(nn.MaxPool2D(pool_size=2, strides=2))
		    return blk
		def vgg(conv_arch):
		    net = nn.Sequential()
		    for (num_convs, num_channels) in conv_arch:
		        net.add(vgg_block(num_convs, num_channels))
		    net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
		            nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
		            nn.Dense(10))
		    return net
		conv_arch = ((1, 16), (1, 32), (2, 64), (2, 128), (2, 128))
		net = vgg(conv_arch)
	elif model_type == 'nin':
		def nin_block(num_channels, kernel_size, strides, padding):
		    blk = nn.Sequential()
		    blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),
		            nn.Conv2D(num_channels, kernel_size=1, activation='relu'),
		            nn.Conv2D(num_channels, kernel_size=1, activation='relu'))
		    return blk
		net = nn.Sequential()
		net.add(nin_block(96, kernel_size=11, strides=4, padding=0),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nin_block(256, kernel_size=5, strides=1, padding=2),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nin_block(384, kernel_size=3, strides=1, padding=1),
		        nn.MaxPool2D(pool_size=3, strides=2), nn.Dropout(0.5),
		        nin_block(10, kernel_size=3, strides=1, padding=1),
		        nn.GlobalAvgPool2D(),
		        nn.Flatten())
	elif model_type == 'googlenet':
		class Inception(nn.Block):
		    def __init__(self, c1, c2, c3, c4, **kwargs):
		        super(Inception, self).__init__(**kwargs)
		        self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')
		        self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')
		        self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')
		        self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')
		        self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')
		        self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)
		        self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')
		    def forward(self, x):
		        p1 = self.p1_1(x)
		        p2 = self.p2_2(self.p2_1(x))
		        p3 = self.p3_2(self.p3_1(x))
		        p4 = self.p4_2(self.p4_1(x))        
		        return nd.concat(p1, p2, p3, p4, dim=1)  # concatenate the outputs along the channel dimension
		b1 = nn.Sequential()
		b1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),
		       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		b2 = nn.Sequential()
		b2.add(nn.Conv2D(64, kernel_size=1),
		       nn.Conv2D(192, kernel_size=3, padding=1),
		       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		b3 = nn.Sequential()
		b3.add(Inception(64, (96, 128), (16, 32), 32),
		       Inception(128, (128, 192), (32, 96), 64),
		       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		b4 = nn.Sequential()
		b4.add(Inception(192, (96, 208), (16, 48), 64),
		       Inception(160, (112, 224), (24, 64), 64),
		       Inception(128, (128, 256), (24, 64), 64),
		       Inception(112, (144, 288), (32, 64), 64),
		       Inception(256, (160, 320), (32, 128), 128),
		       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		b5 = nn.Sequential()
		b5.add(Inception(256, (160, 320), (32, 128), 128),
		       Inception(384, (192, 384), (48, 128), 128),
		       nn.GlobalAvgPool2D())
		net = nn.Sequential()
		net.add(b1, b2, b3, b4, b5, nn.Dense(10))
	elif model_type == 'resnet':
		class Residual(nn.Block):
		    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
		        super(Residual, self).__init__(**kwargs)
		        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,strides=strides)
		        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
		        if use_1x1conv:
		            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,strides=strides)
		        else:
		            self.conv3 = None
		        self.bn1 = nn.BatchNorm()
		        self.bn2 = nn.BatchNorm()
		    def forward(self, X):
		        Y = nd.relu(self.bn1(self.conv1(X)))
		        Y = self.bn2(self.conv2(Y))
		        if self.conv3:
		            X = self.conv3(X)
		        return nd.relu(Y + X)
		def resnet_block(num_channels, num_residuals, first_block=False):
		    blk = nn.Sequential()
		    for i in range(num_residuals):
		        if i == 0 and not first_block:
		            blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
		        else:
		            blk.add(Residual(num_channels))
		    return blk
		net = nn.Sequential()
		net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),
		        nn.BatchNorm(), nn.Activation('relu'),
		        nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		net.add(resnet_block(64, 2, first_block=True),
		        resnet_block(128, 2),
		        resnet_block(256, 2),
		        resnet_block(512, 2))
		net.add(nn.GlobalAvgPool2D(), nn.Dense(10))
	return net

# Load data and define parameters
ctx = gb.try_gpu()
num_epochs = 20
model_type = 'lenet'
if model_type == 'lenet': 
	lr = 0.8 
	batch_size = 128
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size)
if model_type == 'alexnet': 
	lr = 0.01
	batch_size = 128
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=224)
if model_type == 'vgg': 
	lr = 0.05
	batch_size = 64
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=224)
if model_type == 'nin': 
	lr = 0.1
	batch_size = 128
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=224)
if model_type == 'googlenet': 
	lr = 0.1
	batch_size = 128
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=96)
if model_type == 'resnet': 
	lr = 0.05
	batch_size = 256
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=96)

# Model training
net = model(model_type)
net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})    # create the optimizer once, outside the training loop
for epoch in range(num_epochs):
    train_loss_sum, train_acc_sum, start = 0, 0, time.time()
    for X, y in train_iter:
        X, y  = X.as_in_context(ctx), y.as_in_context(ctx)
        with autograd.record():
            y_hat = net(X)
            loss = gluon.loss.SoftmaxCrossEntropyLoss()(y_hat, y)
        loss.backward()
        trainer.step(batch_size)
        train_loss_sum += loss.mean().asscalar()
        train_acc_sum += gb.accuracy(y_hat, y)
    test_acc = gb.evaluate_accuracy(test_iter, net, ctx)
    print('epoch %d, loss %.4f, train acc %.4f, test acc %.4f, time %.1f sec' 
          % (epoch + 1, 
             train_loss_sum/len(train_iter),
             train_acc_sum/len(train_iter),
             test_acc, 
             time.time()-start))

# Saving and loading models/parameters
net.save_parameters(params_path)    # save the parameters to a .params file
net.load_parameters(params_path,ctx=ctx)    # load the parameters from a .params file
hybrid_net.export(path)    # save the model and parameters as .json and .params files; note: only a net defined with gluon.nn.HybridSequential() (and hybridized) can be exported
hybrid_net = gluon.nn.SymbolBlock.imports(json_path, ['data'], params_path, ctx=ctx)    # load the model and parameters from the .json and .params files
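
The net above was built with gluon.nn.Sequential, so only save_parameters/load_parameters apply to it; export needs a hybridizable network. A minimal sketch of the export round trip, with illustrative file names (the prefix 'my_model' is an assumption, not something used elsewhere in this post):

# Sketch: export and re-import a hybridized network (file names are illustrative)
hybrid_net = gluon.nn.HybridSequential()
hybrid_net.add(gluon.nn.Dense(10))
hybrid_net.initialize()
hybrid_net.hybridize()
hybrid_net(nd.zeros((1, 20)))    # run one forward pass so the symbolic graph gets built
hybrid_net.export('my_model')    # writes my_model-symbol.json and my_model-0000.params
hybrid_net = gluon.nn.SymbolBlock.imports('my_model-symbol.json', ['data'], 'my_model-0000.params', ctx=ctx)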

4) Recurrent neural networks (RNN/GRU/LSTM implementations)

LSTM (Long Short-Term Memory) is a variant of the RNN (Recurrent Neural Network) that can learn long-term dependencies, while GRU (Gated Recurrent Unit) is a simplified version of LSTM that runs more efficiently. A bidirectional design can be obtained by merging the hidden states at the output unit/layer.
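
The Gluon rnn module exposes bidirectionality directly through a flag; a small sketch (the sizes are chosen arbitrarily and are independent of the from-scratch code below):

# Sketch: a bidirectional LSTM via the Gluon API
from mxnet import nd
from mxnet.gluon import rnn
lstm = rnn.LSTM(256, bidirectional=True)    # layout 'TNC': (steps, batch, features)
lstm.initialize()
X = nd.random_normal(shape=(35, 32, 100))    # 35 time steps, batch of 32, 100 input features
state = lstm.begin_state(batch_size=32)
Y, state = lstm(X, state)
print(Y.shape)    # (35, 32, 512): forward and backward hidden states concatenated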

# Import required modules
import math, time
from mxnet import autograd, nd, gluon
import gluonbook as gb 

# Define building blocks
def to_onehot(X, size):
    return [nd.one_hot(x, size) for x in X.T]
def get_params():
    _one = lambda shape: nd.random.normal(scale=0.01, shape=shape, ctx=ctx)
    _three = lambda : (_one((num_inputs, num_hiddens)), 
                       _one((num_hiddens, num_hiddens)), 
                       nd.zeros(num_hiddens, ctx=ctx))   
    if model_type == 'rnn':
        W_xh = _one((num_inputs, num_hiddens))  # input layer parameters
        W_hh = _one((num_hiddens, num_hiddens))  # hidden layer parameters
        W_hy = _one((num_hiddens, num_outputs))  # output layer parameters
        b_h = nd.zeros(num_hiddens, ctx=ctx)
        b_y = nd.zeros(num_outputs, ctx=ctx)
        params = [W_xh, W_hh, b_h, W_hy, b_y]
    elif model_type == 'gru': 
        W_xr, W_hr, b_r = _three()  # reset gate parameters
        W_xz, W_hz, b_z = _three()  # update gate parameters
        W_xh, W_hh, b_h = _three()  # candidate hidden state parameters
        W_hy = _one((num_hiddens, num_outputs))  # output layer parameters
        b_y = nd.zeros(num_outputs, ctx=ctx)
        params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hy, b_y]
    elif model_type == 'lstm':       
        W_xi, W_hi, b_i = _three()  # input gate parameters
        W_xf, W_hf, b_f = _three()  # forget gate parameters
        W_xo, W_ho, b_o = _three()  # output gate parameters
        W_xc, W_hc, b_c = _three()  # candidate memory cell parameters
        W_hy = _one((num_hiddens, num_outputs))  # output layer parameters
        b_y = nd.zeros(num_outputs, ctx=ctx)
        params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hy, b_y]
    for param in params:
        param.attach_grad()
    return params
def init_state(batch_size, num_hiddens, ctx):
    if (model_type == 'rnn') | (model_type == 'gru'):
        return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx),)
    elif model_type == 'lstm':
        return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx), 
                nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx))
def model(inputs, state, params):
    if model_type == 'rnn':
        W_xh, W_hh, b_h, W_hy, b_y = params
        H, = state
        outputs = []
        for X in inputs:
            H = nd.tanh(nd.dot(X, W_xh) + nd.dot(H, W_hh) + b_h)
            Y = nd.dot(H, W_hy) + b_y
            outputs.append(Y)
        return outputs, (H,)
    elif model_type == 'gru':
        W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hy, b_y = params
        H, = state
        outputs = []
        for X in inputs:        
            Z = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)
            R = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)
            H_tilda = nd.tanh(nd.dot(X, W_xh) + R * nd.dot(H, W_hh) + b_h)
            H = Z * H + (1 - Z) * H_tilda
            Y = nd.dot(H, W_hy) + b_y
            outputs.append(Y)
        return outputs, (H,)
    elif model_type == 'lstm':
        [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,
         W_hy, b_y] = params
        (H, C) = state
        outputs = []
        for X in inputs:        
            I = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)
            F = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) + b_f)
            O = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)
            C_tilda = nd.tanh(nd.dot(X, W_xc) + nd.dot(H, W_hc) + b_c)
            C = F * C + I * C_tilda
            H = O * C.tanh()
            Y = nd.dot(H, W_hy) + b_y
            outputs.append(Y)
        return outputs, (H, C)
def predict(prefix, num_chars, model, params, init_state, num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):
    state = init_state(1, num_hiddens, ctx)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix)):
        X = to_onehot(nd.array([output[-1]], ctx=ctx), vocab_size)  # feed the previous time step's output as the current input
        (Y, state) = model(X, state, params)  # compute the output and update the hidden state
        if t < len(prefix) - 1:  # the next input is the next character of prefix, otherwise the current best predicted character
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y[0].argmax(axis=1).asscalar()))
    return ''.join([idx_to_char[i] for i in output])
def grad_clipping(params, theta, ctx):
    norm = nd.array([0.0], ctx)
    for param in params:
        norm += (param.grad ** 2).sum()
    norm = norm.sqrt().asscalar()
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm
def train_and_predict(model, get_params, init_state, num_hiddens, vocab_size, ctx, corpus_indices, 
                          idx_to_char, char_to_idx, is_random_iter, num_epochs, num_steps, lr, clipping_theta, 
                          batch_size, pred_period, pred_len, prefixes):
    if is_random_iter:
        data_iter_fn = gb.data_iter_random
    else:
        data_iter_fn = gb.data_iter_consecutive     
    params = get_params()
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        if not is_random_iter:  # with consecutive sampling, initialize the hidden state at the start of the epoch
            state = init_state(batch_size, num_hiddens, ctx)
        loss_sum, start = 0.0, time.time()
        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, ctx) 
        for t, (X, Y) in enumerate(data_iter):  # roughly 10000/(batch_size*num_steps) batches; each batch holds X and Y of shape (batch_size, num_steps)
            if is_random_iter:  # with random sampling, initialize the hidden state before every minibatch update
                state = init_state(batch_size, num_hiddens, ctx)
            else:  # otherwise detach the hidden state from the computation graph
                for s in state:
                    s.detach()
            with autograd.record():
                inputs = to_onehot(X, vocab_size)
                outputs, state = model(inputs, state, params)  # outputs is a list of num_steps matrices of shape (batch_size, vocab_size)
                outputs = nd.concat(*outputs, dim=0)  # after concatenation the shape is (num_steps*batch_size, vocab_size)
                y = Y.T.reshape((-1,))  # Y has shape (batch_size, num_steps); transposing and flattening gives a vector of length batch_size*num_steps that matches the output rows one-to-one
                l = loss(outputs, y).mean()  # average classification error via the cross-entropy loss
            l.backward()
            grad_clipping(params, clipping_theta, ctx)  # clip the gradients, then update the weights with SGD
            gb.sgd(params, lr, 1)  # the loss is already averaged, so the gradient needs no further averaging
            loss_sum += l.asscalar()
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %4f, time %.2fs'  % (
                epoch + 1, math.exp(loss_sum / (t + 1)),
                     time.time() - start))
            for prefix in prefixes:
                print(' -', predict(
                    prefix, pred_len, model, params, init_state,
                    num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx))   

# Load data
corpus_indices, char_to_idx, idx_to_char, vocab_size = gb.load_data_jay_lyrics()

# Define parameters
ctx = gb.try_gpu()
num_inputs = vocab_size
num_hiddens = 256
num_outputs = vocab_size
num_epochs = 200
num_steps = 35
batch_size = 32
lr = 100 
clipping_theta = 0.01
prefixes = ['分开', '不分开']
pred_period = 50    # print the loss and sample predictions every 50 epochs (500 would never trigger with num_epochs = 200)
pred_len = 50

# Model training
model_type = 'lstm'
train_and_predict(model, get_params, init_state, num_hiddens, vocab_size, ctx, corpus_indices, 
                  idx_to_char, char_to_idx, False, num_epochs, num_steps, lr, clipping_theta, 
                  batch_size, pred_period, pred_len, prefixes)
               
# Model training (using the Gluon API)
from mxnet.gluon import rnn
model = gb.RNNModel(rnn.LSTM(num_hiddens), vocab_size)
gb.train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, 
                               char_to_idx, num_epochs, num_steps, lr, clipping_theta, batch_size, 
                               pred_period, pred_len, prefixes) 

5) Generative adversarial networks (DCGAN implementation)

A GAN (Generative Adversarial Network) is more a design philosophy than a particular model architecture. It is usually assembled from two or more deep neural networks, and the network type is chosen by the task: a CNN when the goal is to generate images, an MLP or RNN when generating speech. Under this philosophy the networks in a GAN split into two parts: one (the generator) produces a target object, while the other (the discriminator) judges whether that object is real or fake. The generator tries to make its outputs close enough to the real data to pass as genuine, while the discriminator tries as hard as it can to tell real from fake; the two sides form an adversarial game that makes the final generated results highly realistic. For the same reason, and unlike any of the architectures above, the model has to backpropagate and update gradients for two neural networks at once.
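
Formally, the two networks play the minimax game from the original GAN paper, stated here for reference:

$$\min_G \max_D \; V(D, G) = \mathbb{E}_{x \sim p_{\text{data}}}\big[\log D(x)\big] + \mathbb{E}_{z \sim p_z}\big[\log\big(1 - D(G(z))\big)\big]$$

In the DCGAN code below, the discriminator loss covers both expectation terms (errD_real + errD_fake), while the generator is trained with the common non-saturating variant: it labels its fakes as real (errG = loss(output, real_label)), which maximizes log D(G(z)) instead of minimizing log(1 - D(G(z))).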

The basic GAN framework likewise has many variants, including the Conditional GAN, InfoGAN, and pix2pix GAN. The Conditional GAN adds the target class label to the inputs of both the generator and the discriminator, so that after training the generator can produce targets of a specified label. InfoGAN borrows the Conditional GAN idea and introduces a latent code into the GAN; training maximizes the mutual information between the latent code and the generated target, so the generator can not only steer generation in a direction the developer chooses, but the discriminator can also measure how well a given target matches the features specified by the latent code (similarity detection). The pix2pix GAN is used for image tasks; instead of the random-vector-in, image-out pattern of the previous three, it takes an image as input and outputs an image, and is commonly used for image-translation work such as colorization or feature extraction.
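
The Conditional GAN idea amounts to concatenating label information onto both networks' inputs. A minimal, self-contained sketch (the shapes and the class index are made up for illustration and are not part of the DCGAN code below):

# Sketch: conditioning the inputs of a Conditional GAN (illustrative shapes only)
from mxnet import nd
batch_size, latent_z_size, num_classes, img_size = 4, 100, 10, 64
real_images = nd.zeros((batch_size, 3, img_size, img_size))    # stand-in for a batch of real images
z = nd.random_normal(shape=(batch_size, latent_z_size, 1, 1))
y = nd.one_hot(nd.array([3] * batch_size), num_classes)    # the desired class label
z_cond = nd.concat(z, y.reshape((batch_size, num_classes, 1, 1)), dim=1)    # generator input
y_map = y.reshape((batch_size, num_classes, 1, 1)).broadcast_to((batch_size, num_classes, img_size, img_size))
x_cond = nd.concat(real_images, y_map, dim=1)    # discriminator input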

The code below shows a basic GAN for image generation, also known as DCGAN (Deep Convolutional GAN). Once the model is trained, any fixed-length random vector can be turned into a synthetic face image (a short generation sketch follows the training loop):

# Import required modules
import os
import numpy as np
import tarfile
import logging
import matplotlib.image as mpimg
import matplotlib as mpl
from matplotlib import pyplot as plt
import mxnet as mx
from mxnet import gluon
from mxnet import ndarray as nd
from mxnet.gluon import nn, utils
from mxnet import autograd
import gluonbook as gb

# Build the network architecture
def model():
    nc, ngf = 3, 64
    netG = nn.Sequential()
    with netG.name_scope():
        netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
        netG.add(nn.BatchNorm())
        netG.add(nn.Activation('relu'))
        netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
        netG.add(nn.BatchNorm())
        netG.add(nn.Activation('relu'))
        netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
        netG.add(nn.BatchNorm())
        netG.add(nn.Activation('relu'))
        netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
        netG.add(nn.BatchNorm())
        netG.add(nn.Activation('relu'))
        netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
        netG.add(nn.Activation('tanh'))
    ndf = 64
    netD = nn.Sequential()
    with netD.name_scope():
        netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
        netD.add(nn.LeakyReLU(0.2))
        netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        netD.add(nn.Conv2D(1, 4, 1, 0, use_bias=False))
    return netG, netD

# Define parameters
epochs = 5
batch_size = 64
latent_z_size = 100
ctx = gb.try_gpu()
lr = 0.0002
beta1 = 0.5

# Load data
lfw_url = 'http://vis-www.cs.umass.edu/lfw/lfw-deepfunneled.tgz'
data_path = 'lfw-deepfunneled'
if not os.path.exists(data_path):    # download and unpack the LFW data only if it is not already present
    data_file = utils.download(lfw_url)
    with tarfile.open(data_file) as tar:
        tar.extractall(path=os.getcwd())
target_wd = 64
target_ht = 64
img_list = []
def transform(data, target_wd, target_ht):
    data = mx.image.imresize(data, target_wd, target_ht)
    data = nd.transpose(data, (2,0,1))
    data = data.astype(np.float32)/127.5 - 1
    if data.shape[0] == 1:
        data = nd.tile(data, (3, 1, 1))
    return data.reshape((1,) + data.shape)
for path, _, fnames in os.walk(data_path):
    for fname in fnames:
        if not fname.endswith('.jpg'):
            continue
        img = os.path.join(path, fname)
        img_arr = mx.image.imread(img)
        img_arr = transform(img_arr, target_wd, target_ht)
        img_list.append(img_arr)
train_data = mx.io.NDArrayIter(data=nd.concatenate(img_list), batch_size=batch_size)
real_label = nd.ones((batch_size,), ctx=ctx)
fake_label = nd.zeros((batch_size,),ctx=ctx)

# Model training
netG,netD = model()
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
def facc(label, pred):
    pred = pred.ravel()
    label = label.ravel()
    return ((pred > 0.5) == label).mean()
metric = mx.metric.CustomMetric(facc)
logging.basicConfig(level=logging.DEBUG)
for epoch in range(epochs):
    train_data.reset()
    i = 0
    for batch in train_data:
        data = batch.data[0].as_in_context(ctx)
        latent_z = mx.nd.random_normal(0, 1, shape=(batch_size, latent_z_size, 1, 1), ctx=ctx)
        with autograd.record():
            output = netD(data).reshape((-1, 1))
            errD_real = loss(output, real_label)
            metric.update([real_label,], [output,])
            fake = netG(latent_z)
            output = netD(fake.detach()).reshape((-1, 1))
            errD_fake = loss(output, fake_label)
            errD = errD_real + errD_fake
            errD.backward()
            metric.update([fake_label,], [output,])
        trainerD.step(batch.data[0].shape[0])
        with autograd.record():
            fake = netG(latent_z)
            output = netD(fake).reshape((-1, 1))
            errG = loss(output, real_label)
            errG.backward()
        trainerG.step(batch.data[0].shape[0])
        if i % 100 == 0:
            name, acc = metric.get()
            logging.info(' epoch %d, batch %d, D_loss = %f, G_loss = %f, train acc = %f' 
                         %(epoch, i, nd.mean(errD).asscalar(), nd.mean(errG).asscalar(), acc))
        i += 1
    name, acc = metric.get()
    metric.reset()
    fake_img = fake[0]
    plt.imshow(((fake_img.asnumpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8))
    plt.show()
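
Once training finishes, generating a new image is a single forward pass through the generator, reusing the variables defined above (a short sketch):

# Sketch: sample a fresh latent vector and generate one image with the trained netG
latent_z = mx.nd.random_normal(0, 1, shape=(1, latent_z_size, 1, 1), ctx=ctx)
img = netG(latent_z)[0]
plt.imshow(((img.asnumpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8))
plt.show()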

6) Region-based convolutional neural networks (SSD implementation)

The invention of region-based convolutional neural networks is the origin of modern object detection. R-CNN is the founding work: it classifies thousands or even tens of thousands of candidate rectangular regions extracted from one image in order to detect objects. Fast R-CNN replaces per-region feature extraction with a single pass of a neural network over the whole image, followed by a region of interest (RoI) pooling layer, which saves a great deal of training time. Faster R-CNN takes the best of both and introduces a region proposal network (RPN) that uses anchor points and anchor boxes to produce a limited yet effective set of candidate regions. Mask R-CNN adds a fully convolutional network (FCN) on top of Faster R-CNN, so that detection can be refined down to individual pixels; the most successful application of this technique is semantic image segmentation. SSD (Single Shot MultiBox Detector) merges region proposal and classification into a single operation, which makes the model much faster. YOLO (You Only Look Once) splits the image features into a uniform grid to avoid the large amount of redundant computation over overlapping regions in the models above.
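
The anchor boxes at the heart of SSD can be previewed on their own with MultiBoxPrior, which the implementation below calls at every scale. A small sketch (the 4x4 feature map is a made-up example):

# Sketch: per-pixel anchor boxes; len(sizes) + len(ratios) - 1 = 4 anchors per pixel
from mxnet import nd, contrib
feature_map = nd.zeros((1, 3, 4, 4))    # a dummy 4x4 feature map
anchors = contrib.ndarray.MultiBoxPrior(feature_map, sizes=[0.2, 0.272], ratios=[1, 2, 0.5])
print(anchors.shape)    # (1, 64, 4): 16 pixels x 4 anchors, each as normalized (xmin, ymin, xmax, ymax)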

The following code presents an implementation of SSD; the display function at the end draws the predicted bounding box on a sample test image:

# Import required modules
import sys,time
import numpy as np
import matplotlib.pyplot as plt
from mxnet import autograd, contrib, gluon, image, init, nd
from mxnet.gluon import loss as gloss, nn
import gluonbook as gb

# Define building blocks
def cls_predictor(num_anchors, num_classes):
    return nn.Conv2D(num_anchors * (num_classes + 1), kernel_size=3,padding=1)
def bbox_predictor(num_anchors):
    return nn.Conv2D(num_anchors * 4, kernel_size=3, padding=1)
def forward(x, block):
    block.initialize()
    return block(x)
def flatten_pred(pred):
    return pred.transpose(axes=(0, 2, 3, 1)).flatten()
def concat_preds(preds):
    return nd.concat(*[flatten_pred(p) for p in preds], dim=1)
def down_sample_blk(num_filters):
    blk = nn.HybridSequential()
    for _ in range(2):
        blk.add(nn.Conv2D(num_filters, kernel_size=3, padding=1),
                nn.BatchNorm(in_channels=num_filters),
                nn.Activation('relu'))
    blk.add(nn.MaxPool2D(2))
    blk.hybridize()
    return blk
def body_blk():
    blk = nn.HybridSequential()
    for num_filters in [16, 32, 64]:
        blk.add(down_sample_blk(num_filters))
    return blk
def get_blk(i):
    if i == 0:
        blk = body_blk()
    elif i == 4:
        blk = nn.GlobalMaxPool2D()
    else:
        blk = down_sample_blk(128)
    return blk
def single_scale_forward(x, blk, size, ratio, cls_predictor, bbox_predictor):
    y = blk(x)
    anchor = contrib.ndarray.MultiBoxPrior(y, sizes=size, ratios=ratio)
    cls_pred = cls_predictor(y)
    bbox_pred = bbox_predictor(y)
    return (y, anchor, cls_pred, bbox_pred)
def calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks):
    cls = cls_loss(cls_preds, cls_labels)
    bbox = bbox_loss(bbox_preds * bbox_masks, bbox_labels * bbox_masks)
    return cls + bbox
def cls_metric(cls_preds, cls_labels):
    return (cls_preds.argmax(axis=-1) == cls_labels).mean().asscalar()
def bbox_metric(bbox_preds, bbox_labels, bbox_masks):
    return (bbox_labels - bbox_preds * bbox_masks).abs().mean().asscalar()
def predict(x):
    anchors, cls_preds, bbox_preds = net(x.as_in_context(ctx))
    cls_probs = cls_preds.softmax().transpose((0, 2, 1))
    out = contrib.nd.MultiBoxDetection(cls_probs, bbox_preds, anchors)
    idx = [i for i, row in enumerate(out[0]) if row[0].asscalar() != -1]
    return out[:, idx]
def display(x,out):
    img = ((x.transpose((0, 2, 3, 1))).clip(0, 254) / 254)[0]
    ax = plt.subplot(111)
    ax.imshow(img.asnumpy())
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    row = out[0]
    if row.shape[0] == 5:
        gb.show_bboxes(ax, [row[1:]*img.shape[0]], colors=['yellow'])
    else:
        score = row[1].asscalar()
        gb.show_bboxes(ax, [row[2:]*img.shape[0]], '%.2f'%score, colors=['yellow'])
    plt.show()
class SSD(nn.Block):
    def __init__(self, num_classes, verbose=False, **kwargs):
        super(SSD, self).__init__(**kwargs)
        self.num_classes = num_classes
        for i in range(5):
            setattr(self, 'blk_%d'%i, get_blk(i))
            setattr(self, 'cls_%d'%i, cls_predictor(num_anchors,num_classes))
            setattr(self, 'bbox_%d'%i, bbox_predictor(num_anchors))
    def forward(self, x):
        anchors, cls_preds, bbox_preds = [None]*5, [None]*5, [None]*5
        for i in range(5):
            x, anchors[i], cls_preds[i], bbox_preds[i] = single_scale_forward(
                x, getattr(self, 'blk_%d' % i), sizes[i], ratios[i],
                getattr(self, 'cls_%d' % i), getattr(self, 'bbox_%d' % i))
        return (nd.concat(*anchors, dim=1),
                concat_preds(cls_preds).reshape((0, -1, self.num_classes + 1)),
                concat_preds(bbox_preds))

# Define parameters
ctx = gb.try_gpu()
num_anchors = 4
sizes = [[0.2, 0.272], [0.37, 0.447], [0.54, 0.619], [0.71, 0.79], [0.88, 0.961]]
ratios = [[1, 2, 0.5]] * 5
batch_size = 32

# Load data
train_data, test_data = gb.load_data_pikachu(batch_size)
train_data.reshape(label_shape=(3, 5))

# Model training
net = SSD(num_classes=2)
net.initialize(init=init.Xavier(), ctx=ctx)
trainer = gluon.Trainer(net.collect_params(),'sgd', {'learning_rate': 0.1, 'wd': 5e-4})
cls_loss = gloss.SoftmaxCrossEntropyLoss()
bbox_loss = gloss.L1Loss()
for epoch in range(5):
    acc, mae = 0, 0
    train_data.reset()
    tic = time.time()
    for i, batch in enumerate(train_data):
        X_train = batch.data[0].as_in_context(ctx)
        Y_train = batch.label[0].as_in_context(ctx)
        with autograd.record():
            anchors, cls_preds, bbox_preds = net(X_train)
            bbox_labels, bbox_masks, cls_labels = contrib.nd.MultiBoxTarget(anchors, Y_train, cls_preds.transpose(axes=(0,2,1)))
            l = calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks)
        l.backward()
        trainer.step(batch_size)
        acc += cls_metric(cls_preds, cls_labels)
        mae += bbox_metric(bbox_preds, bbox_labels, bbox_masks)
        if (i+1) % 10 == 0:
            print('epoch %d, batch %d, class err %.6f, bbox MAE %.6f, time %.1f sec' % (
                epoch, i+1, 1 - acc / (i + 1), mae / (i + 1), time.time() - tic))
            tic = time.time()
        if (i+1) % 50 == 0:
            X_test = nd.zeros(1)
            while X_test.sum().asscalar() == 0:
                batch = test_data.next()
                idx = int(nd.random_uniform(1, batch_size).asscalar())    # pick a random sample index from the test batch
                X_test = batch.data[0].as_in_context(ctx)[idx].expand_dims(axis=0)
            out = predict(X_test)[0]
            display(X_test,out)

II. TensorFlow

As Google's heavyweight tool, its release immediately set off a wave of enthusiasm for neural networks; its openness and rich feature set have attracted the largest community of neural-network developers.

1) Basic code

# Import required modules
import numpy as np
import tensorflow as tf

# Basic operations
tf.Variable(1)    # the standard form of a TensorFlow variable
tf.reduce_sum(X)    # sum
tf.reduce_mean(X)    # mean
tf.square(X)    # square
tf.nn.relu(X)    # relu
tf.matmul(X,W)    # matrix product
tf.random_normal(shape=(2,3,5))    # matrix of normally distributed random numbers
tf.placeholder(shape=(None,20), dtype=tf.float32)    # placeholder

2) Multi-layer perceptron (MLP implementation)

# Import required modules
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.datasets import load_boston
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split

# Load data
boston = load_boston()
X_train,X_test,y_train,y_test = train_test_split(boston.data,boston.target,test_size=0.1)
X_train = scale(X_train)
X_test = scale(X_test)
y_train = scale(y_train.reshape((-1,1)))
y_test = scale(y_test.reshape((-1,1)))

# Define placeholders
xs = tf.placeholder(shape=(None,X_train.shape[1]), dtype=tf.float32)
ys = tf.placeholder(shape=(None,1), dtype=tf.float32)

# Define the network architecture
def layer(X, input_size, output_size, keep_prob, activation=None):
    W = tf.Variable(tf.random_normal(shape=(input_size,output_size)))
    b = tf.Variable(tf.zeros(shape=(1,output_size))+0.1)
    Y = tf.matmul(X,W) + b
    Y = tf.nn.dropout(Y, keep_prob=keep_prob)
    if activation is None:
        return Y
    else:
        return activation(Y)
l1 = layer(xs, 13, 8, 1, activation=tf.nn.relu)
l2 = layer(l1, 8, 5, 1, activation=tf.nn.relu)
pred = layer(l2, 5, 1, 1, activation=None)

# Define parameters
iters = 10000
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - pred),reduction_indices=[1]))  # MSE
Trainer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

# Model training
feed_dict_train = {ys: y_train, xs: X_train}
feed_dict_test = {ys: y_test, xs: X_test}
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(iters):
        l,_ = sess.run([loss, Trainer], feed_dict=feed_dict_train)
        if i % 500 == 0:
            y_train_pred = sess.run(pred, feed_dict=feed_dict_train)
            y_pred = sess.run(pred, feed_dict=feed_dict_test)
            print("epoch: %d, loss: %.5f, Train RMSE: %.3f, Test RMSE: %.3f" 
            % (i, l, np.sqrt(np.mean(np.square(y_train_pred-y_train))), 
            np.sqrt(np.mean(np.square(y_pred-y_test)))))

The author is still hard at work on the remaining sections…

Reposted from blog.csdn.net/weixin_43269174/article/details/88008917