Dive into Deep Learning: Residual Networks (ResNet)

1. Residual Blocks

Stacking an extra layer changes the function class; what we want is for each added layer to enlarge the function class (the new class should contain the old one).

A residual block adds a shortcut (fast path) so that the block computes f(x) = x + g(x), where g(x) is the output of the stacked layers.
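In code, the idea is just "identity plus residual". The snippet below is a minimal sketch for illustration only (the class name TinyResidual and the single convolution inside it are placeholders, not the full block implemented in section 5):

import torch
from torch import nn

class TinyResidual(nn.Module):  # illustrative sketch, not the full residual block
    def __init__(self, channels):
        super().__init__()
        # g(x): any small stack of layers; here a single shape-preserving conv
        self.g = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        return x + self.g(x)  # f(x) = x + g(x): identity shortcut plus residual

x = torch.rand(1, 3, 6, 6)
print(TinyResidual(3)(x).shape)  # torch.Size([1, 3, 6, 6])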

2. ResNet Block Details

Different kinds of residual blocks can be used;

a ResNet block that halves the height and width (using a stride of 2);

followed by several ResNet blocks that keep the height and width unchanged.
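To see why the stride-2 block halves the height and width, recall the convolution output-size formula: out = (in + 2*padding - kernel) // stride + 1. The small helper below (conv_out_size is written here purely for illustration) checks both cases used in this post:

def conv_out_size(size, kernel_size, padding, stride):
    # standard convolution output-size formula
    return (size + 2 * padding - kernel_size) // stride + 1

# 3x3 conv, padding 1, stride 2: 6 -> 3, so height and width are halved
print(conv_out_size(6, kernel_size=3, padding=1, stride=2))  # 3
# 3x3 conv, padding 1, stride 1: 6 -> 6, so the shape is preserved
print(conv_out_size(6, kernel_size=3, padding=1, stride=1))  # 6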

3. ResNet Architecture

The overall architecture is similar to that of VGG and GoogLeNet, but the blocks are replaced with ResNet blocks.

4. Summary

Residual blocks make very deep networks much easier to train, because the identity shortcut lets information and gradients flow directly through the skip connection; networks with as many as a thousand layers can be trained.

Residual networks have had a profound influence on the design of subsequent deep neural networks, both convolutional and fully connected.

5. Code Implementation

import time
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys

sys.path.append("..")
import d2lzh_pytorch as d2l

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Residual(nn.Module):  # this class is also saved in the d2lzh_pytorch package for later use
    def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):
        super(Residual, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels,
                               kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=3, padding=1)
        if use_1x1conv:
            self.conv3 = nn.Conv2d(in_channels, out_channels,
                                   kernel_size=1, stride=stride)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return F.relu(Y + X)


# Check the case where the input and output have the same shape
blk = Residual(3, 3)
X = torch.rand((4, 3, 6, 6))
print(blk(X).shape)

# torch.Size([4, 3, 6, 6])

# We can also increase the number of output channels while halving the output height and width
blk = Residual(3, 6, use_1x1conv=True, stride=2)
print(blk(X).shape)
# torch.Size([4, 6, 3, 3])

"""RESNET模型"""
net = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
    nn.BatchNorm2d(64),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))


def resnet_block(in_channels, out_channels, num_residuals,
                 first_block=False):
    if first_block:
        assert in_channels == out_channels  # the first module keeps the channel count equal to the input
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            # first residual of a later module: widen the channels and halve the height/width
            blk.append(Residual(in_channels, out_channels, use_1x1conv=True, stride=2))
        else:
            blk.append(Residual(out_channels, out_channels))
    return nn.Sequential(*blk)


# Next, add all the residual blocks to the ResNet; each module uses two residual blocks
net.add_module("resnet_block1", resnet_block(64, 64, 2,
                                             first_block=True))
net.add_module("resnet_block2", resnet_block(64, 128, 2))
net.add_module("resnet_block3", resnet_block(128, 256, 2))
net.add_module("resnet_block4", resnet_block(256, 512, 2))
# Finally, as in GoogLeNet, add a global average pooling layer followed by a fully connected output layer
net.add_module("global_avg_pool", d2l.GlobalAvgPool2d())
# Output of GlobalAvgPool2d: (batch, 512, 1, 1)
net.add_module("fc", nn.Sequential(d2l.FlattenLayer(),
                                   nn.Linear(512, 10)))

# Check how the shape changes between the different ResNet modules
X = torch.rand((1, 1, 224, 224))
for name, layer in net.named_children():
    X = layer(X)
    print(name, ' output shape:\t', X.shape)
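For reference, the printed shapes should roughly match the following (computed from the layer definitions above; the exact print formatting may differ):

# 0 output shape:               torch.Size([1, 64, 112, 112])   (7x7 conv, stride 2)
# 1 output shape:               torch.Size([1, 64, 112, 112])   (batch norm)
# 2 output shape:               torch.Size([1, 64, 112, 112])   (ReLU)
# 3 output shape:               torch.Size([1, 64, 56, 56])     (3x3 max pool, stride 2)
# resnet_block1 output shape:   torch.Size([1, 64, 56, 56])
# resnet_block2 output shape:   torch.Size([1, 128, 28, 28])
# resnet_block3 output shape:   torch.Size([1, 256, 14, 14])
# resnet_block4 output shape:   torch.Size([1, 512, 7, 7])
# global_avg_pool output shape: torch.Size([1, 512, 1, 1])
# fc output shape:              torch.Size([1, 10])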

""" 获取数据和训练模型"""
batch_size = 256
# If an "out of memory" error occurs, reduce batch_size or the resize value
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
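After training, a quick sanity check on a single test batch can be done along the following lines (a sketch; it assumes, as in d2l, that test_iter yields (images, labels) batches, and the variable names here are illustrative):

# Sketch: predict on one test batch after training
net.eval()  # switch batch norm to evaluation mode
with torch.no_grad():
    X_sample, y_sample = next(iter(test_iter))
    y_hat = net(X_sample.to(device)).argmax(dim=1)
print('predicted labels:', y_hat[:10].cpu())
print('actual labels:   ', y_sample[:10])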

Reposted from blog.csdn.net/qq_42012782/article/details/123393630