(3) Training with a Self-Built Classification Network

This article builds a ResNet18 network model by hand. Because it is self-built, it cannot load the official pretrained weights.

The main goal is to understand how the network is put together.

A version that can load the official pretrained weights is here (not written by me): ResNet18 network model, loadable with the official weights

1. The ResNet18 network structure diagram is shown below:

2. Pipeline (a shape walkthrough follows the list):

  • First a convolution layer with kernel_size (7, 7), followed by a pooling layer with kernel_size (3, 3).
  • Then a series of convolution-plus-residual-addition stages, i.e. the red boxes in the figure.
  • Finally a global average pooling layer (average pool) and a fully connected layer (fc).
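A minimal, standalone sketch of the stem's shape arithmetic, assuming the standard 224x224 ImageNet input (illustrative only, not part of my_resnet.py):

import torch
import torch.nn as nn

x = torch.randn(1, 3, 224, 224)                              # dummy input image
stem = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)  # the (7, 7) conv
pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)      # the (3, 3) pool
print(stem(x).shape)        # torch.Size([1, 64, 112, 112])
print(pool(stem(x)).shape)  # torch.Size([1, 64, 56, 56])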

3. How the residual block (the convolution-plus-residual-addition stage) works, using the second red box as the example:

A residual block there contains two [3x3, 128] entries, meaning the input passes through two 3x3 convolution layers. The first 3x3 convolution uses stride 2 and the second uses stride 1. Residual addition requires the two feature maps being added to have the same shape, but because the first 3x3 convolution has stride 2, the block's input and output sizes differ. The input therefore has to be resized before the residual addition. In the code, the self.shortcut flag decides whether the input feature map is adjusted (by a 1x1 convolution) before the addition; a sketch of this shape adjustment follows.
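A standalone sketch of the shape mismatch, assuming the stage-2 sizes from the diagram (64 -> 128 channels, 56x56 input):

import torch
import torch.nn as nn

x = torch.randn(1, 64, 56, 56)
conv = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
y = conv(x)
print(y.shape)  # torch.Size([1, 128, 28, 28]) -- channels and spatial size both changed

# x + y would raise a shape error, so project x with a stride-2 1x1 conv first:
shortcut = nn.Conv2d(64, 128, kernel_size=1, stride=2)
print((shortcut(x) + y).shape)  # torch.Size([1, 128, 28, 28])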

The residual stage in the first red box differs from the later ones: there, both 3x3 convolutions use stride 1.

my_resnet.py (the network) code

Once the network is built, it can be used for training.

The earlier article covering the training part is here: classification network training

The next chapter will cover some of the pitfalls I ran into while building the network.

import torch
import torch.nn as nn
import torch.nn.functional as F


def auto_pad(k, p=None):
    # Default to "same"-style padding: half the kernel size (per dimension).
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
    return p


class Residual_2(nn.Module):
    def __init__(self, in_channel, out_channel, stride=1, shortcut=False):
        super(Residual_2, self).__init__()
        # When shortcut is True, a 1x1 conv projects the input so its shape
        # matches the main branch before the residual addition.
        self.shortcut = shortcut
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, padding=auto_pad(3), stride=stride)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, padding=auto_pad(3), stride=1)
        self.bn2 = nn.BatchNorm2d(out_channel)
        if self.shortcut:
            self.conv3 = nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride)

    def forward(self, x):
        y1 = self.conv1(x)
        y1 = self.bn1(y1)
        y1 = self.relu1(y1)
        y2 = self.conv2(y1)
        y2 = self.bn2(y2)
        if self.shortcut:
            x = self.conv3(x)  # adjust the identity branch to match y2's shape
        out = F.relu(x + y2)  # ReLU after the addition, as in the original ResNet
        return out


class HeadConv(nn.Module):
    def __init__(self, in_channel, out_channel):
        super(HeadConv, self).__init__()
        # Stem: 7x7 stride-2 conv followed by 3x3 stride-2 max pool (224 -> 112 -> 56).
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=7, padding=3, stride=2)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.relu1 = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.pool(x)

        return x


class MainNet(nn.Module):
    def __init__(self, num_classes=1000):
        super(MainNet, self).__init__()
        blocks_num = [2, 2, 2, 2]
        channel_planes = [64, 128, 256, 512]

        self.num_classes = num_classes
        self.conv1 = HeadConv(3, channel_planes[0])
        # NOTE: submodules in a plain Python list are not registered with nn.Module;
        # this only works because each Sequential is reassigned to self.block1..4 below.
        self.blocks = [nn.Sequential(), nn.Sequential(), nn.Sequential(), nn.Sequential()]
        for i in range(len(blocks_num)):
            if i == 0:
                for j in range(blocks_num[i]):
                    # In the first stage every residual block uses stride 1; the 1x1
                    # shortcut projection here is 64 -> 64 (the official ResNet18 uses
                    # an identity shortcut in this stage).
                    if j == 0:
                        self.blocks[i].add_module("conv_block%s_branch_%s_2a" % (i + 1, j),
                                                  Residual_2(channel_planes[0], channel_planes[0], stride=1,
                                                             shortcut=True))
                    else:
                        self.blocks[i].add_module("conv_block%s_branch_%s_2b" % (i + 1, j),
                                                  Residual_2(channel_planes[0], channel_planes[0], stride=1,
                                                             shortcut=False))
            else:
                for j in range(blocks_num[i]):
                    # In later stages the first residual block downsamples with stride 2
                    # and doubles the channels, so it needs the 1x1 shortcut projection.
                    if j == 0:
                        self.blocks[i].add_module("conv_block%s_branch_%s_2a" % (i + 1, j),
                                                  Residual_2(channel_planes[i - 1], channel_planes[i], stride=2,
                                                             shortcut=True))
                    else:
                        self.blocks[i].add_module("conv_block%s_branch_%s_2b" % (i + 1, j),
                                                  Residual_2(channel_planes[i], channel_planes[i], stride=1,
                                                             shortcut=False))

        self.block1, self.block2, self.block3, self.block4 = self.blocks
        self.avg = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Sequential(
            nn.Flatten(),
            # Two stacked Linear layers with no nonlinearity in between;
            # the official ResNet18 uses a single Linear(512, num_classes).
            nn.Linear(512, 100),
            nn.Linear(100, num_classes),
        )

    def forward(self, x):
        x = self.conv1(x)
        feat1 = self.block1(x)
        feat2 = self.block2(feat1)
        feat3 = self.block3(feat2)
        feat4 = self.block4(feat3)
        avg = self.avg(feat4)
        outputs = self.fc(avg)
        return outputs


if __name__ == '__main__':
    # Quick smoke test; fall back to CPU so this also runs without a GPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    x = torch.ones(size=(4, 3, 224, 224)).to(device)

    model = MainNet(num_classes=10).to(device)
    print(model)
    out = model(x)
    print(out.shape)  # expected: torch.Size([4, 10])

    # from torchsummary import summary
    # summary(model, (3, 224, 224), device=device)
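One subtlety in MainNet worth calling out: self.blocks starts out as a plain Python list, and submodules stored in a plain list are not registered with the parent nn.Module; the code above only works because each nn.Sequential is later reassigned to self.block1 through self.block4. A minimal standalone sketch of the difference (hypothetical classes, not from this project):

import torch.nn as nn

class PlainListNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = [nn.Linear(2, 2)]  # plain list: NOT registered

class RegisteredNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([nn.Linear(2, 2)])  # registered

print(len(list(PlainListNet().parameters())))   # 0 -- invisible to .to() and optimizers
print(len(list(RegisteredNet().parameters())))  # 2 -- weight and bias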

Training code:

import os
import torch
from PIL import ImageFile
import torch.optim as optim
from my_resnet import MainNet
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torchvision import datasets, transforms

# Tolerate truncated image files, and work around the duplicate-OpenMP-runtime
# error that matplotlib/torch can trigger on Windows.
ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


def train():
    net.train()  # training mode (affects BatchNorm statistics)
    running_loss = 0
    for batch_idx, (data, target) in enumerate(train_data):
        data, target = data.to(device), target.to(device)
        out = net(data)
        loss = criterion(out, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    return running_loss


def test():
    net.eval()  # evaluation mode: BatchNorm uses its running statistics
    correct, total = 0, 0
    with torch.no_grad():
        for _, (data, target) in enumerate(val_data):
            data, target = data.to(device), target.to(device)
            out = net(data)
            out = F.softmax(out, dim=1)  # softmax is monotonic, so argmax is unchanged
            prediction = out.argmax(dim=1)
            total += target.size(0)
            correct += (prediction == target).sum().item()
    print('Accuracy on test set: (%d/%d) = %.2f %%' % (correct, total, 100.0 * correct / total))


if __name__ == '__main__':
    LR = 0.0001
    Epoches = 200
    Batch_Size = 4
    num_classes = 3
    best_loss = float("inf")  # track the best summed epoch loss seen so far
    Image_Size = [256, 256]

    # 1. Data loading
    data_dir = r'D:\Code\python\完整项目放置\classify_project\multi_classification\my_dataset1'
    # 1.1 Define the transforms to apply to the data
    data_transform = {x: transforms.Compose([transforms.Resize(Image_Size), transforms.ToTensor()]) for x in
                      ["train", "valid"]}
    image_datasets = {x: datasets.ImageFolder(root=os.path.join(data_dir, x), transform=data_transform[x]) for x in
                      ["train", "valid"]}
    dataloader = {x: torch.utils.data.DataLoader(dataset=image_datasets[x], batch_size=Batch_Size, shuffle=True) for x
                  in ["train", "valid"]}
    train_data, val_data = dataloader["train"], dataloader["valid"]

    index_classes = image_datasets["train"].class_to_idx
    print(index_classes)
    example_classes = image_datasets["train"].classes
    print(example_classes)

    net = MainNet(num_classes)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net.to(device)

    # 5. Define the loss function and the optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=LR)

    loss_list = []
    for epoch in range(Epoches):
        loss = train()
        loss_list.append(loss)
        print("第%d轮的loss为:%5f:" % (epoch, loss))
        test()

        if loss < best_loss:
            best_loss = loss
            # torch.save(net, ...) pickles the whole module; saving net.state_dict()
            # is the generally recommended alternative.
            torch.save(net, "best.pth")
        torch.save(net, "last.pth")

    plt.title("Graph")
    plt.plot(range(Epoches), loss_list)
    plt.ylabel("loss")
    plt.xlabel("epoch")
    plt.show()
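A note on these checkpoints: because torch.save(net, ...) pickles the entire module, the MainNet class definition must be importable when the file is loaded back. A minimal loading sketch, assuming the file layout above:

import torch
from my_resnet import MainNet  # class definition must be importable for unpickling

# On PyTorch >= 2.6 you must also pass weights_only=False for full-module checkpoints.
model = torch.load("best.pth", map_location="cpu")
model.eval()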

Reposted from blog.csdn.net/m0_48095841/article/details/125725877