Pytorch学习系列之十:如何确定合适的epoch数、在加载的模型基础上继续训练

1,使用背景

当有以下两种情况时:

1) 不知道训练的epoch选取为何值时。过小,训练不充分,泛化能力差; 过大,训练过度,导致过拟合。所以需要动态观察每个epoch后,模型在验证集(也可以不严谨的说是测试集)上的精度,选取精度最大的epoch作为最终的训练结果。

2)在加载的模型基础上继续训练。

在训练模型的时候可能会因为一些问题导致程序中断,或者常常需要观察训练情况的变化来更改学习率等参数,这时候就需要加载中断前保存的模型,并在此基础上继续训练。

2,实战代码

本文以CIFAR10数据集为例,将数据集分为了训练集(5W张)、测试集(1W张),每张图片是3*32*32(CHW)。严格意义上讲,实战项目中还需要有验证集的。这里就将验证集省略掉了,道理都是一样的。

本文将自动检查当前机器是否支持CUDA,自动切换设备

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

#而且该方法还可以通过设备号指定使用哪个GPU设备,比如使用0号设备:
device = torch.device("cuda:0" if use_cuda else "cpu")


完整的代码如下: (注意看注释即可)

# -*- coding:utf-8 -*-

'''本文件用于举例说明pytorch保存和加载文件的方法'''

import torch as torch
import torchvision as tv
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
import os
# 引入可视化工具包
from visdom import Visdom

# from torchvision.transforms import ToPILImage
# import torch.backends.cudnn as cudnn
# import datetime
# import argparse

# Visdom visualization client (requires a running visdom server)
viz = Visdom()
# One window plotting y = [loss, acc] against x = [epoch]
viz.line([[0.0, 0.0]],[0.0],win='test',opts=dict(title='loss&acc',legend=['loss','acc']))

# Hyper-parameters and paths
batch_size = 32
epochs = 20
WORKERS = 0  # number of DataLoader worker processes (0 = load in main process)
test_flag = False  # when True, load the saved model and only run evaluation
ROOT = './'  # root directory where the CIFAR-10 dataset is stored
log_dir = './cifar_model.pth'  # checkpoint save/load path

# Automatically select GPU if CUDA is available, otherwise fall back to CPU.
use_cuda = torch.cuda.is_available()
if use_cuda:
    print("--- Use GPU to train.")
device = torch.device("cuda" if use_cuda else "cpu")

# CIFAR-10 preprocessing: to tensor, then normalize each channel to [-1, 1].
transform = tv.transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

# Training set: 50k images, each 3x32x32 (CHW).
train_data = tv.datasets.CIFAR10(root=ROOT, train=True, download=True, transform=transform)
# BUG FIX: the original passed the dataset object itself where a shape was
# expected; report the per-sample tensor shape instead.
print('train_data size: {}, sample shape: {}'.format(len(train_data), tuple(train_data[0][0].shape)))
# Test set: 10k images.
test_data = tv.datasets.CIFAR10(root=ROOT, train=False, download=False, transform=transform)
print('test_data size: {}'.format(len(test_data)))

train_load = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=WORKERS)
test_load = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=WORKERS)


# 构造模型
class Net(nn.Module):
    """Small VGG-style CNN classifying 3x32x32 images into 10 classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Attribute names are kept unchanged so previously saved
        # state_dict checkpoints remain loadable.
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv4 = nn.Conv2d(256, 256, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(256 * 8 * 8, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        out = self.conv1(x).relu()
        out = self.pool(self.conv2(out).relu())
        out = self.conv3(out).relu()
        out = self.pool(self.conv4(out).relu())
        # Collapse all non-batch dimensions before the classifier head
        # (32x32 input -> 8x8 after two 2x2 poolings -> 256*8*8 features).
        out = out.flatten(start_dim=1)
        out = self.fc1(out).relu()
        out = self.fc2(out).relu()
        return self.fc3(out)


# Instantiate the network on the selected device; cross-entropy loss matches
# the raw logits returned by Net.forward, and Adam optimizes all parameters.
model = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)


# 模型训练,返回train loss
def train(model, train_loader, epoch):
    """Run one training epoch and return the mean training loss.

    Relies on the module-level ``optimizer``, ``criterion`` and ``device``.

    :param model: network to train (updated in place)
    :param train_loader: DataLoader yielding (input, target) batches
    :param epoch: current epoch number, used only for logging
    :return: mean loss (float) over all batches of this epoch
    """
    model.train()
    train_loss = 0.0
    num_batches = 0
    for x, y in train_loader:
        x = x.to(device)
        y = y.to(device)
        optimizer.zero_grad()
        y_hat = model(x)
        loss = criterion(y_hat, y)
        loss.backward()
        optimizer.step()
        # BUG FIX: accumulate the Python float, not the tensor itself --
        # accumulating the tensor kept every batch's autograd graph alive,
        # steadily growing memory over the epoch.
        train_loss += loss.item()
        num_batches += 1
    # max(..., 1) guards against division by zero on an empty loader
    # (the original also raised NameError on `i` in that case).
    loss_mean = train_loss / max(num_batches, 1)
    print('Train Epoch: {}\t Loss: {:.6f}'.format(epoch, loss_mean))
    return loss_mean

# 模型测试,
def test(model, test_loader):
    """
    Evaluate the model on a dataset; also usable for validation.

    Relies on the module-level ``criterion`` and ``device``.

    :param model: model to evaluate
    :param test_loader: DataLoader over the evaluation set
    :return: (accuracy, mean loss)
    """
    model.eval()
    test_loss = 0.0
    correct = 0
    num_batches = 0
    with torch.no_grad():
        for x, y in test_loader:
            x = x.to(device)
            y = y.to(device)
            # NOTE: the original called optimizer.zero_grad() here; no
            # gradients are ever computed under no_grad(), so it was removed.
            y_hat = model(x)
            test_loss += criterion(y_hat, y).item()
            # Predicted class = argmax over logits, shape (batch, 1).
            pred = y_hat.max(1, keepdim=True)[1]
            correct += pred.eq(y.view_as(pred)).sum().item()
            num_batches += 1
    # BUG FIX: derive the dataset size from the loader instead of the global
    # ``test_data`` so the function works for any loader passed in.
    total = len(test_loader.dataset)
    test_loss /= max(num_batches, 1)
    accuracy = 1.0 * correct / total
    print("====correct", correct)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, total, accuracy*100.0))

    return accuracy, test_loss


def main():
    """Train (or, with ``test_flag``, only evaluate) with checkpoint resume.

    The checkpoint at ``log_dir`` stores model/optimizer state, the last
    completed epoch and the best accuracy; training resumes from there and
    the checkpoint is overwritten whenever validation accuracy improves.
    """
    # Evaluation-only mode: load the saved checkpoint and run the test set.
    if test_flag:
        # BUG FIX: map_location makes a GPU-saved checkpoint loadable on a
        # CPU-only host (plain torch.load would raise a deserialization error).
        checkpoint = torch.load(log_dir, map_location=device)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        test(model, test_load)
        return

    best_accuracy = 0.0
    # Resume from an existing checkpoint, if any, and continue training.
    if os.path.exists(log_dir):
        checkpoint = torch.load(log_dir, map_location=device)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        save_accuracy = checkpoint['accuracy']
        print('加载 epoch {} accuracy:{:.4f}成功!'.format(start_epoch, save_accuracy))
        # Only overwrite the checkpoint if we beat its recorded accuracy.
        best_accuracy = save_accuracy
    else:
        start_epoch = 0
        print('无保存模型,将从头开始训练!')

    for epoch in range(start_epoch + 1, epochs):
        train_loss = train(model, train_load, epoch)
        accuracy, _ = test(model, test_load)
        # Append [loss, acc] vs epoch to the visdom window created at import.
        viz.line([[train_loss, accuracy]], [epoch], win='test', update='append')
        print('epoch: {} , valid set accuracy: {:.4f}'.format(epoch, accuracy))
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            # Keep only the best-so-far checkpoint.
            state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(),
                     'epoch': epoch, 'accuracy': accuracy}
            torch.save(state, log_dir)
            print('epoch: {} , valid set accuracy: {:.4f}, save as:{}'.format(epoch, accuracy, log_dir))


# Script entry point: run training/evaluation only when executed directly.
if __name__ == '__main__':
    main()

Guess you like

Origin blog.csdn.net/thequitesunshine007/article/details/120973133