PyTorch Learning (19): Visualizing the Training Process with Visdom

Preface

When training a CNN, how do you display training statistics such as the Loss and Accuracy in real time? Visualizing these values makes it much easier to tune hyperparameters and improve the model. This chapter uses the visdom visualization tool to plot the Loss and Accuracy curves during training.

For the basic usage of visdom, please refer to the earlier articles in this series.
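
As a quick refresher, here is a minimal sketch (assuming a default local visdom installation): start the server in a separate terminal with python -m visdom.server, open http://localhost:8097 in a browser, and then push data to it from Python:

import numpy as np
import visdom

# assumes the server was started with: python -m visdom.server
viz = visdom.Visdom(env='demo')   # connects to http://localhost:8097 by default

# create a line window with two initial points, then stream further points into it
win = viz.line(Y=np.array([1.0, 0.5]), X=np.array([0, 1]),
               opts=dict(title='demo loss', xlabel='step', ylabel='loss'))
for step in range(2, 10):
    viz.line(Y=np.array([1.0 / (step + 1)]), X=np.array([step]), win=win, update='append')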


Development / Test Environment

  • Ubuntu 18.04
  • Anaconda3
  • pycharm
  • visdom
  • pytorch

Goals

  • Train the LeNet-5 network on the MNIST handwritten digit dataset
  • Use visdom to visualize the training Loss and Accuracy curves in real time

Procedure

Define the CNN

The network is taken directly from the example on the official PyTorch website (so this section is mostly a copy-and-paste job).
Network input: N x 1 x 32 x 32 (N is the mini-batch size)
Network output: N x 10 (10 classes)
A quick shape check after the code below confirms these dimensions.

  • Code
    net.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution
        # kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
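
As a quick sanity check (not in the original post), a random tensor with the shape described above can be pushed through the network to confirm the output dimensions:

import torch
from net import Net

model = Net()
dummy = torch.randn(4, 1, 32, 32)   # N x 1 x 32 x 32, here with N = 4
out = model(dummy)
print(out.shape)                     # expected: torch.Size([4, 10])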

Prepare the Dataset

  • Training set
  • Validation set
    The MNIST dataset provided by torchvision is used, so there is no need to download it in advance (download=True fetches it automatically on first use).
    Note: the MNIST images are 28 x 28 x 1, while the network expects N x 1 x 32 x 32 inputs, so the data is resized with Resize((32, 32)); the quick check after the listing below confirms the resulting batch shape.
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import visdom
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import net
import utils


dataset_dir = '/media/weipenghui/Extra/MNIST'
transform = transforms.Compose([transforms.Resize((32, 32)),
                                transforms.ToTensor()])
batch_size = 64

train_dataset = torchvision.datasets.MNIST(root=dataset_dir, train=True, transform=transform, download=True)
val_dataset = torchvision.datasets.MNIST(root=dataset_dir, train=False, transform=transform, download=True)

print('train dataset: {} \nval dataset: {}'.format(len(train_dataset), len(val_dataset)))

train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
val_dataloader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

# display one batch of images in visdom
viz = visdom.Visdom(env='train-mnist')
viz.image(torchvision.utils.make_grid(next(iter(train_dataloader))[0], nrow=8), win='train-image')

plt.figure()
utils.imshow(next(iter(train_dataloader)))
plt.show()
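
A similar quick check (again not part of the original script) confirms that the Resize((32, 32)) transform really yields the N x 1 x 32 x 32 batches the network expects:

images, labels = next(iter(train_dataloader))
print(images.shape)   # expected: torch.Size([64, 1, 32, 32])
print(labels.shape)   # expected: torch.Size([64])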

matplotlib display:


[Figure: a mini-batch of MNIST training images rendered with matplotlib]

visdom display:


[Figure: the same mini-batch displayed in the visdom web UI]

Train the Network and Visualize Loss and Accuracy

Tracking Loss and Accuracy:
batch_size is set to 64, so one iteration processes 64 images. Every 200 iterations the script records the average Train Loss over those iterations, runs one complete pass over the validation set, computes the Train Acc and Val Acc, and then sends the values to the visdom server, which plots them in real time.
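
For reference, the validation pass that runs every 200 iterations boils down to the accuracy computation sketched below (evaluate is a hypothetical helper, not part of the original script, which inlines this logic in the training loop):

import torch

def evaluate(model, dataloader, device):
    """Return the classification accuracy of model over dataloader."""
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():   # no gradients are needed for evaluation
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, prediction = torch.max(outputs, 1)
            correct += (prediction == labels).sum().item()
            total += labels.size(0)
    return correct / total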

# ------------------ model and optimizer ------------------------------

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = net.Net()
net.to(device)
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
loss_fc = nn.CrossEntropyLoss()

# ----------------- training ---------------------------------------
# create the loss and accuracy windows with dummy data;
# the first real update below replaces their contents
loss_win = viz.line(np.arange(10))
acc_win = viz.line(X=np.column_stack((np.array(0), np.array(0))),
                   Y=np.column_stack((np.array(0), np.array(0))))
iter_count = 0
for epoch in range(20):

    running_loss = 0.0
    tr_loss = 0.0
    tr_acc = 0.0
    ts_acc = 0.0
    tr_total = 0
    tr_correct = 0
    ts_total = 0
    ts_correct = 0


    for i, sample_batch in enumerate(train_dataloader):
        inputs = sample_batch[0].to(device)
        labels = sample_batch[1].to(device)

        net.train()
        optimizer.zero_grad()

        outputs = net(inputs)

        loss = loss_fc(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        tr_total += labels.size(0)
        tr_correct += (torch.max(outputs, 1)[1] == labels).sum().item()

        if i % 200 == 199:
            # full pass over the validation set (no gradients needed)
            net.eval()
            with torch.no_grad():
                for sample_batch in val_dataloader:
                    inputs = sample_batch[0].to(device)
                    labels = sample_batch[1].to(device)

                    outputs = net(inputs)

                    _, prediction = torch.max(outputs, 1)
                    ts_correct += (prediction == labels).sum().item()
                    ts_total += labels.size(0)

            tr_loss = running_loss / 200
            tr_acc = tr_correct / tr_total
            ts_acc = ts_correct / ts_total
            iter_count += 200
            if iter_count == 200:
                viz.line(Y=np.array([tr_loss]), X=np.array([iter_count]), update='replace', win=loss_win)
                viz.line(Y=np.column_stack((np.array([tr_acc]), np.array([ts_acc]))),
                         X=np.column_stack((np.array([iter_count]), np.array([iter_count]))),
                         win=acc_win, update='replace',
                         opts=dict(legend=['Train_acc', 'Val_acc']))

            else:
                viz.line(Y=np.array([tr_loss]), X=np.array([iter_count]), update='append', win=loss_win)
                viz.line(Y=np.column_stack((np.array([tr_acc]), np.array([ts_acc]))),
                         X=np.column_stack((np.array([iter_count]), np.array([iter_count]))),
                         win=acc_win, update='append')

            running_loss = 0
            tr_total = 0
            tr_correct = 0
            ts_total = 0
            ts_correct = 0

    # step the LR scheduler once per epoch, after the optimizer has stepped
    scheduler.step()

print('Train finish!')
torch.save(net.state_dict(), './model/model_10_2_epoch.pth')  # note: the ./model directory must already exist
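
Once training finishes, the saved weights can be loaded back for inference. A minimal sketch, assuming the checkpoint path saved above:

import torch
from net import Net

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = Net()
model.load_state_dict(torch.load('./model/model_10_2_epoch.pth', map_location=device))
model.to(device)
model.eval()   # switch to evaluation mode before running predictions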

Training Output

  • Train loss

  • Train acc

  • Val acc

  • Mini-batch images

[Figure: a mini-batch of training images shown in visdom]
[Figure: Train Loss curve]
[Figure: Train / Val accuracy curves]

In the end, the accuracy on the validation set exceeds 98%.


Complete Project

  • train.py
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import visdom
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import net
import utils


dataset_dir = '/media/weipenghui/Extra/MNIST'
transform = transforms.Compose([transforms.Resize((32, 32)),
                                transforms.ToTensor()])
batch_size = 64

train_dataset = torchvision.datasets.MNIST(root=dataset_dir, train=True, transform=transform, download=True)
val_dataset = torchvision.datasets.MNIST(root=dataset_dir, train=False, transform=transform, download=True)

print('train dataset: {} \nval dataset: {}'.format(len(train_dataset), len(val_dataset)))

train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
val_dataloader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

# display one batch of images in visdom
viz = visdom.Visdom(env='train-mnist')
viz.image(torchvision.utils.make_grid(next(iter(train_dataloader))[0], nrow=8), win='train-image')

# plt.figure()
# utils.imshow(next(iter(train_dataloader)))
# plt.show()

# ------------------ model and optimizer ------------------------------

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = net.Net()
net.to(device)
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
loss_fc = nn.CrossEntropyLoss()

# ----------------- training ---------------------------------------
# create the loss and accuracy windows with dummy data;
# the first real update below replaces their contents
loss_win = viz.line(np.arange(10))
acc_win = viz.line(X=np.column_stack((np.array(0), np.array(0))),
                   Y=np.column_stack((np.array(0), np.array(0))))
iter_count = 0
for epoch in range(20):

    running_loss = 0.0
    tr_loss = 0.0
    tr_acc = 0.0
    ts_acc = 0.0
    tr_total = 0
    tr_correct = 0
    ts_total = 0
    ts_correct = 0


    for i, sample_batch in enumerate(train_dataloader):
        inputs = sample_batch[0].to(device)
        labels = sample_batch[1].to(device)

        net.train()
        optimizer.zero_grad()

        outputs = net(inputs)

        loss = loss_fc(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        tr_total += labels.size(0)
        tr_correct += (torch.max(outputs, 1)[1] == labels).sum().item()

        if i % 200 == 199:
            # full pass over the validation set (no gradients needed)
            net.eval()
            with torch.no_grad():
                for sample_batch in val_dataloader:
                    inputs = sample_batch[0].to(device)
                    labels = sample_batch[1].to(device)

                    outputs = net(inputs)

                    _, prediction = torch.max(outputs, 1)
                    ts_correct += (prediction == labels).sum().item()
                    ts_total += labels.size(0)

            tr_loss = running_loss / 200
            tr_acc = tr_correct / tr_total
            ts_acc = ts_correct / ts_total
            iter_count += 200
            if iter_count == 200:
                viz.line(Y=np.array([tr_loss]), X=np.array([iter_count]), update='replace', win=loss_win)
                viz.line(Y=np.column_stack((np.array([tr_acc]), np.array([ts_acc]))),
                         X=np.column_stack((np.array([iter_count]), np.array([iter_count]))),
                         win=acc_win, update='replace',
                         opts=dict(legend=['Train_acc', 'Val_acc']))

            else:
                viz.line(Y=np.array([tr_loss]), X=np.array([iter_count]), update='append', win=loss_win)
                viz.line(Y=np.column_stack((np.array([tr_acc]), np.array([ts_acc]))),
                         X=np.column_stack((np.array([iter_count]), np.array([iter_count]))),
                         win=acc_win, update='append')

            running_loss = 0
            tr_total = 0
            tr_correct = 0
            ts_total = 0
            ts_correct = 0

    # step the LR scheduler once per epoch, after the optimizer has stepped
    scheduler.step()

print('Train finish!')
torch.save(net.state_dict(), './model/model_10_2_epoch.pth')  # note: the ./model directory must already exist
  • net.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution
        # kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

  • utils.py
import numpy as np
import torch
from torchvision.utils import make_grid
import matplotlib.pyplot as plt


def imshow(sample_batch):
    """Display a batch of images (the labels are not used) with matplotlib."""
    inputs, labels = sample_batch
    # arrange the batch into a grid, 4 images per row, with bright padding between them
    images_transformed = make_grid(inputs, nrow=4, pad_value=255)
    # convert the C x H x W tensor to an H x W x C numpy array for plt.imshow
    images_transformed = np.transpose(images_transformed.numpy(), (1, 2, 0))
    plt.imshow(images_transformed)

Reposted from blog.csdn.net/weixin_34072458/article/details/87130570