matplotlib - displaying predicted images from a trained model

1. Result figure (the prediction plot saved as pred.png by the code below)

2. Complete code

import torch
import numpy as np
import sys
import torchvision
from torch import nn
import matplotlib.pyplot as plt


def load_data_fashion_mnist(batch_size, resize=None, root='~/Datasets/FashionMNIST'):
    trans = []
    if resize:
        trans.append(torchvision.transforms.Resize(size=resize))
    trans.append(torchvision.transforms.ToTensor())
    
    transform = torchvision.transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
    if sys.platform.startswith('win'):
        num_workers = 0  # 0 means no extra worker processes are used to speed up data loading
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    # each batch of samples X has shape (batch_size, 1, 28, 28)
    return train_iter, test_iter

batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)
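
# Optional sanity check (added, not part of the original post): one batch from
# train_iter should hold images of shape (batch_size, 1, 28, 28) and labels of
# shape (batch_size,).
X_sample, y_sample = next(iter(train_iter))
print(X_sample.shape, y_sample.shape)  # torch.Size([256, 1, 28, 28]) torch.Size([256])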

######################################################################################
num_inputs, num_outputs, num_hiddens = 784, 10, 256

W1 = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_hiddens)), dtype=torch.float)
b1 = torch.zeros(num_hiddens, dtype=torch.float)
W2 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens, num_outputs)), dtype=torch.float)
b2 = torch.zeros(num_outputs, dtype=torch.float)

params = [W1, b1, W2, b2]
for param in params:
    param.requires_grad_(requires_grad=True)
    
def relu(X):
    return torch.max(input=X, other=torch.tensor(0.0))

def net(X):
    X = X.view((-1, num_inputs))
    H = relu(torch.matmul(X, W1) + b1)
    return torch.matmul(H, W2) + b2
######################################################################################
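
# Optional shape check (added, not part of the original post): the two-layer MLP
# maps a flattened 28*28 image to 10 class scores, so a dummy batch of 2 images
# should yield an output of shape (2, 10).
print(net(torch.zeros(2, 1, 28, 28)).shape)  # torch.Size([2, 10])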

loss = torch.nn.CrossEntropyLoss()
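# Note (added): nn.CrossEntropyLoss applies log-softmax internally, so net() can
# return raw scores (logits) rather than probabilities; with the default
# reduction='mean' the returned loss is already averaged over the batch.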


def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n
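
# Note (added): wrapping the evaluation loop above in `with torch.no_grad():`
# would skip gradient tracking during accuracy evaluation and save memory; the
# original post omits this.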

def sgd(params, lr, batch_size):
    # To stay consistent with the original book, the gradient is divided by batch_size here. Strictly
    # speaking this is unnecessary, since PyTorch loss functions average over the batch by default.
    for param in params:
        param.data -= lr * param.grad / batch_size  # use param.data so the update itself is not tracked by autograd
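
# Rough built-in equivalent (added as an assumption, not part of the original post):
# because the update divides the gradient by batch_size, this hand-written sgd
# behaves like torch.optim.SGD(params, lr=lr / batch_size) with no momentum.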

def plot_two_picture1(train_loss_values, train_acc_values, test_acc_values):
    epochs = range(1, len(train_acc_values) + 1)
    plt.figure(1)

    plt.subplot(2,1,1)
    plt.plot(epochs, train_acc_values, 'bo', label='Training acc') 
    plt.plot(epochs, test_acc_values, 'b', label='Testing acc') 
    plt.title('Training and testing acc')
    plt.xlabel('Epochs')
    plt.ylabel('Acc')
    plt.legend()
    
    plt.subplot(2,1,2)
    plt.plot(epochs, train_loss_values, 'bo', label='Training loss') 
    plt.title('Training loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()  
    
    plt.tight_layout()
    plt.show()

def plot_two_picture2(train_loss_values, train_acc_values, test_acc_values):
    epochs = range(1, len(train_acc_values) + 1)
    plt.figure('Accuracy')
    plt.plot(epochs, train_acc_values, 'r-', label='Training acc') 
    plt.plot(epochs, test_acc_values, 'b:', label='Testing acc') 
    plt.title('Training and testing acc')
    plt.xlabel('Epochs')
    plt.ylabel('Acc')
    plt.legend()
    
    plt.figure('Loss')
    plt.plot(epochs, train_loss_values, 'y--*', label='Training loss')
    plt.title('Training loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()    
    
    plt.show()
    
    
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    train_loss_list, train_acc_list, test_acc_list = [], [], []
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            
            # zero the gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()  # used in the "concise implementation of softmax regression" section
            
            
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
        
        # save the info for picture
        train_loss_list.append(train_l_sum / n)
        train_acc_list.append(train_acc_sum / n)
        test_acc_list.append(test_acc)
        
    # plot_picture(train_acc_list, test_acc_list)
    plot_two_picture1(train_loss_list, train_acc_list, test_acc_list)
    
num_epochs, lr = 2, 100.0
train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)     

        
from IPython import display
from matplotlib import pyplot as plt

def use_svg_display():
    """Use svg format to display plot in jupyter"""
    display.set_matplotlib_formats('svg')
    
def get_fashion_mnist_labels(labels):
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]

def show_fashion_mnist(images, labels):
    use_svg_display()
    # the underscore denotes a variable we ignore (do not use)
    _, figs = plt.subplots(1, len(images), figsize=(12, 12))
    for f, img, lbl in zip(figs, images, labels):
        f.imshow(img.view((28, 28)).numpy())
        f.set_title('predicted: '+lbl)
        f.axes.get_xaxis().set_visible(False)
        f.axes.get_yaxis().set_visible(False)
    plt.savefig('./pred.png')
    # plt.show()
    
X, y = next(iter(test_iter))

true_labels = get_fashion_mnist_labels(y.numpy())
pred_labels = get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())
titles = pred_labels

show_fashion_mnist(X[0:3], titles[0:3])
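
# Optional addition (not part of the original post): printing each true label next
# to its prediction makes mismatches easy to spot in the console.
for true, pred in zip(true_labels[0:3], pred_labels[0:3]):
    print('true: %-10s pred: %s' % (true, pred))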

Reposted from blog.csdn.net/Zhou_Dao/article/details/104435954