PyTorch Fashion-MNIST Convolutional Neural Network Example: Training to ~94% Test Accuracy

Contents

I. Downloading the dataset and data augmentation

II. Inspecting the Fashion-MNIST images

III. Building the models

IV. Preparation before training

V. Training the model

VI. Analyzing predictions (confusion matrix)

VII. Saving the model

I. Downloading the dataset and data augmentation

1. Importing libraries:

import copy
import os
from tqdm import tqdm
from time import time
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
from IPython import display

import torch 
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
from sklearn.metrics import confusion_matrix 

import torchvision
import torch.utils.data.dataloader as loader
import torch.utils.data as Dataset 
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
from torchvision import models

2. Downloading the dataset with data augmentation

# Apply data augmentation to the training set and convert the images from PIL type to tensor type via ToTensor
transform = transforms.Compose(
    [
     transforms.RandomHorizontalFlip(),
     transforms.RandomGrayscale(),
     transforms.ToTensor(),
     transforms.Normalize((0.5,),(0.5,))])

transform1 = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,),(0.5,))])


mnist_train = torchvision.datasets.FashionMNIST(
    root="data", train=True, transform=transform, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
    root="data", train=False, transform=transform1, download=True)

BATCH_SIZE = 100
trainloader = loader.DataLoader(mnist_train, batch_size=BATCH_SIZE, shuffle=True)
testloader = loader.DataLoader(mnist_test, batch_size=BATCH_SIZE, shuffle=False)
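
As a quick sanity check (a minimal sketch using the trainloader just defined), one batch can be pulled from the loader to confirm the tensor shape and the value range produced by Normalize((0.5,), (0.5,)):

images, targets = next(iter(trainloader))
print(images.shape)                              # expected: torch.Size([100, 1, 28, 28])
print(images.min().item(), images.max().item())  # values roughly within [-1, 1]
print(targets[:10])                              # class indices of the first ten samples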

II. Inspecting the Fashion-MNIST images:

"""返回Fashion-MNIST数据集的文本标签。"""
labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']

def show_images(imgs, num_rows, num_cols,targets,labels=None, scale=1.5):  
    """Plot a list of images."""
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for ax, img,target in zip(axes, imgs,targets):
        if torch.is_tensor(img):
            # image tensor
            ax.imshow(img.numpy())
        else:
            # PIL
            ax.imshow(img)
        # hide both axes
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        plt.subplots_adjust(hspace = 0.35)
        if labels:
            ax.set_title('{}-'.format(target)+labels[target])
    return axes

# The dataloader must be turned into an iterator before next() can be used
X, y = next(iter(data.DataLoader(mnist_train, batch_size=24,shuffle = True)))
show_images(X.reshape(24, 28, 28), 3, 8, labels=labels, targets = y)

III. Building the models

1. Model 1: three convolutional layers plus two fully connected layers, with dropout

class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.conv = nn.Sequential(
        nn.Conv2d(1,40,2),
        nn.ReLU(),
        nn.MaxPool2d(2,1),
        nn.Conv2d(40,80,2),
        nn.ReLU(),
        nn.MaxPool2d(2,1),
        nn.Conv2d(80,160,3,padding = 1),
        nn.ReLU(),
        nn.Dropout(p = 0.5),
        nn.MaxPool2d(3,3),)
        
        
        self.classifier = nn.Sequential(
        nn.Linear(160*8*8,200),
        nn.ReLU(),
#         nn.Linear(120,84),
#         nn.ReLU(),
#         nn.Linear(84,42),
#         nn.ReLU(),
        nn.Dropout(p = 0.5),
        nn.Linear(200,10))
        
        
    def forward(self,x):
        x = self.conv(x)
        x = x.view(x.size(0),-1)
        x = self.classifier(x)

        return x
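
To sanity-check the 160*8*8 input size of the classifier, a dummy batch can be pushed through the convolutional part (a minimal sketch, assuming the Net class defined above):

probe = Net()
with torch.no_grad():
    feat = probe.conv(torch.zeros(1, 1, 28, 28))
print(feat.shape)  # expected: torch.Size([1, 160, 8, 8]), i.e. 160*8*8 = 10240 features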
       

2. Model 2: two VGG-style blocks plus two fully connected layers, with batch normalization

# After training for 30 epochs the test-set accuracy reaches 93.8%
class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.conv1 = nn.Conv2d(1,128,1,padding=1)
        self.conv2 = nn.Conv2d(128,128,3,padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu1 = nn.ReLU()

        self.conv3 = nn.Conv2d(128,256,3,padding=1)
        self.conv4 = nn.Conv2d(256, 256, 3,padding=1)
        self.pool2 = nn.MaxPool2d(2, 2, padding=1)
        self.bn2 = nn.BatchNorm2d(256)
        self.relu2 = nn.ReLU()

        self.fc5 = nn.Linear(256*8*8,512)
        self.drop1 = nn.Dropout()  # applied to the flattened features, so plain Dropout is used here
        self.fc6 = nn.Linear(512,10)


    def forward(self,x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.pool1(x)
        x = self.bn1(x)
        x = self.relu1(x)


        x = self.conv3(x)
        x = self.conv4(x)
        x = self.pool2(x)
        x = self.bn2(x)
        x = self.relu2(x)

        #print(" x shape ",x.size())
        x = x.view(-1,256*8*8)
        x = F.relu(self.fc5(x))
        x = self.drop1(x)
        x = self.fc6(x)
        return x
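
The same kind of check works for Model 2; the sketch below (again assuming the Net class above) also prints the parameter count, which is dominated by the 256*8*8 -> 512 fully connected layer:

probe = Net()
with torch.no_grad():
    out = probe(torch.zeros(1, 1, 28, 28))
print(out.shape)  # expected: torch.Size([1, 10])
print(sum(p.numel() for p in probe.parameters()))  # total number of trainable parameters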

IV. Preparation before training:

1. Initializing the model, loss function, and optimizer

net = Net()  # instantiate the model
loss = nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = optim.Adam(net.parameters(), lr=0.001)  # Adam optimizer
# Xavier initialization
def init_xavier(model):
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
# Kaiming (He) initialization
def init_kaiming(model):
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

init_kaiming(net)
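
To compare the two schemes, the sketch below re-initializes the network with each and prints the empirical standard deviation of the first convolutional layer's weights (first_conv is a small helper introduced only for this check; the net is left Kaiming-initialized afterwards, as above):

def first_conv(model):
    # return the first Conv2d module found in the model
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            return m

init_xavier(net)
print("xavier  weight std:", first_conv(net).weight.std().item())
init_kaiming(net)
print("kaiming weight std:", first_conv(net).weight.std().item())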

2. Using the GPU (falling back to the CPU if none is available)

"""使用GPU"""
def use_gpu(net):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net.to(device)
    gpu_nums = torch.cuda.device_count()
    if gpu_nums > 1:
        print("Let's use", gpu_nums, "GPUs")
        net = nn.DataParallel(net)
    elif gpu_nums == 1:
        print("Let's use GPU")
    else:
        print("Let's use CPU")
    return device 

3. Helper functions for the training loop

(1) Animation class for visualizing training progress

class Animator:  #@save
    """在动画中绘制数据。"""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(5, 3.5)):
        # plot multiple lines incrementally
        if legend is None:
            legend = []
            
        # use vector (SVG) figures
        display.set_matplotlib_formats('svg')
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # capture the axis settings in a lambda for later use
        self.config_axes = lambda: self.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, *y):
        # add several data points to the chart
        n = len(y)
        x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
                
        self.axes[0].cla()  # clear the currently active axes
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt,linewidth = 2)
            
        self.axes[0].set_yticks(ticks = np.linspace(0,1,11))
        self.config_axes()
        display.display(self.fig)
        # clear the output so the redrawn figure replaces the previous one, producing an animation effect
        display.clear_output(wait=True)  

    def set_axes(self,axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
        # configure the matplotlib axes

        axes.grid(True)
        axes.set_title("gaojianwen")
        axes.set_xlabel(xlabel)
        axes.set_ylabel(ylabel)
        axes.set_xscale(xscale)
        axes.set_yscale(yscale)
        axes.set_xlim(xlim)
        axes.set_ylim(ylim)
        if legend:
            axes.legend(legend)
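
A minimal usage demo of Animator in a notebook, with purely illustrative dummy numbers rather than real training curves:

demo = Animator(xlabel='epoch', xlim=[0, 5], ylim=[0, 1], legend=['train acc', 'test acc'])
for epoch in range(1, 6):
    demo.add(epoch, 0.60 + 0.06 * epoch, 0.55 + 0.06 * epoch)  # dummy values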

(2) Accumulator (stores intermediate quantities such as loss and accuracy)

class Accumulator:
    """Accumulate running sums over n variables."""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        # add the given values to the running sums
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        # reset the accumulator
        self.data = [0.0] * len(self.data)

    def __getitem__(self, index):
        return self.data[index]
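
In miniature, this is how the training loop below uses the accumulator (toy numbers for two batches of 100 samples):

metric = Accumulator(3)          # running sums: loss * batch size, correct predictions, samples
metric.add(0.70 * 100, 81, 100)  # batch 1: mean loss 0.70, 81 correct out of 100
metric.add(0.55 * 100, 88, 100)  # batch 2: mean loss 0.55, 88 correct out of 100
print(metric[0] / metric[2], metric[1] / metric[2])  # average loss, overall accuracy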

V. Training the model

def train_epoch(net, train_iter, loss, updater, device):
    """Train the model for one epoch."""
    # set the model to training mode
    if isinstance(net, torch.nn.Module):
        net.train()
    # metric stores: total training loss, number of correct predictions, number of samples
    metric = Accumulator(3)

    for x, y in train_iter:
        x, y = x.to(device), y.to(device)
        updater.zero_grad()
        output = net(x)
        l = loss(output, y)  # note: l is the mean loss over the batch
        y_pred = torch.argmax(output, dim=1)  # predicted labels
        if isinstance(updater, torch.optim.Optimizer):
            # use PyTorch's built-in optimizer and loss function
            l.backward()
            updater.step()
            metric.add(float(l) * len(y), (y_pred == y).sum(), y.numel())

    # return the training loss and accuracy
    return metric[0] / metric[2], metric[1] / metric[2]

def evaluate_accuracy(net, test_iter, loss, device):
    """Compute the model's loss and accuracy on the given dataset."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # set the model to evaluation mode
    metric = Accumulator(3)  # total loss, correct predictions, number of samples
    with torch.no_grad():
        for x, y in test_iter:
            x, y = x.to(device), y.to(device)
            output = net(x)
            l = loss(output, y)  # note: l is the mean loss over the batch
            y_pred = torch.argmax(output, dim=-1)
            metric.add(float(l) * len(y), (y_pred == y).sum(), y.numel())
    # return the loss and accuracy on this dataset
    return metric[0] / metric[2], metric[1] / metric[2]

def train(net, train_iter, test_iter, loss, updater, epochs):
    """Full training loop."""
    device = use_gpu(net)
    global animator
    animator = Animator(xlabel='epoch', xlim=[0, epochs], ylim=[0, 1],
                        legend=['train loss', 'train acc', 'test loss', 'test acc'])
    net_list = []  # stores the model parameters after each epoch
    start = time()  # start time
    for epoch in range(epochs + 1):
        if epoch == 0:
            # epoch 0 does no training; it records the accuracy of the randomly initialized model
            train_loss, train_acc = evaluate_accuracy(net, train_iter, loss, device)
        else:
            train_loss, train_acc = train_epoch(net, train_iter, loss, updater, device)
        # compute the (mean) loss and accuracy on the test set
        test_loss, test_acc = evaluate_accuracy(net, test_iter, loss, device)
        animator.add(epoch, train_loss, train_acc, test_loss, test_acc)
        # save a copy of the parameters after each epoch
        # (deepcopy is needed because state_dict() returns references to the live tensors)
        net_list.append(copy.deepcopy(net.state_dict()))
    end = time()  # end time
    max_net_index = animator.Y[3].index(max(animator.Y[3]))  # index of the highest test accuracy
    print("Total time: {} s \nTime per epoch: {} s".format(end - start, (end - start) / epochs))
    print("Best training accuracy: {}%".format(max(animator.Y[1]) * 100))
    print("Best test accuracy: {}% (epoch {})".format(max(animator.Y[3]) * 100, max_net_index))

    # return the model with the highest test accuracy
    net.load_state_dict(net_list[max_net_index])
    return net

max_net = train(net, trainloader, testloader, loss, optimizer, 30)

Training Model 1 on a single GPU takes about half an hour (roughly five to six hours on CPU) and reaches 94.15% accuracy on the test set.
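
The returned model can be re-checked on the test set at any time; a small sketch reusing evaluate_accuracy and the loss defined earlier (the exact figure varies from run to run):

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
test_loss, test_acc = evaluate_accuracy(max_net.to(device), testloader, loss, device)
print("test accuracy: {:.2f}%".format(test_acc * 100))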

VI. Analyzing predictions (confusion matrix)

1. Writing the prediction functions

def predict(net, data, return_labels=False, labels=None):
    net.eval()
    with torch.no_grad():
        outputs = net(data)
        y_pred = outputs.argmax(axis=1)
        if return_labels and labels is not None:
            pred_labels = [labels[i] for i in y_pred]
            # return the predictions as text labels
            return pred_labels
        # return the predictions as class indices
        return y_pred.cpu()

def get_pred_targets(net, loader):
    pred_targets = np.array([])
    true_targets = np.array([])
    device = use_gpu(net)
    for data, targets in loader:
        pred_targets = np.hstack((pred_targets, predict(net.to(device), data.to(device))))
        true_targets = np.hstack((true_targets, targets))
    # return the predicted and true labels
    return (pred_targets, true_targets)

test_pred,test_true = get_pred_targets(max_net,testloader)
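
Besides the confusion matrix, per-class precision and recall show which categories are confused most; a short sketch using scikit-learn (already a dependency above for confusion_matrix):

from sklearn.metrics import classification_report
print(classification_report(test_true, test_pred, target_names=labels))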

2. Confusion matrix

labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
# Plot the confusion matrix
def conf_mat_plot(y, y_pred, labels):
    fig = plt.figure(figsize=(8, 5))
    ax = fig.add_subplot(111)
    conf_mat = confusion_matrix(y, y_pred)
    heatmap = sns.heatmap(conf_mat, annot=True, fmt='d', cmap="YlGnBu",
                          xticklabels=labels, yticklabels=labels)  # draw the heatmap
    heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=30)
    ax.set_title('confusion matrix', size=20)  # title
    ax.set_xlabel('predict label')  # x-axis
    ax.set_ylabel('true label')  # y-axis
    plt.show()

conf_mat_plot(mnist_test.targets, test_pred, labels)

VII. Saving the model

1. Saving the model parameters (note: the model structure must match when loading the parameters)

# Save the model parameters; note: the net used when loading must match the saved structure
save_path = 'net_params.pkl'
torch.save(net.state_dict(), save_path)
# Load the parameters
net.load_state_dict(torch.load(save_path))
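
If the parameters were saved on a GPU machine and are loaded on a CPU-only one, map_location avoids a device mismatch error (a minimal sketch using the same save_path as above):

state = torch.load(save_path, map_location=torch.device('cpu'))
net.load_state_dict(state)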

2. Saving the whole model:

# Save the entire model
save_path = 'max_net.pkl'
torch.save(net, save_path)
# Load it back
net_ = torch.load(save_path)
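
Note that loading a whole pickled model this way requires the Net class definition to be available in the loading environment, so saving only the state_dict (method 1) is generally the more portable choice.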

Reposted from blog.csdn.net/weixin_46707493/article/details/122624655