Pytorch实现卷积神经网络的分类

本文用于学习Pytorch实现卷积神经网络的分类!!!

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision #数据库
import torch.nn.functional as F

# Fix the RNG seed so weight initialization and shuffling are reproducible.
torch.manual_seed(1)

epoches = 1 # number of full passes over the training set
batch_size = 500 # samples per training step
lr = 0.001 # learning rate for the Adam optimizer

#1. Training dataset: MNIST images converted to float tensors in [0, 1].
train_data = torchvision.datasets.MNIST(
    root='./mnist', # local directory holding the dataset
    train=True, # use the training split
    transform=torchvision.transforms.ToTensor(), # PIL image -> float tensor
    download=False # NOTE(review): assumes ./mnist already exists; set True to fetch it
)
# Test dataset: no transform here — the probe below normalizes manually.
test_data = torchvision.datasets.MNIST(
    root='./mnist',
    train=False
)

# Wrap the training set in a DataLoader yielding (batch, channel, 28, 28) batches.
loader = Data.DataLoader(
    dataset=train_data,
    batch_size=batch_size,# samples per batch
    shuffle=True# reshuffle every epoch
)

# Quick accuracy probe: first 20 test images/labels, pixels scaled to [0, 1],
# with a channel dim added so shape is (20, 1, 28, 28).
# NOTE(review): .test_data/.test_labels are deprecated in newer torchvision
# (use .data/.targets); kept here to match the torchvision era of this post.
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(
    torch.FloatTensor)[:20]/255
test_y = test_data.test_labels[:20]

#2.前向传播过程
#2. Network definition: two conv blocks followed by a fully-connected classifier.
class CNN(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel MNIST digits.

    Input:  (batch, 1, 28, 28) float tensor.
    Output: (batch, 10) raw class scores (logits, no softmax applied).
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Block 1: (batch, 1, 28, 28) -> conv/relu/pool -> (batch, 16, 14, 14).
        # padding=2 keeps spatial size through the 5x5 conv: (kernel - stride) / 2.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16,
                      kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Block 2: (batch, 16, 14, 14) -> (batch, 32, 7, 7).
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Classifier head: flattened 32*7*7 features -> 10 digit classes.
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        """Map an image batch to per-class scores (logits)."""
        features = self.conv2(self.conv1(x))
        # Flatten (batch, 32, 7, 7) into (batch, 32*7*7) for the linear layer.
        flat = features.view(features.size(0), -1)
        return self.out(flat)

# 训练模型的同时保存网络模型参数
def save():
    #3.利用自定义的前向传播过程设计网络
    cnn = CNN()

    #4.设置优化算法、学习率
    optimizer = torch.optim.Adam( cnn.parameters(), lr=lr)

    #5.设置损失函数
    loss_func = torch.nn.CrossEntropyLoss()

    #6.迭代训练
    for epoch in range(epoches):
        for step, (batch_x, batch_y) in enumerate(loader):
            out = cnn(batch_x)#输入训练集,获得当前迭代输出值
            loss = loss_func(out, batch_y)#获得当前迭代的损失

            optimizer.zero_grad()#清除上次迭代的更新梯度
            loss.backward()#反向传播
            optimizer.step()#更新权重

            #打印训练过程中的测试集准确率
            if step%500==0:
                test_out = cnn(test_x)#输入测试集
                #获得当前softmax层最大概率对应的索引值
                pred = torch.max(test_out, 1)[1]
                #将二维压缩为一维
                pred_y = pred.data.numpy().squeeze()
                label_y = test_y.data.numpy()
                accuracy = sum(pred_y == label_y)/test_y.size()
                print("第 %d 个epoch,第 %d 次迭代,准确率为 %.2f"%(epoch+1, step/100+1, accuracy))

    #7.保存模型结构和参数
    torch.save(cnn, 'cnn.pkl')

if __name__ == '__main__':
    # Script entry point: train the CNN and write 'cnn.pkl'.
    save()

参考资料:

1. 莫烦《Pytorch教程》

猜你喜欢

转载自blog.csdn.net/attitude_yu/article/details/80569875