A Classic Introductory Deep Learning Problem: Handwritten Digit Recognition (code included)

Handwritten digit recognition is the classic hands-on exercise for getting started with deep learning; the dataset is MNIST. The complete code is provided below and can be run directly in PyCharm. For setting up the deep learning environment, see the blog post "WIN10和Ubuntu系统深度学习环境安装(pytorch框架)Anaconda+CUDA+PyTorch+PyCharm".
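Before diving in, you can quickly check that the environment works. The snippet below is only a minimal sanity check (not part of the two files described below), using standard PyTorch calls; the training code runs on CPU either way.

import torch
import torchvision

print(torch.__version__)           # installed PyTorch version
print(torchvision.__version__)     # installed torchvision version
print(torch.cuda.is_available())   # True if a CUDA GPU is usable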
There are two files in total, both with the .py extension. The first file can be named “train” (train.py):


import torch
from torch import nn  # neural network building blocks
from torch.nn import functional as F  # common functional ops (relu, losses, ...)
from torch import optim  # optimization toolkit

import torchvision  # vision datasets and transforms
from matplotlib import pyplot as plt  # plotting

from utils import plot_image, plot_curve, one_hot


batch_size = 512

# step1. load dataset
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size, shuffle=False)

x, y = next(iter(train_loader))
print(x.shape, y.shape, x.min(), x.max())
plot_image(x, y, 'image sample')

# step2. build the network
class Net(nn.Module):

    def __init__(self):  # initialization
        super(Net, self).__init__()

        # xw+b
        self.fc1 = nn.Linear(28*28, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):  # forward pass
        # x: [b, 1, 28, 28]
        # h1 = relu(xw1+b1)
        x = F.relu(self.fc1(x))
        # h2 = relu(h1w2+b2)
        x = F.relu(self.fc2(x))
        # h3 = h2w3+b3
        x = self.fc3(x)

        return x



net = Net()
# [w1, b1, w2, b2, w3, b3]
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
# torch.optim.SGD(params, lr=<required parameter>, momentum=0, dampening=0, weight_decay=0, nesterov=False)

train_loss = []  # record the loss so it can be plotted later
# step3. train
for epoch in range(3):

    for batch_idx, (x, y) in enumerate(train_loader):
        #print(x.shape,y.shape)

        # x: [b, 1, 28, 28], y: [512]
        # [b, 1, 28, 28] => [b, 784]
        x = x.view(x.size(0), 28*28)
        # => [b, 10]
        out = net(x)  # forward pass through the fully-connected net
        # [b, 10]
        y_onehot = one_hot(y)
        # loss = mse(out, y_onehot), mean squared error
        loss = F.mse_loss(out, y_onehot)

        optimizer.zero_grad()  # clear accumulated gradients
        loss.backward()  # backpropagate to compute gradients
        # w' = w - lr*grad
        optimizer.step()  # update the parameters

        train_loss.append(loss.item())

        if batch_idx % 10 == 0:
            print(epoch, batch_idx, loss.item())

plot_curve(train_loss)  # visualize the training loss

# we get optimal [w1, b1, w2, b2, w3, b3]
# step4. test
# loss is not the evaluation metric here; accuracy is
total_correct = 0
for x, y in test_loader:
    x = x.view(x.size(0), 28*28)
    out = net(x)
    # out: [b, 10] => pred: [b]
    pred = out.argmax(dim=1)  # argmax() returns the index of the largest logit, i.e. the predicted digit
    correct = pred.eq(y).sum().float().item()  # count correct predictions; item() converts the tensor to a Python number
    total_correct += correct

total_num = len(test_loader.dataset)
acc = total_correct / total_num  # compute accuracy
print('test acc:', acc)

# visualize a few test predictions
x, y = next(iter(test_loader))
out = net(x.view(x.size(0), 28*28))
pred = out.argmax(dim=1)
plot_image(x, pred, 'test')
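One thing worth noting about file one: it trains the classifier with MSE against one-hot targets. That works, but cross-entropy on the raw integer labels is the more common choice for classification. The lines below are a sketch of that variant (not what the original code does); only the loss computation changes, and one_hot is no longer needed.

# inside the training loop, after x has been flattened to [b, 784]
out = net(x)                    # [b, 10] raw logits
loss = F.cross_entropy(out, y)  # y: [b] integer class labels; no one-hot encoding required

optimizer.zero_grad()
loss.backward()
optimizer.step()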

The second file should be named “utils” (utils.py); its code is as follows:

import torch
from matplotlib import pyplot as plt


def plot_curve(data):
    fig = plt.figure()
    plt.plot(range(len(data)), data, color='blue')
    plt.legend(['value'], loc='upper right')
    plt.xlabel('step')
    plt.ylabel('value')
    plt.show()



def plot_image(img, label, name):

    fig = plt.figure()
    for i in range(6):
        plt.subplot(2, 3, i + 1)
        plt.tight_layout()
        plt.imshow(img[i][0]*0.3081 + 0.1307, cmap='gray', interpolation='none')  # un-normalize (std 0.3081, mean 0.1307) for display
        plt.title("{}: {}".format(name, label[i].item()))
        plt.xticks([])
        plt.yticks([])
    plt.show()


def one_hot(label, depth=10):  # one-hot encoding helper
    out = torch.zeros(label.size(0), depth)
    idx = label.long().view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1)
    return out
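If you want to convince yourself what one_hot produces, a quick check like the following should work (the expected output is described in a comment, as an assumption about what you should see); recent PyTorch versions also provide torch.nn.functional.one_hot as a built-in alternative.

labels = torch.tensor([3, 0, 9])
print(one_hot(labels))
# expected: a [3, 10] tensor of zeros with a 1 at positions 3, 0 and 9 respectively

# built-in alternative (returns int64; cast to float before mse_loss):
# torch.nn.functional.one_hot(labels, num_classes=10).float()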

Run the first file. A figure window will pop up; save it manually if you want to keep it, otherwise just close it (the script only continues after the window is closed).
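If you would rather save the figures automatically instead of clicking save in each window, you could add a savefig call right before plt.show() in utils.py; the file names below are just placeholders.

# e.g. at the end of plot_curve, before plt.show():
plt.savefig('train_loss.png')   # placeholder file name

# and similarly at the end of plot_image:
plt.savefig('image_grid.png')   # placeholder file name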

The training loss ends up at roughly 0.033.

The test accuracy is about 0.8846.

The following figures appear: the training-loss curve and a 2×3 grid of test images labeled with their predictions.

Reposted from blog.csdn.net/weixin_42042056/article/details/105473364