MNIST Handwritten Digit Recognition with a Convolutional Neural Network

Code:

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision      # provides the MNIST dataset and image transforms
import matplotlib.pyplot as plt

torch.manual_seed(1)    # reproducible
# Hyper Parameters
EPOCH = 1           # number of full passes over the training set; one pass to save time
BATCH_SIZE = 50
LR = 0.001          # learning rate
DOWNLOAD_MNIST = False  # set to False if MNIST is already downloaded to ./mnist/
# MNIST handwritten digits
train_data = torchvision.datasets.MNIST(
    root='./mnist/',    # where to save / load the dataset
    train=True,  # this is training data
    transform=torchvision.transforms.ToTensor(),    # converts PIL.Image or numpy.ndarray to
                                                    # torch.FloatTensor (C x H x W) and normalizes pixel values to [0.0, 1.0]
    download=DOWNLOAD_MNIST,          # download if not present; otherwise reuse the local copy
)
# plot one example
# print(train_data.data.shape)      # torch.Size([60000, 28, 28])
# print(train_data.targets.shape)   # torch.Size([60000])
# print(train_data.data[0].shape)   # torch.Size([28, 28])
#
# plt.imshow(train_data.data[1], cmap='gray')
# plt.title('%d' % train_data.targets[1])
# plt.show()

# test data
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)  # no transform here; converted to tensors manually below

# print(test_data.data.shape)   # torch.Size([10000, 28, 28])
# to save time, evaluate on only the first 2000 test samples
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000] / 255.   # shape from (2000, 28, 28) to (2000, 1, 28, 28); dividing by 255 puts values in [0, 1], matching ToTensor on the training side
test_y = test_data.targets[:2000]

# batch training: 50 samples, 1 channel, 28x28 -> (50, 1, 28, 28)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
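# Optional sanity check (not in the original script): pull one batch to confirm
# the shapes the DataLoader yields -- images (50, 1, 28, 28), labels (50,).
# images, labels = next(iter(train_loader))
# print(images.shape, labels.shape)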

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=16,  # n_filters
                kernel_size=5,    # filter size
                stride=1,         # filter movement/step
                padding=2,        # to keep the output height/width unchanged after Conv2d, use padding=(kernel_size-1)/2 when stride=1
            ),  # output shape (16, 28, 28)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)  # output shape (16, 14, 14)
        )
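        # Conv output size: (W - K + 2P) // S + 1 = (28 - 5 + 2*2) // 1 + 1 = 28,
        # then MaxPool2d(2) halves height and width to 14. The same arithmetic
        # applies to conv2 below: 14 stays 14 after the conv, then pools to 7.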
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),  # output shape (32, 14, 14)
            nn.ReLU(),
            nn.MaxPool2d(2)  # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)  # fully connected layer, output 10 classes
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        # print(x.shape)    # torch.Size([50, 32, 7, 7])
        x = x.view(x.size(0), -1)   # flatten the conv feature maps to (batch_size, 32 * 7 * 7)
        # print(x.shape)    # torch.Size([50, 1568])
        output = self.out(x)
        return output
cnn = CNN()
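# print(cnn)  # optional: prints the layer structure of the network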
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)   # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()   # the target label is not one-hotted
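# CrossEntropyLoss applies log-softmax internally, which is why forward() returns
# raw scores (logits) and the labels are plain class indices rather than one-hot vectors.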
# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):   # each iteration yields one normalized batch
        output = cnn(b_x)               # cnn output: raw class scores
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients
        if step % 50 == 0:
            print('step:', step, '| train loss: %.4f' % loss.item())
test_output = cnn(test_x[:10])
#test_x[:10].shape=torch.Size([10, 1, 28, 28])
#test_output.shape=torch.Size([10, 10])
print('test_output:', test_output)
# test_output is a (10, 10) tensor of raw class scores, one row per test image and
# one column per digit class; the exact values vary from run to run, and the
# row-wise argmax gives the predicted digit.
print('test_output.shape:',test_output.shape)
# test_output.shape: torch.Size([10, 10])

pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()   # argmax over classes (dim 1) -> predicted digit per image
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
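
The ten-sample check above only eyeballs predictions. As a minimal follow-up sketch (assuming the cnn, test_x, and test_y variables defined above are in scope), accuracy over the full 2000-sample test slice can be computed like this:

with torch.no_grad():                            # no gradients needed for evaluation
    full_output = cnn(test_x)                    # (2000, 10) raw class scores
    full_pred = torch.max(full_output, 1)[1]     # predicted class per image
    accuracy = (full_pred == test_y).float().mean().item()
print('test accuracy: %.4f' % accuracy)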


Reposted from www.cnblogs.com/Archer-Fang/p/10651403.html