PyTorch 用全连接神经网络 实现MNIST数据集上面的手写数字识别

版权声明:转载请注明出处及原文地址。 https://blog.csdn.net/zl1085372438/article/details/84443562

网络定义:

文件net.py

import torch
from matplotlib import pyplot as ply
from torch import nn, optim
from torch.autograd import Variable


class simpleNet(nn.Module):
    """Fully-connected classifier for flattened 28x28 MNIST digits.

    Three linear layers; the first two are each followed by batch
    normalization and an in-place ReLU. The final layer emits raw class
    scores (logits), suitable for nn.CrossEntropyLoss.

    Args:
        in_dim: size of the flattened input vector (784 for MNIST).
        n_hidden_1: width of the first hidden layer.
        n_hidden_2: width of the second hidden layer.
        out_dim: number of output classes (10 for MNIST).
    """

    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super().__init__()
        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(
            nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x):
        """Run the network.

        Args:
            x: float tensor of shape (batch, in_dim).

        Returns:
            Logits tensor of shape (batch, out_dim).
        """
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x

数据集加载、模型训练与模型评估

import time
import numpy as np
import torch
from matplotlib import pyplot as plt
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

import net

# Wall-clock start, used for the total-time report at the end of the script.
start = time.time()

# Hyper-parameters.
batch_size = 64
learning_rate = 1e-2
# NOTE(review): num_epoches is defined but the training loop below
# iterates range(0, 1), i.e. a single epoch — confirm which is intended.
num_epoches = 20

# Convert PIL images to tensors in [0, 1], then normalize to roughly
# [-1, 1] with mean 0.5 / std 0.5 on the single grayscale channel.
data_transforms = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize([0.5], [0.5])]
)

# MNIST is downloaded to ./data on first run; the test split reuses it.
train_dataset = datasets.MNIST(
    root='./data', train=True, transform=data_transforms, download=True)
test_dataset = datasets.MNIST(
    root='./data', train=False, transform=data_transforms)

# Shuffle only the training data; evaluation order does not matter.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


# 784-input MLP with hidden widths 300 and 100, 10 output classes.
model = net.simpleNet(28*28, 300, 100, 10)
""" CUDA """
# model = model.cuda()

# CrossEntropyLoss expects raw logits from the model (no softmax).
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

# Loss history consumed by plot() below for the live loss curve.
py = []


def plot(print_loss):
    """Append one loss value to the global history and redraw the curve.

    Called periodically during training; plt.pause briefly yields to the
    GUI event loop so figure 1 refreshes without blocking training.
    """
    py.append(print_loss)
    plt.figure(1)
    plt.cla()
    plt.plot(
        py,
        'go-',
        linewidth=2,
        markersize=4,
        markeredgecolor='red',
        markerfacecolor='m',
    )
    plt.pause(0.0000001)


def show(img):
    """Display each image of a batch, one blocking window at a time.

    Args:
        img: image batch tensor of shape (batch, 1, H, W), as yielded
            by the MNIST DataLoader.
    """
    img = img.numpy()
    # Iterate over the actual batch size instead of a hard-coded 64:
    # the final batch of a DataLoader is usually smaller, and indexing
    # past its end would raise an IndexError.
    for i in range(img.shape[0]):
        tmp = img[i, 0, :, :]
        print(tmp.shape)
        plt.imshow(tmp)
        plt.show()


# Training loop. NOTE(review): runs a single epoch (range(0, 1)) even
# though num_epoches = 20 is defined above — confirm which is intended.
for epoch in range(0, 1):
    cnt = 0  # batch counter within the epoch, used for periodic logging
    for data in train_loader:
        cnt += 1
        img, label = data
        # Flatten (batch, 1, 28, 28) images to (batch, 784) for the MLP.
        img = img.view(img.size(0), -1)
        # Variable is a no-op wrapper on modern PyTorch (pre-0.4 style).
        img = Variable(img)
        label = Variable(label)
        """ CUDA """
        # img = img.cuda()
        # label = label.cuda()

        # Standard step: forward, loss, clear grads, backward, update.
        out = model(img)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Every 100 batches: update the live loss curve and log to stdout.
        if cnt % 100 == 0:
            print_loss = loss.data.item()
            plot(print_loss)
            print('epoch:{} ,batch:{} ,loss: {}'.format(epoch, cnt, print_loss))


# Pickles the entire model object (not just the state_dict) to ./mnist;
# the visualization script reloads it with torch.load('mnist').
torch.save(model,'mnist')

# Evaluation on the test split: switch BatchNorm to inference mode.
model.eval()
eval_loss = 0  # sum of per-sample losses over the whole test set
eval_acc = 0   # number of correctly classified test samples
for data in test_loader:
    img, label = data
    # Flatten (batch, 1, 28, 28) to (batch, 784), as in training.
    img = img.view(img.size(0), -1)
    img = Variable(img)
    label = Variable(label)
    """ CUDA """
    # img = img.cuda()
    # label = label.cuda()

    out = model(img)
    loss = criterion(out, label)
    print_loss = loss.data.item()
    # criterion returns the batch mean; multiply by the batch size so the
    # final division by len(test_dataset) gives the true mean loss.
    eval_loss += print_loss * label.size(0)
    # Predicted class = index of the largest logit per row.
    _, pred = torch.max(out, 1)
    num_correct = (pred == label).sum()
    eval_acc += num_correct.data.item()


# Mean test loss and overall accuracy (correct / total).
print('Test Loss:{} ,Acc: {}'.format(
    eval_loss/len(test_dataset), eval_acc/(len(test_dataset))))

end = time.time()
print('time:{}'.format(end-start))
# Keep the live loss-curve window open after the script finishes.
plt.show()

显示出测试集的部分图片

import torch
from matplotlib import pyplot as plt
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import net


# Reload the model pickled by the training script. NOTE(review):
# torch.load unpickles arbitrary objects — only load files you trust;
# saving/loading a state_dict is the safer convention.
model=torch.load('mnist')
# Inference mode (BatchNorm uses running statistics).
model.eval()

# Same hyper-parameters as the training script; only batch_size is
# actually used here (learning_rate / num_epoches are unused in this file).
batch_size = 64
learning_rate = 1e-2
num_epoches = 20

# Must match the training-time preprocessing exactly.
data_transforms = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize([0.5], [0.5])]
)

test_dataset = datasets.MNIST(
    root='./data', train=False, transform=data_transforms)

# Shuffled so each run shows a different selection of test digits.
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)


# Show the first 10 images of one shuffled test batch with their
# predicted and true labels, each in its own figure window.
for data in test_loader:
    img,label=data
    # Keep the original (batch, 1, 28, 28) images for display; feed the
    # flattened copy (batch, 784) to the MLP.
    vimg = img.view(img.size(0), -1)
    vimg = Variable(vimg)
    out=model(vimg)
    # Predicted class = index of the largest logit per row.
    v,pre=torch.max(out,1)
    pre=pre.numpy()
    img=img.numpy()
    label=label.numpy()
    for i in range(0,10):
        plt.figure(i)
        curimg=img[i,0,:,:]
        curlabel=label[i]
        curpre=pre[i]
        plt.imshow(curimg)
        plt.title('label:{} ,pre:{}'.format(curlabel,curpre))
        plt.pause(0.000001)
    break    # comment this line out to show pictures from more batches
plt.show()

print('Show the wrong prediction')
cnt = 0        # total number of misclassified test samples
max_show = 10  # how many wrong predictions to display; change as you like
for data in test_loader:
    img, label = data
    # Flattened copy for the MLP; keep the 4-D original for display.
    vimg = img.view(img.size(0), -1)
    vimg = Variable(vimg)
    out = model(vimg)
    v, pre = torch.max(out, 1)
    pre = pre.numpy()
    img = img.numpy()
    label = label.numpy()
    # Scan the WHOLE batch: the original checked only the first 10 of
    # each 64-sample batch, so cnt undercounted the misclassifications.
    for i in range(img.shape[0]):
        curimg = img[i, 0, :, :]
        curlabel = label[i]
        curpre = pre[i]
        if curpre != curlabel:
            cnt += 1
            # Open a figure only for an actual mistake (the original
            # created a figure for every sample, wrong or not), and show
            # the full max_show images (the original's cnt<10 showed 9).
            if cnt <= max_show:
                plt.figure(cnt)
                plt.imshow(curimg)
                plt.title('label:{} ,pre:{}'.format(curlabel, curpre))
                plt.pause(0.000001)
print(cnt)
plt.show()

三个py文件放在同一个目录下

猜你喜欢

转载自blog.csdn.net/zl1085372438/article/details/84443562