Building a simple neural network for the MNIST dataset with PyTorch

Based on https://github.com/pytorch/examples/blob/master/mnist/main.py

CPU only, command-line arguments dropped, run from PyCharm, Python 3.5, PyTorch 0.4. The first run may be slow because the dataset has to be downloaded.

The output is the computed test loss, printed once per epoch.

# -*- coding: utf-8 -*- 

from torch.autograd import Variable         # records operation history for autograd (optional since PyTorch 0.4, where Tensors track gradients directly)
import torch                                # tensor data structure and the operations defined on it
import torch.utils.data                     # DataLoader
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

# load the training set (downloads MNIST to ../data on the first run)
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=64, shuffle=True)
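# note: (0.1307,) and (0.3081,) are the commonly quoted mean and std of the MNIST
# training pixels, so Normalize maps inputs to roughly zero mean and unit variance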

# load the test set
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=1000, shuffle=True)

class Net(nn.Module):  # subclass of nn.Module
    def __init__(self):
        super(Net, self).__init__()  # run the parent-class initializer, then define the layers on self
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)  # Conv2d convolves over height and width (Conv1d over one dimension only)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()  # randomly zeroes whole channels to reduce overfitting (default p=0.5)
        self.fc1 = nn.Linear(320,50)
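        # 320 = 20 output channels * 4 * 4 spatial size after the second conv + pool (see forward)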
        self.fc2 = nn.Linear(50,10)

    def forward(self, x):
        # input: [N, 1, 28, 28] grayscale images (N = batch size: 64 in training, 1000 in testing)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))  # conv1, then 2x2 max pooling, then ReLU
        # conv: 28-5+1 = 24 -> 24*24; pool: 24/2 = 12 -> shape [N, 10, 12, 12]
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        # conv: 12-5+1 = 8 -> 8*8; pool: 8/2 = 4 -> shape [N, 20, 4, 4]
        x = x.view(-1, 320)  # flatten: 20 * 4 * 4 = 320 features per image -> [N, 320]
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)  # dropout is only active in training mode
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)  # log-probabilities over the 10 digit classes

model = Net()  # instantiate the network for use below
optimizer = optim.SGD(model.parameters(), lr=0.02, momentum=0.5)  # lr: learning rate, momentum: SGD momentum


def train(epoch):  # epoch is unused here; the main loop prints it
    model.train()  # switch to training mode (enables dropout)
    for batch_idx, (data, target) in enumerate(train_loader):  # one full pass over the training set
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()              # clear gradients left over from the previous step
        output = model(data)
        loss = F.nll_loss(output, target)  # negative log-likelihood loss (pairs with log_softmax)
        loss.backward()                    # backpropagate the error
        optimizer.step()                   # update the weights

def test():
    model.eval()  # switch to evaluation mode (disables dropout)
    total_loss = 0.0
    total_samples = 0
    with torch.no_grad():  # no backward pass is needed, so gradients are not tracked
        for batch_idx, (data, target) in enumerate(test_loader):
            data, target = Variable(data), Variable(target)
            total_samples += len(target)
            output = model(data)
            total_loss += F.nll_loss(output, target, reduction='sum').item()  # sum of per-sample losses
    return total_loss / total_samples  # average test loss per sample


if __name__ == '__main__':
    for epoch in range(20):
        train(epoch)
        test_loss=test()
        print(epoch,"  ",test_loss)
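
The loop above only reports the average test loss. A rough sketch of how test() could also report accuracy, similar to the referenced main.py (the function name test_with_accuracy and the variables correct / total_samples below are illustrative additions, not from the original post):

def test_with_accuracy():
    # Sketch only: reports both the average loss and the classification accuracy.
    model.eval()
    total_loss = 0.0
    correct = 0
    total_samples = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            total_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1)              # index of the highest log-probability
            correct += pred.eq(target).sum().item()  # count correct predictions
            total_samples += len(target)
    return total_loss / total_samples, correct / total_samples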


Reposted from blog.csdn.net/jidushanzhu/article/details/81362536