Getting Started with PyTorch (1): Handwritten Digit Recognition

import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# A simple fully connected classifier: two hidden layers, each followed by
# batch normalization and ReLU, plus a linear output layer.
class Batch_Net(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super().__init__()
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x

batch_size = 64
learning_rate = 1e-2
num_epoches = 5

# MNIST pipeline: convert images to tensors and normalize them to roughly [-1, 1].
data_tf = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
train_dataset = datasets.MNIST(root='./data', train=True, transform=data_tf, download=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=data_tf)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
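# Quick optional sanity check: one batch from the loader should be a float
# tensor of images with shape [batch_size, 1, 28, 28] plus a vector of
# integer labels with shape [batch_size].
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])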

# Build the network and move it to the GPU if one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Batch_Net(28 * 28, 300, 100, 10).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

for epoch in range(num_epoches):
    model.train()
    loss_sum, cort_num_sum = 0.0, 0
    for img, label in train_loader:
        img = img.view(img.size(0), -1).to(device)   # flatten 1x28x28 images to 784-dim vectors
        label = label.to(device)
        output = model(img)
        loss = criterion(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_sum += loss.item()
        _, pred = output.max(1)
        cort_num_sum += pred.eq(label).sum().item()
    acc = cort_num_sum / len(train_dataset)
    print("After %d epochs, training loss is %.2f, correct number is %d, accuracy is %.6f."
          % (epoch + 1, loss_sum, cort_num_sum, acc))


model.eval()
eval_loss = 0.0
eval_acc = 0
with torch.no_grad():   # no gradients are needed for evaluation
    for img, label in test_loader:
        img = img.view(img.size(0), -1).to(device)
        label = label.to(device)
        out = model(img)
        loss = criterion(out, label)
        eval_loss += loss.item() * label.size(0)
        _, pred = out.max(1)
        eval_acc += pred.eq(label).sum().item()
print('Test loss: {:.6f}, ACC: {:.6f}'.format(eval_loss / len(test_dataset), eval_acc / len(test_dataset)))
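If you want to reuse the trained network later, one common pattern is to save its state_dict and reload it for inference. The snippet below is a minimal sketch of that pattern, assuming the model, device, and test_dataset defined above; the file name mnist_batch_net.pth is only a placeholder.

# Save the trained weights (the file name is arbitrary).
torch.save(model.state_dict(), 'mnist_batch_net.pth')

# Reload them into a fresh network and classify a single test image.
new_model = Batch_Net(28 * 28, 300, 100, 10).to(device)
new_model.load_state_dict(torch.load('mnist_batch_net.pth', map_location=device))
new_model.eval()

img, label = test_dataset[0]          # one normalized 1x28x28 image and its label
with torch.no_grad():
    logits = new_model(img.view(1, -1).to(device))
print('predicted digit:', logits.argmax(1).item(), 'true label:', label)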
