import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Hyper-parameters
input_size = 784
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# MNIST dataset (images and labels)
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader (input pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
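It can help to peek at one batch here to see why the images get reshaped later: the loader yields tensors of shape (batch_size, 1, 28, 28), which a linear model cannot consume directly. A quick optional sanity check:

images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([100, 1, 28, 28]) -- must be flattened to (100, 784)
print(labels.shape)   # torch.Size([100])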
# Logistic regression model
model = nn.Linear(input_size, num_classes)
# Loss and optimizer
# nn.CrossEntropyLoss() computes softmax internally
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
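As the comment says, nn.CrossEntropyLoss expects raw logits and applies log-softmax plus negative log-likelihood internally, which is why the model has no softmax layer. A minimal sketch of the equivalence, using made-up logits:

logits = torch.randn(3, num_classes)    # a fake batch of 3 logit vectors
target = torch.tensor([1, 0, 4])        # fake labels, for illustration only
log_probs = nn.LogSoftmax(dim=1)(logits)
print(nn.NLLLoss()(log_probs, target))         # identical to the line below
print(nn.CrossEntropyLoss()(logits, target))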
# Train the model
total_step = len(train_loader)  # 60,000 training images / batch_size 100 = 600 steps per epoch
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Reshape images from (batch_size, 1, 28, 28) to (batch_size, input_size)
        images = images.reshape(-1, 28*28)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
# In the test phase, we don't need to compute gradients (saves memory)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
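Note that state_dict() saves only the weights, not the module definition; to reuse the model later you rebuild the same module and load the weights into it. A minimal sketch, assuming the file saved above:

model = nn.Linear(input_size, num_classes)       # rebuild a model of the same shape
model.load_state_dict(torch.load('model.ckpt'))  # load the saved weights
model.eval()                                     # switch to inference mode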
Run results
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Processing…
Done!
Epoch [1/5], Step [100/600], Loss: 2.1767
Epoch [1/5], Step [200/600], Loss: 2.1495
Epoch [1/5], Step [300/600], Loss: 2.0069
Epoch [1/5], Step [400/600], Loss: 1.9497
Epoch [1/5], Step [500/600], Loss: 1.8636
Epoch [1/5], Step [600/600], Loss: 1.7935
Epoch [2/5], Step [100/600], Loss: 1.6886
Epoch [2/5], Step [200/600], Loss: 1.7598
Epoch [2/5], Step [300/600], Loss: 1.6549
Epoch [2/5], Step [400/600], Loss: 1.6528
Epoch [2/5], Step [500/600], Loss: 1.4315
Epoch [2/5], Step [600/600], Loss: 1.4409
Epoch [3/5], Step [100/600], Loss: 1.3806
Epoch [3/5], Step [200/600], Loss: 1.3615
Epoch [3/5], Step [300/600], Loss: 1.3272
Epoch [3/5], Step [400/600], Loss: 1.3311
Epoch [3/5], Step [500/600], Loss: 1.1579
Epoch [3/5], Step [600/600], Loss: 1.2168
Epoch [4/5], Step [100/600], Loss: 1.2214
Epoch [4/5], Step [200/600], Loss: 1.1283
Epoch [4/5], Step [300/600], Loss: 1.1131
Epoch [4/5], Step [400/600], Loss: 1.1301
Epoch [4/5], Step [500/600], Loss: 1.1497
Epoch [4/5], Step [600/600], Loss: 1.1135
Epoch [5/5], Step [100/600], Loss: 1.0703
Epoch [5/5], Step [200/600], Loss: 1.0444
Epoch [5/5], Step [300/600], Loss: 1.0590
Epoch [5/5], Step [400/600], Loss: 1.0483
Epoch [5/5], Step [500/600], Loss: 1.0203
Epoch [5/5], Step [600/600], Loss: 1.1195
Accuracy of the model on the 10000 test images: 82 %
A few small tweaks
print(test_dataset)   # take a look at what a dataset object contains
Dataset MNIST
    Number of datapoints: 10000
    Split: test
    Root Location: ../../data
    Transforms (if any): ToTensor()
    Target Transforms (if any): None
print(train_dataset)
Dataset MNIST
    Number of datapoints: 60000
    Split: train
    Root Location: ../../data
    Transforms (if any): ToTensor()
    Target Transforms (if any): None
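Indexing the dataset returns an (image, label) pair, and with ToTensor() the image is already a 1x28x28 float tensor scaled to [0, 1]. A quick optional look at one sample:

image, label = train_dataset[0]
print(image.shape)   # torch.Size([1, 28, 28])
print(label)         # class of the first training digit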
Change the optimizer type (Adam is the usual choice these days):
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
Accuracy of the model on the 10000 test images: 92 %
Accuracy improves from 82% to 92%.
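Adam keeps running estimates of each parameter's gradient mean and variance and scales every update accordingly, so at the same nominal learning rate it typically converges much faster than plain SGD; that is why five epochs are enough for a noticeably better result here.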