import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Hyperparameters
num_epochs = 5
num_classes = 10
batch_size = 100
learning_rate = 0.001
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data/',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data/',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
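# Optional shape check (an added illustration, not part of the original
# tutorial): pull one batch to see what the loaders yield -- image tensors of
# shape (batch_size, 1, 28, 28) and integer label tensors of shape (batch_size,).
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.shape, sample_labels.shape)  # torch.Size([100, 1, 28, 28]) torch.Size([100])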
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        # Each block: 5x5 conv (padding 2 keeps the spatial size), batch norm,
        # ReLU, then 2x2 max pooling that halves height and width.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # 28x28 input -> 14x14 after layer1 -> 7x7 after layer2, with 32 channels.
        self.fc = nn.Linear(7*7*32, num_classes)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)  # flatten to (batch, 7*7*32)
        out = self.fc(out)
        return out
model = ConvNet(num_classes).to(device)
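# Optional sanity check (an added sketch, not in the original script): run a
# dummy MNIST-sized batch through the untrained model to confirm the logits
# have shape (batch, num_classes).
with torch.no_grad():
    model.eval()  # avoid polluting batch-norm running stats with dummy data
    dummy = torch.randn(2, 1, 28, 28, device=device)
    print(model(dummy).shape)  # expected: torch.Size([2, 10])
    model.train()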
# Loss and optimizer
criterion = nn.CrossEntropyLoss()  # expects raw logits; applies log-softmax internally
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)  # index of the highest logit per sample
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
Epoch [1/5], Step [100/600], Loss: 0.1820
Epoch [1/5], Step [200/600], Loss: 0.0899
Epoch [1/5], Step [300/600], Loss: 0.0785
Epoch [1/5], Step [400/600], Loss: 0.1016
Epoch [1/5], Step [500/600], Loss: 0.0342
Epoch [1/5], Step [600/600], Loss: 0.0683
Epoch [2/5], Step [100/600], Loss: 0.0650
Epoch [2/5], Step [200/600], Loss: 0.0843
Epoch [2/5], Step [300/600], Loss: 0.0385
Epoch [2/5], Step [400/600], Loss: 0.0184
Epoch [2/5], Step [500/600], Loss: 0.0264
Epoch [2/5], Step [600/600], Loss: 0.0285
Epoch [3/5], Step [100/600], Loss: 0.0046
Epoch [3/5], Step [200/600], Loss: 0.0029
Epoch [3/5], Step [300/600], Loss: 0.0052
Epoch [3/5], Step [400/600], Loss: 0.0267
Epoch [3/5], Step [500/600], Loss: 0.0247
Epoch [3/5], Step [600/600], Loss: 0.0500
Epoch [4/5], Step [100/600], Loss: 0.1115
Epoch [4/5], Step [200/600], Loss: 0.0097
Epoch [4/5], Step [300/600], Loss: 0.0094
Epoch [4/5], Step [400/600], Loss: 0.0150
Epoch [4/5], Step [500/600], Loss: 0.0867
Epoch [4/5], Step [600/600], Loss: 0.0556
Epoch [5/5], Step [100/600], Loss: 0.0747
Epoch [5/5], Step [200/600], Loss: 0.0165
Epoch [5/5], Step [300/600], Loss: 0.0138
Epoch [5/5], Step [400/600], Loss: 0.0093
Epoch [5/5], Step [500/600], Loss: 0.0234
Epoch [5/5], Step [600/600], Loss: 0.0137
Test Accuracy of the model on the 10000 test images: 99.12 %
With the CNN, the accuracy is already quite high (99.12% on the test set).
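To reuse the trained weights later, the saved state dict can be loaded back into a fresh ConvNet instance. A minimal sketch, assuming the model.ckpt file written above and the definitions from the script:

# Reload the checkpoint into a fresh model and classify a single test image
# (reuses ConvNet, test_dataset, device, and num_classes defined above).
model = ConvNet(num_classes).to(device)
model.load_state_dict(torch.load('model.ckpt', map_location=device))
model.eval()

image, label = test_dataset[0]                     # (1, 28, 28) tensor, int label
with torch.no_grad():
    logits = model(image.unsqueeze(0).to(device))  # add the batch dimension
    predicted = logits.argmax(dim=1).item()
print('predicted: {}, actual: {}'.format(predicted, label))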