PyTorch Learning: CNN

Straight to the code:

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt


# Hyperparameters
torch.manual_seed(1)
EPOCH = 1  # train for 1 epoch
BATCH_SIZE = 50  # 50 samples per batch
LR = 0.001  # learning rate
DOWNLOAD_MNIST = False  # set to True to download; once downloaded it can stay False

# download data
train_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)

test_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=False   # no transform here; the raw tensors are converted manually below
)

# train_loader
train_loader = Data.DataLoader(
    dataset=train_data,
    batch_size=BATCH_SIZE,
    shuffle=True
)

# prepare test data: take the first 2000 samples
# (2000, 28, 28) uint8 -> (2000, 1, 28, 28) float, scaled from [0, 255] to [0, 1]
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000]/255.
test_y = test_data.test_labels[:2000]
# print(test_x.size())
# print(test_y.size())
# print(test_x[1])
# print(test_y[1])
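
Note: in newer torchvision releases the test_data.test_data / test_data.test_labels attributes are deprecated in favor of .data / .targets. If your version warns about this, the equivalent lines would be:

test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000]/255.
test_y = test_data.targets[:2000]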

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(     # input shape: (1, 28, 28)
            nn.Conv2d(
                in_channels=1,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2
            ),             # output size: N = (W - F + 2P)/S + 1
            # W: input width/height (28), F: filter size (5), S: stride (1), P: padding (2)
            # N = (28 - 5 + 2*2)/1 + 1 = 28; with 16 filters, the output shape is (16, 28, 28)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
            # 2x2 max pooling downsamples by taking the max of each window
            # (a 4x4 image becomes 2x2), so the shape here becomes (16, 14, 14)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),  # (32, 14, 14)
            nn.ReLU(),
            nn.MaxPool2d(2)  # (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # flatten the conv feature maps into (batch_size, 32 * 7 * 7)
        output = self.out(x)
        return output

cnn = CNN()
print(cnn)
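
To double-check the shape arithmetic worked out in the comments above, a quick sanity-check sketch (not part of the original post) pushes a dummy batch through each stage:

# verify the per-stage output shapes with a dummy one-image batch
dummy = torch.randn(1, 1, 28, 28)         # (batch, channels, height, width)
print(cnn.conv1(dummy).shape)             # torch.Size([1, 16, 14, 14])
print(cnn.conv2(cnn.conv1(dummy)).shape)  # torch.Size([1, 32, 7, 7])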



# training
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()


# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):   # fetch one batch; x is already normalized to [0, 1] by ToTensor
        output = cnn(b_x)               # cnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

        if step % 50 == 0:
            print('train loss = ', loss.data.numpy())  # on newer PyTorch, loss.item() is the idiomatic equivalent
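
The test tensors prepared earlier are never used in the original loop. A minimal evaluation sketch (an addition, with illustrative names test_output and pred_y) that reports accuracy on those 2000 held-out images:

# evaluate on the 2000 test images prepared above
with torch.no_grad():
    test_output = cnn(test_x)
    pred_y = torch.max(test_output, 1)[1]          # index of the max logit = predicted digit
    accuracy = (pred_y == test_y).float().mean()   # fraction of correct predictions
    print('test accuracy = ', accuracy.item())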

Result:

CNN(
  (conv1): Sequential(
    (0): Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (conv2): Sequential(
    (0): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (out): Linear(in_features=1568, out_features=10, bias=True)
)
train loss =  2.3104544
train loss =  0.61840093
train loss =  0.12703253
train loss =  0.23725168
train loss =  0.4044273
train loss =  0.08451731
train loss =  0.19528298
train loss =  0.109065436
train loss =  0.123532385
train loss =  0.06730688
train loss =  0.2240683
train loss =  0.2114728
train loss =  0.024014007
train loss =  0.08469809
train loss =  0.21586336
train loss =  0.10181876
train loss =  0.043114547
train loss =  0.09106462
train loss =  0.055737924
train loss =  0.10089029
train loss =  0.032855053
train loss =  0.021929108
train loss =  0.025414439
train loss =  0.117736794

Process finished with exit code 0

Reposted from blog.csdn.net/sinat_15355869/article/details/86552617