# PyTorch LSTM — MNIST digit classification example

import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

# Hyperparameters
EPOCH = 1        # number of passes over the training set
BATCH_SIZE = 64  # mini-batch size, N
# TIME_STEP = 28   # sequence length (unused; hard-coded as 28 in the model)
# INPUT_SIZE = 128
LR = 0.01        # learning rate for the Adam optimizer
DOWNLOAD_MNIST = False  # set True on the first run to download the dataset


# Load MNIST.  Set DOWNLOAD_MNIST = True on the first run to fetch the files.
train_data = dsets.MNIST(root='./mnist/', train=True, transform=transforms.ToTensor(), download=DOWNLOAD_MNIST)

# DataLoader shuffles and batches the training set.
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# Fixed evaluation subset: first 2000 test images scaled to [0, 1],
# labels as a NumPy array for easy accuracy computation.
# `.data` / `.targets` replace the deprecated `.test_data` / `.test_labels`
# accessors, which newer torchvision releases no longer provide.
test_data = dsets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor())
test_x = test_data.data.type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.targets.numpy()[:2000]


# 定义循环神经网络
class RNN(nn.Module):
    """Classify 28x28 MNIST digits with a two-layer LSTM.

    Each 28-pixel image row is embedded into a 128-dim vector; the 28
    rows then form a length-28 sequence fed to the LSTM, and the hidden
    state at the final time step is mapped to 10 class logits.
    """

    def __init__(self):
        super(RNN, self).__init__()

        self.input = nn.Linear(28, 128)   # per-row embedding: 28 -> 128
        self.rnn = nn.LSTM(
            input_size=128,
            hidden_size=64,
            num_layers=2,
            batch_first=True,             # tensors are (batch, time_step, input)
        )
        self.out = nn.Linear(64, 10)      # final hidden state -> class logits

    def forward(self, x):
        """x: (N, 28, 28, 1) NHWC batch -> (N, 10) logits."""
        rows = x.view(-1, 28)                          # one 28-pixel row per line
        embedded = self.input(rows).view(-1, 28, 128)  # back to (N, time_step, feature)
        seq_out, _ = self.rnn(embedded, None)          # None -> zero initial (h_0, c_0)
        last_step = seq_out[:, -1, :]                  # hidden output at t = 27
        return self.out(last_step)


# Instantiate the network and print its layer summary.
rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all network parameters
loss_func = nn.CrossEntropyLoss()  # expects raw logits and integer class labels

# Training loop: one optimization step per mini-batch.
# Bug fixed: the original `print(b_x.size())` line was indented with a tab
# while the rest of the body used spaces, which is a SyntaxError in Python 3.
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):
        # b_x arrives as NCHW (64, 1, 28, 28); the model expects NHWC.
        b_x = b_x.permute(0, 2, 3, 1)

        output = rnn(b_x)              # forward pass -> (N, 10) logits
        loss = loss_func(output, b_y)  # cross-entropy vs. integer labels
        optimizer.zero_grad()          # clear gradients from the previous step
        loss.backward()                # backpropagate
        optimizer.step()               # apply the Adam update

# Adapted from: blog.csdn.net/luolinll1212/article/details/85244825