PyTorch Installation and Syntax

1. Installing PyTorch

Get the pip install command for your platform from the official site: https://pytorch.org/get-started/locally/
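For example, a CPU-only install looks like the following (the exact command depends on your OS, package manager, and CUDA version, so copy the one the selector on the site generates for you):

pip install torch torchvision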
[Error] torch failed to import with Python 3.6.0; after upgrading to Python 3.6.3, torch imported normally.

2. Handwritten Digit Recognition with PyTorch

PyTorch's syntax is similar to TensorFlow's: variables are defined as tensors, and a rich set of APIs is provided for building networks; we won't go into detail here.
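As a minimal illustration (my own sketch, not from the original post), tensors are created and combined much like NumPy arrays:

import torch

a = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
b = torch.randn(2, 2)        # 2 x 2 tensor of standard-normal values
print(a @ b)                 # matrix multiplication
print(a.sum(), a.mean())     # reductions return 0-dim tensors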
Download the MNIST dataset from http://deeplearning.net/data/mnist/. The training split contains 50,000 handwritten-digit images, each 28 × 28 pixels, with 10 class labels covering the digits 0-9.
[Goal]: Using the grayscale pixel values and class labels of the training samples, train a linear classification model (multinomial logistic regression) to predict the class labels of the test samples.
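In symbols (my summary of the code below): the model computes log-probabilities log p(y|x) = log softmax(xW + b)_y with W ∈ R^(784×10) and b ∈ R^10, and the loss is the mean negative log-likelihood -1/N · Σ_i log p(y_i|x_i). The log_softmax and nll functions below implement exactly these two formulas by hand.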

import pickle
import gzip
from matplotlib import pyplot
import torch
import math

PATH = "mnist/"
FILENAME = "mnist.pkl.gz"


def read_data():
    # 50000 x 784; each sample is a flattened 28 x 28 image
    with gzip.open(PATH + FILENAME, "rb") as f:
        (x_train, y_train), (x_valid, y_valid), _ =\
            pickle.load(f, encoding="latin-1")

    pyplot.imshow(x_train[0].reshape((28, 28)), cmap="gray")
    # pyplot.show()

    # convert np.array to torch.tensor
    x_train, y_train, x_valid, y_valid = map(
        torch.tensor, (x_train, y_train, x_valid, y_valid)
    )
    n, c = x_train.shape  # n samples, c = 784 features per sample
    print(x_train.shape)
    return x_train, y_train, x_valid, y_valid, n


def log_softmax(x):
    # row-wise log(softmax(x)) = x - log(sum(exp(x)))
    return x - x.exp().sum(-1).log().unsqueeze(-1)


def model(xb):
    """
    @表示点积(dot production)操作
    :param xb:
    :return:
    """
    return log_softmax(xb @ weights + bias)


def nll(input, target):
    """
    损失函数
    :param input:
    :param target:
    :return:
    """
    return -input[range(target.shape[0]), target].mean()


def accuracy(out, yb):
    preds = torch.argmax(out, dim=1)
    return (preds == yb).float().mean()


if __name__ == "__main__":
    loss_func = nll

    x_train, y_train, x_valid, y_valid, n = read_data()

    # randomly initialize the weights, 784 x 10; 10 is the number of class
    # labels (scaled by 1/sqrt(784), i.e. Xavier initialization)
    weights = torch.randn(784, 10) / math.sqrt(784)
    weights.requires_grad_()
    bias = torch.zeros(10, requires_grad=True)

    bs = 64
    lr = 0.5
    # number of passes over the full training set
    epochs = 2

    for epoch in range(epochs):
        # iterate over the training set one batch at a time
        for i in range((n - 1) // bs + 1):
            start_i = i * bs
            end_i = start_i + bs
            xb = x_train[start_i:end_i]
            yb = y_train[start_i:end_i]
            pred = model(xb)
            loss = loss_func(pred, yb)
            # backpropagate to compute the gradients
            loss.backward()
            with torch.no_grad():
                # gradient-descent update of the weights and bias
                weights -= weights.grad * lr
                bias -= bias.grad * lr
                # zero the gradients so they don't accumulate
                weights.grad.zero_()
                bias.grad.zero_()

            print('epoch is %s, batch is %s, loss is %.4f, acc is %.4f'
                  % (epoch, i, loss.item(), accuracy(pred, yb).item()))
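
After training, you can check generalization on the held-out validation split. A minimal addition of my own (not in the original post), reusing the functions and tensors defined above:

# evaluate on the validation split; no gradients needed
with torch.no_grad():
    val_pred = model(x_valid)
    print('validation acc is %.4f' % accuracy(val_pred, y_valid).item())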

3. Simplifying the Code with torch.nn and Other APIs

torch.nn provides a large number of commonly used building blocks for constructing networks. torch.optim updates the parameters during training. TensorDataset wraps training data for indexing and slicing, and DataLoader batches and iterates over a Dataset.
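For instance (a minimal sketch of my own), TensorDataset supports tuple indexing and DataLoader yields batches:

import torch
from torch.utils.data import TensorDataset, DataLoader

xs = torch.randn(100, 784)
ys = torch.randint(0, 10, (100,))
ds = TensorDataset(xs, ys)          # ds[i] returns the pair (xs[i], ys[i])
dl = DataLoader(ds, batch_size=32)  # iterates in batches of 32
for xb, yb in dl:
    print(xb.shape, yb.shape)       # torch.Size([32, 784]) torch.Size([32])
    break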

import torch.nn.functional as F
import torch
from linear import read_data
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader


class MnistLogistic(nn.Module):
    # subclass nn.Module
    def __init__(self):
        super().__init__()
        # nn.Linear defines a linear layer and creates its weight and bias parameters automatically
        self.lin = nn.Linear(784, 10)

    def forward(self, xb):
        return self.lin(xb)


def fit(epochs, model, opt):
    for epoch in range(epochs):
        # enumerate so the batch index i is available for logging and checkpoints
        for i, (xb, yb) in enumerate(train_dl):
            pred = model(xb)
            loss = loss_func(pred, yb)

            loss.backward()
            opt.step()
            opt.zero_grad()
            # save a checkpoint and stop once the loss is low enough
            if loss < 0.09:
                torch.save(model, 'mnist/linear_' + str(epoch) + '_' + str(i) + '.m')
                print('epoch is %s, batch is %s, loss is %s' % (epoch, i, loss))
                return


def get_model():
    model = MnistLogistic()
    return model, optim.SGD(model.parameters(), lr=lr)


if __name__ == "__main__":
    loss_func = F.cross_entropy
    x_train, y_train, x_valid, y_valid, n = read_data()

    bs = 64
    lr = 0.5
    epochs = 2

    train_ds = TensorDataset(x_train, y_train)
    train_dl = DataLoader(train_ds, batch_size=bs)

    model, opt = get_model()
    fit(epochs, model, opt)
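
Because torch.save above pickles the entire module, a saved checkpoint can later be restored with torch.load. A usage sketch (the file name here is hypothetical; use whichever file fit() actually wrote):

import torch

model = torch.load('mnist/linear_0_700.m')  # hypothetical checkpoint name
model.eval()                                # switch to evaluation mode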

4. Adding a Validation Set

Call model.train() and model.eval() to switch between training and evaluation modes. Because the per-batch loss computation is needed for both training and validation, it is factored out into a function (loss_batch below).
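These modes matter because layers such as Dropout and BatchNorm behave differently during training and inference. A quick illustration of my own:

import torch
from torch import nn

drop = nn.Dropout(p=0.5)
x = torch.ones(1, 4)
drop.train()
print(drop(x))  # about half the entries zeroed, the rest scaled by 2
drop.eval()
print(drop(x))  # identity: tensor([[1., 1., 1., 1.]])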

import torch.nn.functional as F
import torch
from linear import read_data
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
import numpy as np


class MnistLogistic(nn.Module):
    # subclass nn.Module
    def __init__(self):
        super().__init__()
        # nn.Linear defines a linear layer and creates its weight and bias parameters automatically
        self.lin = nn.Linear(784, 10)

    def forward(self, xb):
        return self.lin(xb)


def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    for epoch in range(epochs):
        # model.train() and model.eval() switch between training and
        # evaluation modes so the model behaves correctly in each phase
        model.train()
        for xb, yb in train_dl:
            loss_batch(model, loss_func, xb, yb, opt)

        model.eval()
        with torch.no_grad():
            losses, nums = zip(
                *[loss_batch(model, loss_func, xb, yb) for xb, yb in valid_dl]
            )
            # size-weighted average of the per-batch losses
            val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)

        print(epoch, val_loss)


def loss_batch(model, loss_func, xb, yb, opt=None):
    """
    封装损失函数,在训练集、验证集中均需使用
    :param model:
    :param loss_func:
    :param xb:
    :param yb:
    :param opt:
    :return:
    """
    loss = loss_func(model(xb), yb)

    if opt is not None:
        loss.backward()
        opt.step()
        opt.zero_grad()
    # tensor.item() converts a single-element tensor to a plain Python number
    return loss.item(), len(xb)


def get_model():
    model = MnistLogistic()
    return model, optim.SGD(model.parameters(), lr=lr)


def get_data(bs):
    x_train, y_train, x_valid, y_valid, n = read_data()
    train_ds = TensorDataset(x_train, y_train)
    # shuffle the training samples each epoch
    train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)

    valid_ds = TensorDataset(x_valid, y_valid)
    valid_dl = DataLoader(valid_ds, batch_size=bs * 2)
    return train_dl, valid_dl


if __name__ == "__main__":
    loss_func = F.cross_entropy

    bs = 64
    lr = 0.5
    epochs = 2

    train_dl, valid_dl = get_data(bs)
    model, opt = get_model()
    fit(epochs, model, loss_func, opt, train_dl, valid_dl)

5. Building a Neural Network

Use nn.Conv2d to build a three-layer convolutional network. Assembling a neural network is like stacking building blocks; the MnistCNN below consists of three convolutional layers, each followed by a ReLU activation, with an average-pooling layer at the end.
For the meaning of each convolution parameter, see: https://blog.csdn.net/weixin_42899627/article/details/108228008
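Each 3 × 3 convolution here uses stride 2 and padding 1, so the spatial size is halved each time: out = floor((in + 2·padding − kernel) / stride) + 1, giving 28 → 14 → 7 → 4. A quick shape check of my own:

import torch
from torch import nn

x = torch.randn(1, 1, 28, 28)  # one single-channel 28 x 28 image
conv = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
print(conv(x).shape)           # torch.Size([1, 16, 14, 14])
# floor((28 + 2*1 - 3) / 2) + 1 = 14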

import torch.nn.functional as F
from torch import nn
from torch import optim
from linear_torch_nn import fit, get_data


class MnistCNN(nn.Module):
    def __init__(self):
        super().__init__()
        # 16 filters, each 3 x 3 (kernel weight tensor: 16 x 1 x 3 x 3)
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)

    def forward(self, xb):
        # reshape each flat 784-vector into a single-channel 28 x 28 image
        xb = xb.view(-1, 1, 28, 28)
        # output after conv1 + ReLU: 16 x 14 x 14
        xb = F.relu(self.conv1(xb))
        # output after conv2 + ReLU: 16 x 7 x 7
        xb = F.relu(self.conv2(xb))
        # output after conv3 + ReLU: 10 x 4 x 4
        xb = F.relu(self.conv3(xb))
        # average pooling over the 4 x 4 map: 10 x 1 x 1
        xb = F.avg_pool2d(xb, 4)
        return xb.view(-1, xb.size(1))


if __name__ == "__main__":
    loss_func = F.cross_entropy

    bs = 64
    lr = 0.1
    epochs = 2

    train_dl, valid_dl = get_data(bs)
    model = MnistCNN()
    # momentum speeds up convergence
    opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    fit(epochs, model, loss_func, opt, train_dl, valid_dl)

6. Building a Network with nn.Sequential

A Sequential object runs each module it contains in order, which makes it very convenient to build a network. With nn.AdaptiveAvgPool2d, you specify the output size directly, regardless of the input size.
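For example (my own sketch), AdaptiveAvgPool2d(1) reduces a feature map of any spatial size to 1 × 1, so the network is not tied to one input resolution:

import torch
from torch import nn

pool = nn.AdaptiveAvgPool2d(1)
print(pool(torch.randn(1, 10, 4, 4)).shape)  # torch.Size([1, 10, 1, 1])
print(pool(torch.randn(1, 10, 9, 9)).shape)  # torch.Size([1, 10, 1, 1])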

import torch.nn.functional as F
from torch import nn
from torch import optim
from linear_torch_nn import fit, get_data


class Lambda(nn.Module):
    # wraps an arbitrary function as a module so it can sit inside nn.Sequential
    def __init__(self, func):
        super().__init__()
        self.func = func

    def forward(self, x):
        return self.func(x)


def preprocess(x):
    # reshape flat 784-vectors into single-channel 28 x 28 images
    return x.view(-1, 1, 28, 28)


def get_model():
    model = nn.Sequential(
        Lambda(preprocess),
        nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1),
        nn.ReLU(),
        nn.AdaptiveAvgPool2d(1),
        Lambda(lambda x: x.view(x.size(0), -1)),
    )
    return model


if __name__ == "__main__":
    loss_func = F.cross_entropy

    bs = 64
    lr = 0.1
    epochs = 2

    train_dl, valid_dl = get_data(bs)
    model = get_model()
    # momentum speeds up convergence
    opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    fit(epochs, model, loss_func, opt, train_dl, valid_dl)
