# Build a simple multi-class classification model.
#
# Multi-class problems use the softmax function; the softmax outputs for one
# sample sum to 1.
# Multi-class cross-entropy losses: nn.CrossEntropyLoss(), nn.NLLLoss().
# torchvision is PyTorch's computer-vision library.
# Data loading uses torch.utils.data.Dataset and torch.utils.data.DataLoader.
# The built-in torchvision image datasets subclass torch.utils.data.Dataset,
# so they can be used directly to create a DataLoader.
# Common raw image layout: (height, width, channels), e.g. (512, 512, 3).
# ToTensor() does three things:
# 1. converts the image to a tensor,
# 2. rearranges it to (channel, height, width),
# 3. scales pixel values into the range [0, 1].

import torchvision
#transforms里面是一些处理图像的方法
from torchvision.transforms import ToTensor
import torch
import matplotlib.pyplot as plt
import numpy as np
from torch import nn

# MNIST training split: downloaded into ./dataset, converted to tensors in [0, 1].
train_ds = torchvision.datasets.MNIST(
    root='./dataset', train=True, transform=ToTensor(), download=True,
)

# MNIST test split with the same preprocessing.
test_ds = torchvision.datasets.MNIST(
    root='./dataset', train=False, transform=ToTensor(), download=True,
)


# DataLoader wraps a Dataset and adds:
# 1. shuffling (shuffle=True),
# 2. mini-batching (batch_size) — single-sample updates make training unstable,
# 3. multi-process loading (num_workers),
# 4. a custom batch-collation function (collate_fn).
# A DataLoader is iterable.

# Serve mini-batches of 64; shuffle the training data every epoch.
train_dl = torch.utils.data.DataLoader(train_ds, batch_size=64, shuffle=True)
test_dl = torch.utils.data.DataLoader(test_ds, batch_size=64)

# A DataLoader is iterable: pull one sample batch to inspect shapes.
imgs, lables = next(iter(train_dl))

# Build a multilayer-perceptron classifier.

# nn.Linear() fully connected layers require flat (batch, features) input.

class model(nn.Module):
    """Three-layer MLP classifier for 28x28 MNIST digits (10 classes)."""

    def __init__(self):
        super().__init__()
        # Fully connected layers operate on flat (batch, features) input.
        self.linear1 = nn.Linear(28 * 28, 120)
        self.linear2 = nn.Linear(120, 84)
        self.linear3 = nn.Linear(84, 10)  # one output per digit class

    def forward(self, input):
        flat = input.view(-1, 1 * 28 * 28)        # flatten each image
        hidden = torch.relu(self.linear1(flat))
        hidden = torch.relu(self.linear2(hidden))
        # Return raw logits: CrossEntropyLoss applies softmax internally,
        # so no activation here.
        return self.linear3(hidden)
# softmax yields C class probabilities; np.argmax() (and torch.argmax())
# return the index of the largest value.
# "logits" conventionally means the pre-activation outputs.
# Use the GPU when available, otherwise fall back to the CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = model().to(device)  # instantiate the network (rebinds the class name)

# CrossEntropyLoss expects raw logits; it applies log-softmax internally.
loss_fn = torch.nn.CrossEntropyLoss()

# Optimization: adjust parameters to reduce the loss (e.g. Adam, SGD).
# lr scales each gradient step.
opt = torch.optim.SGD(model.parameters(), lr=0.001)

# Training loop: one full pass over the dataloader.
# Parameters: dl, model, loss_fn, optimizer.
def train(dl, model, loss_fn, optimizer):
    """Run one training epoch over `dl`.

    Returns (accuracy, mean_loss): accuracy is the fraction of correctly
    classified samples; mean_loss is averaged over the batches.
    """
    size = len(dl.dataset)   # total number of samples
    no_batches = len(dl)     # number of mini-batches

    train_loss, correct = 0, 0
    # Put the model in training mode so layers such as dropout/batch-norm
    # behave correctly (harmless for a plain MLP, required in general).
    model.train()

    for x, y in dl:
        x, y = x.to(device), y.to(device)
        pred = model(x)
        loss = loss_fn(pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Metric accumulation needs no gradient tracking.
        with torch.no_grad():
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
            train_loss += loss.item()
    correct /= size            # per-sample accuracy
    train_loss /= no_batches   # mean per-batch loss
    return correct, train_loss

# Evaluation function.
def test(test_dl, model, loss_fn):
    """Evaluate the model over `test_dl` without updating parameters.

    Returns (accuracy, mean_loss), analogous to train().
    """
    size = len(test_dl.dataset)   # total number of samples
    no_batches = len(test_dl)     # number of mini-batches

    test_loss, correct = 0, 0
    # Switch to evaluation mode so dropout/batch-norm layers use their
    # inference behaviour (required in general, even if a no-op here).
    model.eval()

    # No backprop or optimization during testing, so no gradient tracking.
    with torch.no_grad():
        for x, y in test_dl:
            x, y = x.to(device), y.to(device)
            pred = model(x)
            loss = loss_fn(pred, y)
            test_loss += loss.item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    correct /= size
    test_loss /= no_batches
    return correct, test_loss

# Train for 50 epochs; one epoch = one full pass over the dataset.
# Wrap the per-epoch train/test calls in a reusable fit() helper.
epochs = 50

def fit(epochs, train_dl, test_dl, model, loss_fn, opt):
    """Train for `epochs` epochs, evaluating after each one.

    Returns four per-epoch metric lists:
    (train_loss, train_acc, test_loss, test_acc).
    """
    train_loss, train_acc = [], []
    test_loss, test_acc = [], []

    for epoch in range(epochs):
        acc, loss = train(train_dl, model, loss_fn, opt)
        tst_acc, tst_loss = test(test_dl, model, loss_fn)
        train_acc.append(acc)
        train_loss.append(loss)
        test_acc.append(tst_acc)
        test_loss.append(tst_loss)
    return train_loss, train_acc, test_loss, test_acc
print("Done")

# Run the full 50-epoch training schedule.
(train_loss, train_acc, test_loss, test_acc) = fit(50, train_dl, test_dl, model, loss_fn, opt)

# Plot the training and test loss curves for inspection.
plt.plot(range(epochs), train_loss, label='train_loss')
plt.plot(range(epochs), test_loss, label='test_loss')
plt.legend()
plt.show()

# Source: adapted from blog.csdn.net/qq_45675231/article/details/129820098