PyTorch implementation of simple softmax regression

import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
from collections import OrderedDict
import d2lzh_pytorch as d2l
import torchvision
import torchvision.transforms as transforms
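# For intuition: softmax maps a vector of scores (logits) to a probability
# distribution. A minimal reference sketch only; it is not used below, since
# nn.CrossEntropyLoss applies softmax internally:
def softmax(logits):
    z = logits - logits.max(dim=1, keepdim=True).values   # subtract row max for numerical stability
    exp = z.exp()
    return exp / exp.sum(dim=1, keepdim=True)             # each row now sums to 1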

mnist_train = torchvision.datasets.FashionMNIST(
    root='/home/xj/Python_lianxi/动手学习深度学习/data/FashionMNIST',
    train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(
    root='/home/xj/Python_lianxi/动手学习深度学习/data/FashionMNIST',
    train=False, download=True, transform=transforms.ToTensor())

# Load the data into mini-batch iterators.
def load_data_fashion_mnist(batch_size, mnist_train, mnist_test):
    if sys.platform.startswith('win'):
        num_workers = 0   # multiprocess data loading is unreliable on Windows
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_iter, test_iter
batch_size = 256
# train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, mnist_train, mnist_test)

train_iter, test_iter = load_data_fashion_mnist(batch_size, mnist_train, mnist_test)
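# Optional sanity check: one batch of images has shape (batch, channel, height,
# width) and the labels have shape (batch,).
X, y = next(iter(train_iter))
print(X.shape, y.shape)   # torch.Size([256, 1, 28, 28]) torch.Size([256])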

# Mini-batch stochastic gradient descent.
def sgd(params, lr, batch_size):
    for param in params:
        # Gradients accumulate over a summed loss, so divide by the batch size.
        param.data -= lr * param.grad / batch_size
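# A tiny, hypothetical sanity check of sgd (toy parameter, not part of the model):
w = torch.tensor([1.0, 2.0], requires_grad=True)
(w.sum() * 3).backward()          # gradient of 3*(w0 + w1) is [3., 3.]
sgd([w], lr=0.1, batch_size=1)    # w <- w - 0.1 * [3., 3.] / 1
print(w)                          # tensor([0.7000, 1.7000], requires_grad=True)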


# Compute classification accuracy over a data iterator.
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n   # the return must sit outside the loop, or only the first batch is evaluated
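# How the per-batch count works, on toy values: argmax picks the predicted class
# per row, and the comparison counts correct predictions.
y_hat_demo = torch.tensor([[0.1, 0.8, 0.1], [0.9, 0.05, 0.05]])
y_demo = torch.tensor([1, 0])
print((y_hat_demo.argmax(dim=1) == y_demo).float().sum().item())   # 2.0 (both correct)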

num_inputs = 784   # 28 x 28 images, flattened
num_outputs = 10   # 10 clothing classes

class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        # Flatten (batch, 1, 28, 28) into (batch, 784) before the linear layer.
        y = self.linear(x.view(x.shape[0], -1))
        return y

net = LinearNet(num_inputs, num_outputs)
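# If d2lzh_pytorch is unavailable, an equivalent flatten module can be defined
# locally; a minimal sketch that behaves like the d2l.FlattenLayer used below:
class FlattenLayer(nn.Module):
    def forward(self, x):
        # Collapse all dimensions except the batch dimension.
        return x.view(x.shape[0], -1)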

# An equivalent model built with nn.Sequential; it rebinds net, so this version
# (with an explicit flatten layer) is the one that actually gets trained.
net = nn.Sequential(
    OrderedDict([
        ('flatten', d2l.FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))
    ])
)

def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            # CrossEntropyLoss already averages over the batch, so .sum() on the
            # resulting scalar is a no-op; it matters only for elementwise losses.
            l = loss(y_hat, y).sum()

            # Zero the gradients before backpropagation.
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()

            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f' % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

# Randomly initialize the weights from a normal distribution with mean 0 and
# standard deviation 0.01, and set the bias to 0. (net.linear is accessible by
# name because the Sequential was built from an OrderedDict.)
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

# A loss function that combines the softmax operation and cross-entropy.
loss = nn.CrossEntropyLoss()
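# For reference: nn.CrossEntropyLoss is equivalent (up to floating-point error)
# to log-softmax followed by negative log-likelihood. A quick check on toy values:
logits = torch.tensor([[0.2, 0.8], [1.5, -0.5]])
targets = torch.tensor([1, 0])
manual = torch.nn.functional.nll_loss(torch.log_softmax(logits, dim=1), targets)
print(torch.allclose(loss(logits, targets), manual))   # True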

# Use mini-batch stochastic gradient descent with a learning rate of 0.05.
optimizer = torch.optim.SGD(net.parameters(), lr=0.05)

# Run the training loop.
num_epochs = 160
# d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
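# After training, a small inference sketch (assumes d2lzh_pytorch provides
# get_fashion_mnist_labels, as in the d2l book): compare true and predicted
# labels on one test batch.
X, y = next(iter(test_iter))
true_labels = d2l.get_fashion_mnist_labels(y.numpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())
print(list(zip(true_labels, pred_labels))[:5])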
Reposted from blog.csdn.net/u011573853/article/details/103875773