PyTorch Optimizers

Experimental environment: PyTorch 1.5
Using multivariate linear regression as an example, we solve the same problem twice: once with a built-in optimizer and once by updating the parameters manually.
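
The target model is y = 3·x1 + 4·x2 + 5·x3 + 6·x4 + 7. In the code below, a constant feature of 1 is appended to every sample so that the bias is absorbed into the weight vector; the task then reduces to learning a single 5×1 vector w such that y ≈ Xw.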

Custom Dataset:

# A custom dataset must subclass Dataset and implement __getitem__ and __len__
import torch
import numpy as np
from torch.utils.data import Dataset

# Simulated data: ground-truth parameters
w1, w2, w3, w4, b = 3, 4, 5, 6, 7

class MyDataSet(Dataset):
    def __init__(self):
        super(MyDataSet, self).__init__()
        # Build the dataset: 1000 samples with 4 integer features in [1, 9]
        self.train_x = []
        self.train_y = []
        for i in range(0, 1000):
            self.train_x.append(torch.randint(1, 10, (4,)).tolist())
            self.train_y.append(w1 * self.train_x[i][0] + w2 * self.train_x[i][1] + w3 * self.train_x[i][2] + w4 * self.train_x[i][3] + b)
        x_w = np.array(self.train_x).reshape((1000, 4))
        # Append a constant-1 column so the bias is absorbed into the weight vector
        x_b = np.ones((1000, 1))
        # The inputs themselves do not need gradients; only the weight vector w will
        self.train_x = torch.tensor(np.concatenate((x_w, x_b), axis=1), dtype=torch.float)
        self.train_y = torch.tensor(self.train_y, dtype=torch.float)

    def __getitem__(self, idx):
        return self.train_x[idx], self.train_y[idx]

    def __len__(self):
        return len(self.train_x)
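
As a quick check that the dataset behaves as expected, it can be wrapped in a DataLoader and one batch inspected. This is a minimal sketch, not part of the original post; it assumes the DataLoader import used in the training code below:

# Sanity check: each batch should contain 5-column inputs (4 features + the constant 1)
dataset = MyDataSet()
loader = DataLoader(dataset, batch_size=4, shuffle=True)
xb, yb = next(iter(loader))
print(xb.shape, yb.shape)  # torch.Size([4, 5]) torch.Size([4])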

Manually updating the parameters with SGD

import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
# Simulated data: ground-truth parameters
w1, w2, w3, w4, b = 3, 4, 5, 6, 7
 
def solve_with_sgd():
    lr = 0.0001
    epoch = 100000
    batch_size = 64
    w = torch.randn((5, 1), dtype=torch.float, requires_grad=True)
    my_dataset = MyDataSet()
    myloader = DataLoader(my_dataset, batch_size=batch_size, shuffle=True)
    for e in tqdm(range(epoch)):
        for x, y in myloader:
            # Half-MSE loss over the batch: (1 / 2n) * sum((Xw - y)^2)
            loss = (1 / (2 * len(x))) * ((x.mm(w) - y.view((-1, 1))) ** 2).sum()
            loss.backward()
            with torch.no_grad():
                # Update w outside the autograd graph, then clear the accumulated gradient
                w -= lr * w.grad
                w.grad.zero_()
    print(w)
    # Result after 100,000 epochs of SGD
    # tensor([[3.0009],
    #         [4.0007],
    #         [5.0008],
    #         [6.0008],
    #         [6.9831]], requires_grad=True)

solve_with_sgd()
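
The loss above is the half mean squared error (1 / 2n) * Σ(Xw − y)², whose gradient with respect to w is Xᵀ(Xw − y) / n, which is exactly what loss.backward() leaves in w.grad. The sketch below (illustrative only, not part of the original post) checks autograd against this analytic gradient on one slice of the data:

def check_gradient():
    # Compare autograd's w.grad with the analytic gradient X^T (Xw - y) / n
    dataset = MyDataSet()
    x, y = dataset.train_x[:64], dataset.train_y[:64].view(-1, 1)
    w = torch.randn((5, 1), dtype=torch.float, requires_grad=True)
    loss = (1 / (2 * len(x))) * ((x.mm(w) - y) ** 2).sum()
    loss.backward()
    with torch.no_grad():
        analytic = x.t().mm(x.mm(w) - y) / len(x)
    print(torch.allclose(w.grad, analytic, atol=1e-4))  # should print True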

Updating the parameters with the built-in Adam optimizer

Constructor parameters:

torch.optim.Adam(params,  # params must be an iterable (e.g. a list of tensors)
                lr=0.001, 
                betas=(0.9, 0.999),
                eps=1e-08,
                weight_decay=0,
                amsgrad=False)
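
For intuition about what lr, betas, and eps control, one Adam step for a single parameter tensor can be sketched roughly as below. This is a simplified illustration of the update rule, not PyTorch's actual implementation (it omits weight_decay and amsgrad):

def adam_step(p, grad, m, v, t, lr=0.001, betas=(0.9, 0.999), eps=1e-8):
    # m, v are running moment estimates (initialised to zeros); t is the 1-based step count
    beta1, beta2 = betas
    m.mul_(beta1).add_(grad, alpha=1 - beta1)            # moving average of the gradient
    v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # moving average of the squared gradient
    m_hat = m / (1 - beta1 ** t)                         # bias correction for the zero initialisation
    v_hat = v / (1 - beta2 ** t)
    with torch.no_grad():
        p -= lr * m_hat / (v_hat.sqrt() + eps)           # per-coordinate adaptive step

In the full example below, this update is performed by optimizer.step():
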
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
#Simulated data: ground-truth parameters
w1, w2, w3, w4, b = 3, 4, 5, 6, 7

def solve_with_adam():
    lr = 0.001
    epoch = 10000
    batch_size = 64
    w = torch.randn((5, 1), dtype=torch.float, requires_grad=True)
    my_dataset = MyDataSet()
    myloader = DataLoader(my_dataset, batch_size=batch_size, shuffle=True)

    optimizer = torch.optim.Adam([w],lr = lr)
    # Use PyTorch's built-in MSE (mean squared error) loss
    loss = torch.nn.MSELoss(reduction='mean')
    for e in tqdm(range(epoch)):
        for x, y in myloader:
            optimizer.zero_grad()
            # Compute the loss on the current batch
            out = loss(x.mm(w), y.view((-1, 1)))
            out.backward()
            # step() updates every parameter held by the optimizer; for plain SGD this amounts to
            # w -= lr * w.grad, while Adam uses its running moment estimates. zero_grad() likewise
            # clears the .grad of every parameter the optimizer holds.
            optimizer.step()
    print(w)
    # Parameters after 10,000 epochs; they match the true values exactly
    # tensor([[3.0000],
    #         [4.0000],
    #         [5.0000],
    #         [6.0000],
    #         [7.0000]], requires_grad=True)

solve_with_adam()
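
Because the training loop only talks to the optimizer through zero_grad() and step(), switching to another built-in optimizer only requires changing the construction line, for example (illustrative; the learning rate would likely need retuning):

# Drop-in replacement for the Adam line above; everything else in the loop stays the same
optimizer = torch.optim.SGD([w], lr=0.0001, momentum=0.9)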

References

Optimizers: torch.optim.Optimizer


Reprinted from blog.csdn.net/qq_35268841/article/details/108965536