Hand-rolling a minimal linear regression in PyTorch

import numpy as np
import torch
import torch.nn as nn

class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        # Add a fully connected layer from nn
        self.linear = nn.Linear(input_dim, output_dim)  # declare whichever layers you need

    def forward(self, x):
        out = self.linear(x)
        return out


x_value = [i for i in range(11)]
x_train = np.array(x_value, dtype=np.float32)
x_train = x_train.reshape(-1, 1)
print(x_train.shape)

y_value = [4 * i + 3 for i in x_value]
y_train = np.array(y_value, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
print(y_train.shape)

inputdim = 1
outputdim = 1
model = LinearRegressionModel(inputdim, outputdim)  # instantiate our own network

# Move to the GPU for training when one is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

epochs = 1000
lr = 0.01
# Pick an optimizer; plain SGD works well here. Note the loss is a separate
# choice: MSE for regression, cross-entropy for classification.
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
criterion = nn.MSELoss()
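
# (Illustrative alternative, not used in this script: for a classification
# task only the loss would change, while the optimizer choice stays independent.)
# criterion = nn.CrossEntropyLoss()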

# Convert the data to tensors once; it doesn't change between epochs
inputs = torch.from_numpy(x_train).to(device)
labels = torch.from_numpy(y_train).to(device)

for epoch in range(1, epochs + 1):
    # Gradients accumulate by default, so zero them at each iteration
    optimizer.zero_grad()

    # Forward pass
    outputs = model(inputs)

    # Compute the loss
    loss = criterion(outputs, labels)

    # Backward pass
    loss.backward()

    # Update the weights
    optimizer.step()
    # print(f"epoch = {epoch}, loss = {loss.item()}")

# A tensor on the GPU can't be converted to numpy directly; bring it back to the CPU first
predicted = model(inputs).detach().cpu().numpy()
print(predicted)

# Save and reload the model's weights (state dict)
torch.save(model.state_dict(), "model.pkl")
# model.load_state_dict(torch.load(r"F:\PyTorchPro\model.pkl"))

para = list(model.parameters())
print("para =", para)


The mapping was deliberately designed as y = 4x + 3; let's see whether this pipeline can recover w = 4 and b = 3.
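
For a direct check, the learned parameters can be read straight off the linear layer; a small sketch against the model trained above:

w = model.linear.weight.item()  # .item() works because the weight is a single-element tensor
b = model.linear.bias.item()
print(f"w = {w:.4f}, b = {b:.4f}")  # should land close to 4 and 3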

Reposted from blog.csdn.net/huangzhuofei/article/details/125240979