torch_linear_regression

  • Hand-coded a linear_regression to walk through the torch workflow once more.
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001

# Toy dataset
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)

y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)


# Linear regression model
model = nn.Linear(input_size, output_size)
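
# The same model could also be written as a custom nn.Module subclass; calling
# such a model as model(inputs) dispatches to its overridden forward() method.
# A minimal sketch (kept commented out, not used below):
#
# class LinearRegression(nn.Module):
#     def __init__(self, in_features, out_features):
#         super().__init__()
#         self.linear = nn.Linear(in_features, out_features)
#
#     def forward(self, x):
#         return self.linear(x)
#
# model = LinearRegression(input_size, output_size)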


# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
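# Adam would be a drop-in alternative here, e.g.:
# optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)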

# Train the model
for epoch in range(num_epochs):
    # Convert numpy arrays to torch tensors
    # training inputs
    inputs = torch.from_numpy(x_train)
    # ground-truth targets
    targets = torch.from_numpy(y_train)

    # Forward pass
    # A forward pass just runs the data through the model; with a custom class Xxx(nn.Module) style model, the call dispatches to the overridden forward() method (see the commented sketch under the model definition above)
    outputs = model(inputs)

    # the loss compares the predictions against the ground truth; MSELoss is used here
    loss = criterion(outputs, targets)
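    # with the default reduction='mean', MSELoss is equivalent to
    # ((outputs - targets) ** 2).mean()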

    # Backward and optimize
    # clear (re-initialize) the gradients accumulated in the previous iteration;
    # the optimizer here is SGD (Adam is another common choice, see the commented alternative above)
    optimizer.zero_grad()
    # backpropagation: compute gradients of the loss w.r.t. the model parameters
    loss.backward()
    # let the optimizer update the parameters
    optimizer.step()
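    # for plain SGD (no momentum) this step amounts to: param <- param - learning_rate * param.grad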
    # print the loss every 5 epochs
    if (epoch+1) % 5 == 0:
        print('Epoch [{}/{}], Loss:{:.4f}'.format(epoch+1, num_epochs, loss.item()))


# Plot the graph
predicted = model(torch.from_numpy(x_train)).detach().numpy()
# scatter plot of the original data (red circles)
plt.plot(x_train, y_train, 'ro', label='Original data')
# the fitted regression line
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()


# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
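
The saved state dict can be loaded back into a model of the same shape later on; a minimal sketch, assuming the checkpoint file name used above:

# Rebuild the model and restore the trained weights
model = nn.Linear(input_size, output_size)
model.load_state_dict(torch.load('model.ckpt'))
model.eval()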
