Fitting the sin Function with a Linear Network in PyTorch

Contents

1. Imports

2. Defining the hyperparameters

3. Generating the dataset

4. Loading the training and test sets

5. Building the linear network

6. Instantiating the network and optimizer

7. Training the network

8. Visualization

9. Results

10. Complete code

1. Imports

os is imported to work around a matplotlib plotting error (the duplicate OpenMP runtime issue).

TensorDataset and DataLoader handle data loading, just as in an image-classification pipeline.

torch.nn provides the building blocks for the network.

import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'  # work around the duplicate OpenMP runtime error

from torch.utils.data import TensorDataset,DataLoader
import torch.nn as nn
import numpy as np
import torch
import matplotlib.pyplot as plt

2. Defining the hyperparameters

The hyperparameters are grouped in one place so they are easy to tweak during experiments.

np.random.seed(0)   # fix the seed so the generated data is reproducible

# Hyperparameters
NUMBER_TRAIN_DATA = 50                              # number of training samples
NUMBER_TEST_DATA = NUMBER_TRAIN_DATA // 5           # number of test samples (0.2x the training set)
BATCH_SIZE = 10
LEARNING_RATE = 0.01
EPOCHS = 2000
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

3. Generating the dataset

The task is to fit the sin function with a linear-layer network. Samples are drawn from the range -2π to 2π, and the number of samples is controlled by the NUMBER_TRAIN_DATA hyperparameter defined above.

np.random.random generates floats in [0.0, 1.0); multiplying by 4π stretches them to [0, 4π), and subtracting 2π shifts them to [-2π, 2π).

# Generate the dataset
def get_dataset(num):
    data_x = np.random.random((num, 1)) * np.pi * 4 - np.pi * 2  # random values in [-2pi, 2pi)
    data_y = np.sin(data_x).reshape(-1, 1)
    return data_x, data_y
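
As a quick sanity check (an illustrative addition; xs and ys are names introduced here), we can sample a few points and confirm the shapes and the value range. Note that this call consumes numbers from the shared numpy random stream, so run it separately if you want to reproduce the exact data below.

xs, ys = get_dataset(5)
print(xs.shape, ys.shape)                            # (5, 1) (5, 1)
print(xs.min() >= -2 * np.pi, xs.max() < 2 * np.pi)  # True True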

4. Loading the training and test sets

Since everything in this post is implemented with PyTorch tensors, the data must be converted to tensors when it is loaded.

# Load the training set
train_x, train_y = get_dataset(NUMBER_TRAIN_DATA)
trainSet = TensorDataset(torch.tensor(train_x, dtype=torch.float32), torch.tensor(train_y, dtype=torch.float32))
trainLoader = DataLoader(trainSet, batch_size=BATCH_SIZE, shuffle=True)

# Load the test set
test_x, test_y = get_dataset(NUMBER_TEST_DATA)
testSet = TensorDataset(torch.tensor(test_x, dtype=torch.float32), torch.tensor(test_y, dtype=torch.float32))
testLoader = DataLoader(testSet, batch_size=BATCH_SIZE, shuffle=False)  # evaluation needs no shuffling
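
To see what a loader yields, here is a small illustrative check (xb and yb are names introduced here): each batch is a pair of float32 tensors of shape (BATCH_SIZE, 1).

xb, yb = next(iter(trainLoader))   # fetch one batch
print(xb.shape, yb.shape)          # torch.Size([10, 1]) torch.Size([10, 1])
print(xb.dtype)                    # torch.float32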

5. Building the linear network

Here torch.nn is used to build a simple fully connected network.

The code below defines a network with two hidden layers, using sigmoid as the activation function.

Since this is a regression task, the input is the independent variable x, and the output should be a scalar that matches sin(x) as closely as possible.

So the first layer takes an input of size 1, and the final layer produces an output of size 1.

# Define the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(1, 10),    # input: the scalar x
            nn.Sigmoid(),
            nn.Linear(10, 10),
            nn.Sigmoid(),
            nn.Linear(10, 1)     # output: the predicted sin(x)
        )

    def forward(self, x):
        x = self.fc(x)
        return x
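
A minimal forward-pass sketch (illustrative; net and dummy are names introduced here) confirms that an input of shape (N, 1) comes out as (N, 1):

net = Net()
dummy = torch.zeros(4, 1)      # a batch of 4 scalar inputs
print(net(dummy).shape)        # torch.Size([4, 1])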

6. Instantiating the network and optimizer

The optimizer is Adam, and the loss function is the MSE (mean squared error).

# Instantiate the network and optimizer
model = Net()
model.to(DEVICE)
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
loss_fn = nn.MSELoss(reduction='sum')
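
Because reduction='sum' returns the summed squared error over a batch, the training loop below divides the accumulated loss by the number of samples to recover a per-sample average. A small illustrative check of that equivalence (a and b are toy tensors introduced here):

a = torch.tensor([[1.0], [2.0]])
b = torch.tensor([[1.5], [2.5]])
print(nn.MSELoss(reduction='sum')(a, b) / 2)   # tensor(0.2500)
print(nn.MSELoss(reduction='mean')(a, b))      # tensor(0.2500), the same per-sample value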

7. Training the network

The training loop is straightforward: each epoch runs the optimizer over the training batches, then evaluates on the test set with gradients disabled.

# Training
epoch_train = []  # per-epoch training loss
epoch_test = []   # per-epoch test loss
for epoch in range(EPOCHS):
    train_running_loss = 0.0
    for x, y in trainLoader:        # iterate over the training batches
        x_trainData, y_trainData = x.to(DEVICE), y.to(DEVICE)

        y_predict = model(x_trainData)             # forward
        loss = loss_fn(y_predict, y_trainData)     # loss
        optim.zero_grad()                          # gradient to zero
        loss.backward()                            # backward
        optim.step()                               # gradient descent
        train_running_loss += loss.item()

    with torch.no_grad():
        test_running_loss = 0.0
        for x,y in testLoader:
            x_testData,y_testData = x.to(DEVICE),y.to(DEVICE)
            y_predict = model(x_testData)
            loss = loss_fn(y_predict,y_testData)
            test_running_loss += loss.item()

    train_running_loss = train_running_loss / NUMBER_TRAIN_DATA   # per-sample average (loss was summed)
    test_running_loss = test_running_loss / NUMBER_TEST_DATA

    epoch_train.append(train_running_loss)
    epoch_test.append(test_running_loss)

    if (epoch+1) % 100 == 0:
        print("epoch:%d, train loss:%.5f, test loss:%.5f" % (epoch+1, train_running_loss, test_running_loss))

8. Visualization

The visualization step is also simple.

Note that training may run on the GPU, and the resulting tensors cannot be handed to numpy directly: numpy arrays cannot live on the GPU, so the tensors must be moved back to the CPU first.
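
For example (an illustrative snippet; t and arr are names introduced here), calling .numpy() on a CUDA tensor raises a TypeError, so the tensor has to be moved first:

t = torch.ones(3, device=DEVICE)
arr = t.cpu().numpy()   # t.numpy() would fail if t lives on the GPU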

# Visualize the results
with torch.no_grad():
    axis_x = np.linspace(-2*np.pi,2*np.pi,200).reshape(-1,1)
    axis_x = torch.from_numpy(axis_x).type(torch.float32)
    axis_x = axis_x.to(DEVICE)

    y_predict = model(axis_x)

    plt.figure(figsize=(12,8))

    plt.subplot(1,2,1), plt.title('loss curve')
    plt.plot(epoch_train, label='train loss', color='r')    # training loss
    plt.plot(epoch_test, label='test loss', color='b')      # test loss
    plt.legend()

    plt.subplot(1,2,2), plt.title('performance')
    plt.scatter(train_x, train_y, label='trainSet', color='r')     # training samples
    plt.scatter(test_x, test_y, label='testSet', color='g')        # test samples
    plt.plot(axis_x.cpu().numpy(), y_predict.cpu().numpy(), label='predict', color='b')
    plt.legend()

    plt.show()

9. Results

Training output:

Fitted result:

As the plots show, the network fits the sin curve quite well.

10. Complete code

import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'  # work around the duplicate OpenMP runtime error

from torch.utils.data import TensorDataset,DataLoader
import torch.nn as nn
import numpy as np
import torch
import matplotlib.pyplot as plt


np.random.seed(0)   # fix the seed so the generated data is reproducible

# Hyperparameters
NUMBER_TRAIN_DATA = 50                              # number of training samples
NUMBER_TEST_DATA = NUMBER_TRAIN_DATA // 5           # number of test samples (0.2x the training set)
BATCH_SIZE = 10
LEARNING_RATE = 0.01
EPOCHS = 2000
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'


# Generate the dataset
def get_dataset(num):
    data_x = np.random.random((num, 1)) * np.pi * 4 - np.pi * 2  # random values in [-2pi, 2pi)
    data_y = np.sin(data_x).reshape(-1, 1)
    return data_x, data_y


# Load the training set
train_x, train_y = get_dataset(NUMBER_TRAIN_DATA)
trainSet = TensorDataset(torch.tensor(train_x, dtype=torch.float32), torch.tensor(train_y, dtype=torch.float32))
trainLoader = DataLoader(trainSet, batch_size=BATCH_SIZE, shuffle=True)

# Load the test set
test_x, test_y = get_dataset(NUMBER_TEST_DATA)
testSet = TensorDataset(torch.tensor(test_x, dtype=torch.float32), torch.tensor(test_y, dtype=torch.float32))
testLoader = DataLoader(testSet, batch_size=BATCH_SIZE, shuffle=False)  # evaluation needs no shuffling


# Define the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(1, 10),    # input: the scalar x
            nn.Sigmoid(),
            nn.Linear(10, 10),
            nn.Sigmoid(),
            nn.Linear(10, 1)     # output: the predicted sin(x)
        )

    def forward(self, x):
        x = self.fc(x)
        return x


# Instantiate the network and optimizer
model = Net()
model.to(DEVICE)
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
loss_fn = nn.MSELoss(reduction='sum')

# Training
epoch_train = []  # per-epoch training loss
epoch_test = []   # per-epoch test loss
for epoch in range(EPOCHS):
    train_running_loss = 0.0
    for x, y in trainLoader:        # iterate over the training batches
        x_trainData, y_trainData = x.to(DEVICE), y.to(DEVICE)

        y_predict = model(x_trainData)             # forward
        loss = loss_fn(y_predict, y_trainData)     # loss
        optim.zero_grad()                          # gradient to zero
        loss.backward()                            # backward
        optim.step()                               # gradient descent
        train_running_loss += loss.item()

    with torch.no_grad():
        test_running_loss = 0.0
        for x,y in testLoader:
            x_testData,y_testData = x.to(DEVICE),y.to(DEVICE)
            y_predict = model(x_testData)
            loss = loss_fn(y_predict,y_testData)
            test_running_loss += loss.item()

    train_running_loss = train_running_loss / NUMBER_TRAIN_DATA   # per-sample average (loss was summed)
    test_running_loss = test_running_loss / NUMBER_TEST_DATA

    epoch_train.append(train_running_loss)
    epoch_test.append(test_running_loss)

    if (epoch+1) % 100 == 0:
        print("epoch:%d, train loss:%.5f, test loss:%.5f" % (epoch+1, train_running_loss, test_running_loss))

# Visualize the results
with torch.no_grad():
    axis_x = np.linspace(-2*np.pi,2*np.pi,200).reshape(-1,1)
    axis_x = torch.from_numpy(axis_x).type(torch.float32)
    axis_x = axis_x.to(DEVICE)

    y_predict = model(axis_x)

    plt.figure(figsize=(12,8))

    plt.subplot(1,2,1), plt.title('loss curve')
    plt.plot(epoch_train, label='train loss', color='r')    # training loss
    plt.plot(epoch_test, label='test loss', color='b')      # test loss
    plt.legend()

    plt.subplot(1,2,2), plt.title('performance')
    plt.scatter(train_x, train_y, label='trainSet', color='r')     # training samples
    plt.scatter(test_x, test_y, label='testSet', color='g')        # test samples
    plt.plot(axis_x.cpu().numpy(), y_predict.cpu().numpy(), label='predict', color='b')
    plt.legend()

    plt.show()

Reposted from blog.csdn.net/qq_44886601/article/details/129784312