PyTorch Notes (01): First Impressions

Copyright notice: this is an original post by the author; do not reproduce without permission. https://blog.csdn.net/jiangpeng59/article/details/79519345

Below is a small example that fits a quadratic function using a network with a single hidden layer.

import torch
from torch.autograd import Variable
import torch.nn.functional as func
import matplotlib.pyplot as plt

# Generate synthetic data
# unsqueeze adds a dimension: shape (100,) -> (100, 1), since nn.Linear expects 2-D input
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + .3 * torch.rand(x.shape)  # quadratic curve plus uniform noise in [0, 0.3)
x, y = Variable(x), Variable(y)  # Variable is deprecated since PyTorch 0.4; plain tensors work here

class NetWork(torch.nn.Module):
    def __init__(self, n_input, n_hidden, n_out):
        super(NetWork, self).__init__()  # always call the parent constructor first
        self.hidden = torch.nn.Linear(n_input, n_hidden)
        self.out = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = func.relu(self.hidden(x))
        y = self.out(x)
        return y


net = NetWork(1, 32, 1)
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
loss_func = torch.nn.MSELoss()
epoch = 100
plt.ion()  # turn on interactive mode for live plot updates
for i in range(epoch):
    prediction = net(x)
    optimizer.zero_grad()
    loss_func(prediction, y).backward()
    optimizer.step()
    if i % 5 == 0:
        plt.cla()  # clear the current axes
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy())
        plt.pause(0.1)
plt.ioff()  # turn off interactive mode
plt.show()  # remember to call show(), or the window closes as soon as the script ends
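As a follow-up, the same network can also be written more compactly with torch.nn.Sequential, and it is handy to watch the loss value fall during training. The sketch below is a minimal variant of the script above, reusing the same x and y; the name net2 and the print-every-20-steps interval are my own choices, and loss.item() requires PyTorch >= 0.4:

# A more compact equivalent of NetWork(1, 32, 1) using torch.nn.Sequential (sketch)
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 32),
    torch.nn.ReLU(),
    torch.nn.Linear(32, 1),
)
optimizer = torch.optim.SGD(net2.parameters(), lr=0.1)
loss_func = torch.nn.MSELoss()
for i in range(100):
    prediction = net2(x)
    loss = loss_func(prediction, y)
    optimizer.zero_grad()  # reset accumulated gradients before each backward pass
    loss.backward()
    optimizer.step()
    if i % 20 == 0:
        print('step %d, loss %.4f' % (i, loss.item()))  # loss.item(): PyTorch >= 0.4

Sequential is convenient for simple feed-forward stacks like this one; subclassing nn.Module, as in the main example, pays off once the forward pass needs branching or other custom logic.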
