PyTorch: code for a typical neural network

Notes summarizing the hand-written code from the course.

# Import packages
import torch
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn.datasets import load_iris
from torch.autograd import Variable
from torch.optim import SGD
# Check whether a GPU is available
use_cuda = torch.cuda.is_available()
print(use_cuda)
False
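
On newer PyTorch versions the idiomatic way to handle this is a single torch.device object rather than scattered .cuda() calls; a minimal sketch, equivalent to the checks used in this post:

# Device-agnostic alternative (sketch): pick the device once, then move
# tensors and modules with .to(device) instead of calling .cuda() directly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# later: x = x.to(device); net = net.to(device)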
# Load the data
iris = load_iris()
print(iris.keys())
dict_keys(['data', 'target', 'frame', 'target_names', 'DESCR', 'feature_names', 'filename'])
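
Besides data and target, the keys above expose useful metadata; for example:

# Inspecting the metadata: for iris, target_names holds the three species
# and feature_names the four measurements
print(iris['target_names'])   # ['setosa' 'versicolor' 'virginica']
print(iris['feature_names'])  # sepal/petal length and width, in cm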
# Data preprocessing
x = iris['data']
y = iris['target']
print(x.shape)
print(y.shape)
x = torch.FloatTensor(x)
y = torch.LongTensor(y)
# Variable is a deprecated no-op since PyTorch 0.4; kept here to match the course code
x, y = Variable(x), Variable(y)
print(x,y)
(150, 4)
(150,)
tensor([[5.1000, 3.5000, 1.4000, 0.2000],
        [4.9000, 3.0000, 1.4000, 0.2000],
        ................................
        [6.2000, 3.4000, 5.4000, 2.3000],
        [5.9000, 3.0000, 5.1000, 1.8000]]) tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        2, 2, 2, 2, 2, 2])
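
Since PyTorch 0.4, Variable has been merged into Tensor, so the wrapping above does nothing; the same preprocessing can be written directly, as in this sketch:

# Equivalent preprocessing without Variable (sketch): tensors track gradients
# themselves on modern PyTorch, so no wrapper is needed
x = torch.as_tensor(iris['data'], dtype=torch.float32)
y = torch.as_tensor(iris['target'], dtype=torch.long)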

class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_hidden1, n_output):
        # Define the network's layers; these are plain linear layers
        # (torch.nn.Linear) with no built-in activation function
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.hidden1 = torch.nn.Linear(n_hidden, n_hidden1)
        self.predict = torch.nn.Linear(n_hidden1, n_output)

    def forward(self, x):
        # Forward data flow: the first hidden layer is followed by a sigmoid
        # activation. Note that self.hidden1, although defined above, is never
        # used here, so the data skips that layer. This only works because
        # n_hidden == n_hidden1 below; if they differed, self.predict would
        # receive an input of the wrong size and raise a shape error.
        x = torch.sigmoid(self.hidden(x))  # F.sigmoid is deprecated; use torch.sigmoid
        x = self.predict(x)
        # The final layer outputs log-probabilities; the largest one determines the predicted class
        out = F.log_softmax(x, dim=1)
        return out
# Note that n_hidden=5 and n_hidden1=5 are equal, as required above.
# Iris has only 3 classes, so n_output=3 would suffice; with n_output=4 the
# code still runs because the labels 0-2 stay in range, but one output unit is unused.
net = Net(n_feature=4, n_hidden=5, n_hidden1=5, n_output=4)
print(net)
Net(
  (hidden): Linear(in_features=4, out_features=5, bias=True)
  (hidden1): Linear(in_features=5, out_features=5, bias=True)
  (predict): Linear(in_features=5, out_features=4, bias=True)
)
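
Because the forward pass is just a chain of layers, the same architecture (minus the unused hidden1 layer) can also be expressed with torch.nn.Sequential; a sketch:

# The same forward pass with nn.Sequential (sketch): the unused hidden1 layer
# is dropped, and nn.LogSoftmax replaces the F.log_softmax call in forward()
net_seq = torch.nn.Sequential(
    torch.nn.Linear(4, 5),
    torch.nn.Sigmoid(),
    torch.nn.Linear(5, 4),
    torch.nn.LogSoftmax(dim=1),
)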
# If a GPU is available, move the data and the model onto it
if use_cuda:
    x = x.cuda()
    y = y.cuda()
    net = net.cuda()
# Optimizer setup: SGD over net.parameters() with a learning rate of 0.5
optimizer = SGD(net.parameters(),lr=0.5)
px,py = [],[]
for i in range(1000):
    prediction = net(x)
    # Compute the loss: nll_loss expects log-probabilities, which forward() returns
    loss = F.nll_loss(prediction, y)
    # Zero the parameter gradients
    optimizer.zero_grad()
    # Backpropagate, then update the parameters
    loss.backward()
    optimizer.step()

    # Log the loss; loss.data is deprecated, loss.item() reads the scalar directly
    print(i, "loss", loss.item())
    px.append(i)
    py.append(loss.item())

0 loss 1.448301076889038
1 loss 1.3052066564559937
..............
998 loss 0.0880339965224266
999 loss 0.07817251980304718
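
Side note: pairing F.log_softmax with F.nll_loss, as done above, is equivalent to applying cross-entropy to the raw outputs. If forward() returned self.predict(x) directly, the loss could be computed as in this sketch:

# Equivalent loss formulation (sketch): CrossEntropyLoss fuses log_softmax and
# nll_loss, so it expects raw, pre-softmax outputs ("logits") from the model
criterion = torch.nn.CrossEntropyLoss()
# loss = criterion(logits, y)  # where logits = self.predict(...) without log_softmax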
# (Optional live plotting, originally intended inside the training loop:)
# if i % 10 == 0:
#     plt.cla()
#     plt.plot(px, py, 'r-', lw=1)
#     plt.text(0, 0, 'loss = %.4f' % loss.item())
#     plt.pause(0.1)
plt.plot(px,py,'r-',lw = 1)
plt.show()

[Figure: training loss curve, decreasing over the 1000 iterations]
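
Finally, the trained network can be checked on the training data by taking the argmax of the log-probabilities; a minimal sketch:

# Training-set accuracy (sketch): the predicted class is the index of the
# largest log-probability; no gradients are needed for evaluation
with torch.no_grad():
    pred = net(x).argmax(dim=1)
    acc = (pred == y).float().mean().item()
print("training accuracy:", acc)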


Reposted from blog.csdn.net/qq_42830971/article/details/126466543