《深度学习之PyTorch实战计算机视觉》 Study Notes (4)

This part uses PyTorch to define a neural network model in two different ways.

The code is based on Python 3.7, PyTorch 1.0, and CUDA 10.0.
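Note: since PyTorch 0.4 the Variable class has been merged into Tensor, so the Variable wrapper used below (following the book) still works but is no longer required. A minimal equivalent without the wrapper, assuming the same dimensions, would be:

import torch

batch_n, input_data, output_data = 100, 1000, 10
x = torch.randn(batch_n, input_data)   # a plain Tensor; requires_grad defaults to False
y = torch.randn(batch_n, output_data)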

import torch
from torch.autograd import Variable

batch_n = 100        # number of samples per batch
hidden_layer = 100   # number of hidden units
input_data = 1000    # dimensionality of each input sample
output_data = 10     # dimensionality of each output

# random input x and random target y; the data itself needs no gradients
x = Variable(torch.randn(batch_n, input_data), requires_grad = False)
y = Variable(torch.randn(batch_n, output_data), requires_grad = False)

# First way to define the model: pass the layers to Sequential directly; they are indexed starting from 0
models = torch.nn.Sequential(
    torch.nn.Linear(input_data, hidden_layer),
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_layer, output_data)
)
print(models)

epoch_n = 10000
learning_rate = 1e-4
loss_fn = torch.nn.MSELoss()

# No optimizer here: after backward(), the parameters are updated manually with plain gradient descent
for epoch in range(epoch_n):
    y_pred = models(x)
    loss = loss_fn(y_pred, y)
    if epoch % 1000 == 0:
        print('Epoch:{}, Loss:{:.4f}'.format(epoch, loss.item()))
    models.zero_grad()
    
    loss.backward()
    
    for param in models.parameters():
        param.data -= param.grad.data * learning_rate
    
    
Sequential(
  (0): Linear(in_features=1000, out_features=100, bias=True)
  (1): ReLU()
  (2): Linear(in_features=100, out_features=10, bias=True)
)
Epoch:0, Loss:1.0354
Epoch:1000, Loss:0.9617
Epoch:2000, Loss:0.8971
Epoch:3000, Loss:0.8395
Epoch:4000, Loss:0.7875
Epoch:5000, Loss:0.7402
Epoch:6000, Loss:0.6969
Epoch:7000, Loss:0.6568
Epoch:8000, Loss:0.6192
Epoch:9000, Loss:0.5839
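For reference, the manual update above modifies param.data directly; the same step is often written with torch.no_grad() instead, which avoids touching .data. A minimal sketch of just the update step, assuming the same models and learning_rate as above:

# replaces the parameter-update loop above; same effect, no .data access
with torch.no_grad():
    for param in models.parameters():
        param -= learning_rate * param.grad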
# Second way: with an OrderedDict, each layer in Sequential can be given its own name
import torch
from torch.autograd import Variable
from collections import OrderedDict

batch_n = 100
hidden_layer = 100
input_data = 1000
output_data = 10

x = Variable(torch.randn(batch_n, input_data), requires_grad = False)
y = Variable(torch.randn(batch_n, output_data), requires_grad = False)

models2 = torch.nn.Sequential(OrderedDict([
    ('Line1', torch.nn.Linear(input_data, hidden_layer)),
    ('Relu1', torch.nn.ReLU()),
    ('Line2', torch.nn.Linear(hidden_layer, output_data))
]))
print(models2)

epoch_n = 20
learning_rate = 1e-4
loss_fn = torch.nn.MSELoss()

# This time the parameters are updated automatically by an optimizer (Adam)
optimizer = torch.optim.Adam(models2.parameters(), lr = learning_rate)

for epoch in range(epoch_n):
    y_pred = models2(x)
    loss = loss_fn(y_pred, y)
    if epoch % 1 == 0:
        print('Epoch:{}, Loss:{:.4f}'.format(epoch, loss.item()))
    
    optimizer.zero_grad()
    
    loss.backward()
    
    optimizer.step()
Sequential(
  (Line1): Linear(in_features=1000, out_features=100, bias=True)
  (Relu1): ReLU()
  (Line2): Linear(in_features=100, out_features=10, bias=True)
)
Epoch:0, Loss:1.0817
Epoch:1, Loss:1.0557
Epoch:2, Loss:1.0303
Epoch:3, Loss:1.0055
Epoch:4, Loss:0.9813
Epoch:5, Loss:0.9577
Epoch:6, Loss:0.9347
Epoch:7, Loss:0.9123
Epoch:8, Loss:0.8903
Epoch:9, Loss:0.8689
Epoch:10, Loss:0.8479
Epoch:11, Loss:0.8275
Epoch:12, Loss:0.8075
Epoch:13, Loss:0.7880
Epoch:14, Loss:0.7690
Epoch:15, Loss:0.7504
Epoch:16, Loss:0.7323
Epoch:17, Loss:0.7146
Epoch:18, Loss:0.6974
Epoch:19, Loss:0.6806
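One practical benefit of the OrderedDict style is that each layer can later be retrieved by the name it was given. A short sketch, using the models2 defined above:

# access a layer by the name given in the OrderedDict
print(models2.Line1)                # Linear(in_features=1000, out_features=100, bias=True)
print(models2.Line1.weight.shape)   # torch.Size([100, 1000])

# parameter names also carry the layer names
for name, param in models2.named_parameters():
    print(name, param.shape)        # Line1.weight, Line1.bias, Line2.weight, Line2.bias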

Reposted from blog.csdn.net/weixin_40017911/article/details/89015269