PyTorch Lecture 04: Back-propagation and Autograd

import torch
from torch.autograd import Variable  # Variable wrapper from the PyTorch 0.3-era API this lecture uses

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = Variable(torch.Tensor([1.0]), requires_grad=True)  # initial guess (any value works); requires_grad=True tells autograd to track gradients w.r.t. w
print(w)


# Our model's forward pass: a linear unit y_pred = x * w (no bias)
def forward(x):
    return x * w


# Loss function: squared error (y_pred - y)^2
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)
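

# A quick sanity check before training (an added sketch, not part of the original
# lecture): for this model the analytic gradient is dl/dw = 2 * x * (x * w - y),
# so autograd should reproduce it -- e.g. -2.0 for the first sample (x=1, y=2, w=1),
# matching the first "grad" line in the output below.
l_check = loss(x_data[0], y_data[0])
l_check.backward()
assert abs(w.grad.data[0] - 2 * x_data[0] * (x_data[0] * w.data[0] - y_data[0])) < 1e-6
w.grad.data.zero_()  # reset the gradient so this check does not affect training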


# Training loop: SGD on one (x, y) pair at a time, learning rate 0.01

for epoch in range(10):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        l.backward()  # autograd computes dl/dw and accumulates it into w.grad
        print("\t grad", x, y, w.grad.data[0])  # print x, y and the current gradient
        # Update via .data so the step itself is not recorded in the autograd graph
        w.data = w.data - 0.01 * w.grad.data
        # Manually zero the gradient after the backward pass and update;
        # backward() accumulates gradients, so skipping this corrupts the next step
        w.grad.data.zero_()

        print("progress:", epoch, l.data[0])

# After training

print("predict (after training)", 4, forward(4))
Output:

Variable containing:
 1
[torch.FloatTensor of size 1]

	 grad 1.0 2.0 -2.0
progress: 0 1.0
	 grad 2.0 4.0 -7.840000152587891
progress: 0 3.841600179672241
	 grad 3.0 6.0 -16.228801727294922
progress: 0 7.315943717956543
	 grad 1.0 2.0 -1.478623867034912
progress: 1 0.5465821623802185
	 grad 2.0 4.0 -5.796205520629883
progress: 1 2.099749803543091
	 grad 3.0 6.0 -11.998146057128906
progress: 1 3.9987640380859375
	 grad 1.0 2.0 -1.0931644439697266
progress: 2 0.2987521290779114
	 grad 2.0 4.0 -4.285204887390137
progress: 2 1.1476863622665405
	 grad 3.0 6.0 -8.870372772216797
progress: 2 2.1856532096862793
	 grad 1.0 2.0 -0.8081896305084229
progress: 3 0.16329261660575867
	 grad 2.0 4.0 -3.1681032180786133
progress: 3 0.6273048520088196
	 grad 3.0 6.0 -6.557973861694336
progress: 3 1.1946394443511963
	 grad 1.0 2.0 -0.5975041389465332
progress: 4 0.08925279974937439
	 grad 2.0 4.0 -2.3422164916992188
progress: 4 0.34287363290786743
	 grad 3.0 6.0 -4.848389625549316
progress: 4 0.6529689431190491
	 grad 1.0 2.0 -0.4417421817779541
progress: 5 0.048784039914608
	 grad 2.0 4.0 -1.7316293716430664
progress: 5 0.18740876019001007
	 grad 3.0 6.0 -3.58447265625
progress: 5 0.35690122842788696
	 grad 1.0 2.0 -0.3265852928161621
progress: 6 0.02666448801755905
	 grad 2.0 4.0 -1.2802143096923828
progress: 6 0.10243429243564606
	 grad 3.0 6.0 -2.650045394897461
progress: 6 0.195076122879982
	 grad 1.0 2.0 -0.24144840240478516
progress: 7 0.014574333094060421
	 grad 2.0 4.0 -0.9464778900146484
progress: 7 0.055988773703575134
	 grad 3.0 6.0 -1.9592113494873047
progress: 7 0.10662525147199631
	 grad 1.0 2.0 -0.17850565910339355
progress: 8 0.007966067641973495
	 grad 2.0 4.0 -0.699742317199707
progress: 8 0.030602457001805305
	 grad 3.0 6.0 -1.4484672546386719
progress: 8 0.0582793727517128
	 grad 1.0 2.0 -0.1319713592529297
progress: 9 0.004354109987616539
	 grad 2.0 4.0 -0.5173273086547852
progress: 9 0.016726721078157425
	 grad 3.0 6.0 -1.070866584777832
progress: 9 0.03185431286692619
predict (after training) 4 Variable containing:
 7.8049
[torch.FloatTensor of size 1]


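After 10 epochs, w has moved from its initial 1.0 most of the way to the true weight 2.0, which is why forward(4) returns 7.8049 rather than 8. Note that the Variable wrapper used above was deprecated in PyTorch 0.4; plain tensors now carry requires_grad themselves. Below is a minimal sketch of the same example in current PyTorch (assuming version >= 0.4), with the weight update done under torch.no_grad():

import torch

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = torch.tensor([1.0], requires_grad=True)  # replaces Variable(torch.Tensor([1.0]), requires_grad=True)


def forward(x):
    return x * w


def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) ** 2


for epoch in range(10):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        l.backward()  # compute dl/dw into w.grad
        with torch.no_grad():  # update w without recording the step in the graph
            w -= 0.01 * w.grad
        w.grad.zero_()  # gradients accumulate across backward() calls, so reset
        print("progress:", epoch, l.item())

print("predict (after training)", 4, forward(4).item())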


Reposted from blog.csdn.net/zhuoyuezai/article/details/80382252