Dive into Deep Learning — 2.2 autograd

Tensor

import torch

# With requires_grad=True, autograd tracks every operation on this tensor;
# after the forward computation, .backward() computes all the gradients.
x = torch.ones(2, 2, requires_grad=True)
print(x)
# Every tensor has a .grad_fn attribute referencing the Function that
# created it (None for tensors created directly by the user).
print(x.grad_fn)

# tensor([[1., 1.],
#         [1., 1.]], requires_grad=True)
# None
y = x + 2
print(y)
print(y.grad_fn)

# tensor([[3., 3.],
#         [3., 3.]], grad_fn=<AddBackward0>)
# <AddBackward0 object at 0x7fecef6f5320>

# Note: x was created directly, so it has no grad_fn, while y was created
# by an addition operation, so it has one.

# Tensors created directly are called leaf nodes; a leaf node's grad_fn is None.
print(x.is_leaf, y.is_leaf)

# True False
z = y * y * 3
out = z.mean()
print(z, out)

# tensor([[27., 27.],
#         [27., 27.]], grad_fn=<MulBackward0>) tensor(27., grad_fn=<MeanBackward0>)
# .requires_grad_() changes the requires_grad flag in place.
a = torch.randn(2, 2)  # requires_grad defaults to False
a = (a * 3) / (a - 1)
print(a.requires_grad)

a.requires_grad_(True)
print(a.requires_grad)

b = (a * a).sum()
print(b.grad_fn)

# False
# True
# <SumBackward0 object at 0x7fecef6d17b8>

Gradients

# out is a scalar, so backward() needs no gradient argument.
out.backward()  # equivalent to out.backward(torch.tensor(1.))
print(x.grad)

# tensor([[4.5000, 4.5000],
#         [4.5000, 4.5000]])

# Backpropagating again: note that the gradient is ACCUMULATED into x.grad.
out2 = x.sum()
out2.backward()
print(x.grad)

out3 = x.sum()
x.grad.data.zero_()  # clear the accumulated gradient before the next backward
out3.backward()
print(x.grad)

# tensor([[5.5000, 5.5000],
#         [5.5000, 5.5000]])
# tensor([[1., 1.],
#         [1., 1.]])
# y.backward(w) does not compute the derivative of y with respect to x;
# it computes the derivative of l = torch.sum(y * w) with respect to x.
x = torch.tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
y = 2 * x
z = y.view(2, 2)
print(z)

# tensor([[2., 4.],
#         [6., 8.]], grad_fn=<ViewBackward>)

# Now z is not a scalar, so backward() must be passed a weight tensor of the
# same shape; the weighted sum first reduces the output to a scalar.
v = torch.tensor([[1.0, 0.1], [0.01, 0.001]], dtype=torch.float)
z.backward(v)
print(x.grad)

# tensor([2.0000, 0.2000, 0.0200, 0.0020])
# Stopping gradient tracking
x = torch.tensor(1.0, requires_grad=True)
y1 = x ** 2
with torch.no_grad():  # y2 is excluded from the graph; only y1's gradient flows back
    y2 = x ** 3
y3 = y1 + y2

print(x, x.requires_grad)
print(y1, y1.requires_grad)
print(y2, y2.requires_grad)  # False, so y2.backward() cannot be called
print(y3, y3.requires_grad)


# tensor(1., requires_grad=True) True
# tensor(1., grad_fn=<PowBackward0>) True
# tensor(1.) False
# tensor(2., grad_fn=<AddBackward0>) True
y3.backward()  # only the y1 = x**2 branch contributes: d(y1)/dx = 2x = 2
print(x.grad)

# tensor(2.)

If you want to modify a tensor's value without the change being recorded by autograd (i.e., without affecting backpropagation), you can operate on tensor.data.

x = torch.ones(1, requires_grad=True)

print(x.data)  # x.data shares storage with x but is itself an untracked tensor
print(x.data.requires_grad)

y = 2 * x
x.data *= 100  # changes the value in place, but autograd does not record it

y.backward()
print(x)  # the value was changed by the in-place update on .data
print(x.grad)  # gradient is still d(2x)/dx = 2, unaffected by the update

# tensor([1.])
# False
# tensor([100.], requires_grad=True)
# tensor([2.])

Related reading

Source: https://www.cnblogs.com/harbin-ho/p/11962014.html