TensorFlow 2.0 Learning (9): Differentiation

Computing derivatives in TensorFlow 2.0

  • Imports
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
2.0.0
sys.version_info(major=3, minor=7, micro=6, releaselevel='final', serial=0)
matplotlib 3.1.3
numpy 1.18.1
pandas 1.0.0
sklearn 0.22.1
tensorflow 2.0.0
tensorflow_core.keras 2.2.4-tf
  • Numerical approximation of derivatives
# Simple numerical approximation of a derivative (central difference)

def f(x):
    return 3. * x ** 2 + 2. * x - 1

def approximate_derivative(f, x, eps=1e-3):
    return (f(x + eps) - f(x - eps)) / (2. * eps)

print(approximate_derivative(f, 1.))
7.999999999999119
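As a quick sanity check (added here, not in the original post): the analytic derivative of f is f'(x) = 6x + 2, so f'(1) = 8, which the central-difference estimate above approximates closely.

# Added: compare the numerical estimate with the analytic derivative
def f_prime(x):
    return 6. * x + 2.          # d/dx (3x^2 + 2x - 1)

print(f_prime(1.))              # 8.0, close to 7.999999999999119 above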
# Simple numerical approximation of partial derivatives

def g(x1, x2):
    return (x1 + 5) * (x2 ** 2)

def approximate_gradient(g, x1, x2, eps=1e-3):
    dg_x1 = approximate_derivative(lambda x: g(x, x2), x1, eps)
    dg_x2 = approximate_derivative(lambda x: g(x1, x), x2, eps)
    return dg_x1, dg_x2

print(approximate_gradient(g, 2., 3.))
(8.999999999993236, 41.999999999994486)
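Again as an added check, the analytic partials of g are ∂g/∂x1 = x2² and ∂g/∂x2 = 2·x2·(x1 + 5), which at (2, 3) give 9 and 42:

# Added: analytic partial derivatives of g for comparison
def analytic_gradient(x1, x2):
    return x2 ** 2, 2. * x2 * (x1 + 5)

print(analytic_gradient(2., 3.))   # (9.0, 42.0)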
  • Computing derivatives with TensorFlow
  • Computing a single partial derivative
# Computing derivatives with tf.GradientTape

x1 = tf.Variable(2.0)
x2 = tf.Variable(3.0)
with tf.GradientTape() as tape:
    z = g(x1, x2)
dz_x1 = tape.gradient(z, x1)
print(dz_x1)
# To also get the partial derivative with respect to x2 we would need to open
# another with tf.GradientTape() as tape block, because a tape can only be used once.
# The fix is to pass persistent=True to tf.GradientTape() so that the tape is kept.
tf.Tensor(9.0, shape=(), dtype=float32)
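Calling tape.gradient a second time on the same non-persistent tape raises an error; a minimal sketch (added for illustration) of what happens:

# Added: a non-persistent tape is released after the first gradient() call
try:
    tape.gradient(z, x2)
except RuntimeError as e:
    print(e)   # the tape can only be used once unless persistent=True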
  • Computing multiple partial derivatives
x1 = tf.Variable(2.0)
x2 = tf.Variable(3.0)
with tf.GradientTape(persistent=True) as tape:
    z = g(x1, x2)
dz_x1 = tape.gradient(z, x1)
dz_x2 = tape.gradient(z, x2)
print(dz_x1, dz_x2)
# With persistent=True the tape is not released automatically,
# so it has to be deleted manually.
del tape
tf.Tensor(9.0, shape=(), dtype=float32) tf.Tensor(42.0, shape=(), dtype=float32)
x1 = tf.Variable(2.0)
x2 = tf.Variable(3.0)
with tf.GradientTape() as tape:
    z = g(x1, x2)
dz_x1x2 = tape.gradient(z, [x1, x2])
print(dz_x1x2)
[<tf.Tensor: id=290, shape=(), dtype=float32, numpy=9.0>, <tf.Tensor: id=296, shape=(), dtype=float32, numpy=42.0>]
  • Differentiating with respect to constants
# Can we differentiate with respect to constants?
x1 = tf.constant(2.0)
x2 = tf.constant(3.0)
with tf.GradientTape() as tape:
    z = g(x1, x2)
dz_x1x2 = tape.gradient(z, [x1, x2])
print(dz_x1x2)
[None, None]
# Making constants differentiable
# by explicitly watching them on the tape
x1 = tf.constant(2.0)
x2 = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x1)
    tape.watch(x2)
    z = g(x1, x2)
dz_x1x2 = tape.gradient(z, [x1, x2])
print(dz_x1x2)
[<tf.Tensor: id=317, shape=(), dtype=float32, numpy=9.0>, <tf.Tensor: id=323, shape=(), dtype=float32, numpy=42.0>]
  • Differentiating multiple functions
# Differentiating two functions with respect to one variable:
# passing [z1, z2] returns the SUM of the gradients, dz1/dx + dz2/dx = 3 + 2*5 = 13
x = tf.Variable(5.0)
with tf.GradientTape() as tape:
    z1 = 3 * x
    z2 = x ** 2
tape.gradient([z1, z2], x)

<tf.Tensor: id=346, shape=(), dtype=float32, numpy=13.0>
  • Second-order derivatives
# Second-order derivatives via nested tapes
x1 = tf.Variable(2.0)
x2 = tf.Variable(3.0)
with tf.GradientTape(persistent=True) as outer_tape:
    with tf.GradientTape(persistent=True) as inner_tape:
        z = g(x1, x2)
    inner_grads = inner_tape.gradient(z, [x1, x2])
outer_grads = [outer_tape.gradient(inner_grad, [x1, x2])
              for inner_grad in inner_grads]
print(outer_grads)
del inner_tape
del outer_tape
[[None, <tf.Tensor: id=489, shape=(), dtype=float32, numpy=6.0>], [<tf.Tensor: id=500, shape=(), dtype=float32, numpy=6.0>, <tf.Tensor: id=498, shape=(), dtype=float32, numpy=14.0>]]
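The nested list is the Hessian of g. As an added check, the analytic second derivatives are ∂²g/∂x1² = 0 (returned as None because dz/dx1 = x2² does not depend on x1), ∂²g/∂x1∂x2 = 2·x2 = 6, and ∂²g/∂x2² = 2·(x1 + 5) = 14, matching the tensors above:

# Added: analytic Hessian of g(x1, x2) = (x1 + 5) * x2**2 for comparison
def analytic_hessian(x1, x2):
    return [[0., 2. * x2], [2. * x2, 2. * (x1 + 5)]]

print(analytic_hessian(2., 3.))   # [[0.0, 6.0], [6.0, 14.0]]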
  • Gradient descent by hand
# Gradient descent
# f(x) = 3x^2 + 2x - 1 has its minimum where f'(x) = 6x + 2 = 0, i.e. at x = -1/3

learning_rate = 0.1
x = tf.Variable(0.0)

for _ in range(100):
    with tf.GradientTape() as tape:
        z = f(x)
    dz_dx = tape.gradient(z, x)
    x.assign_sub(learning_rate * dz_dx)
print(x)

<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=-0.3333333>
# Using an optimizer to apply the gradients
learning_rate = 0.1
x = tf.Variable(0.0)

optimizer = keras.optimizers.SGD(learning_rate=learning_rate)
for _ in range(100):
    with tf.GradientTape() as tape:
        z = f(x)
    dz_dx = tape.gradient(z, x)
    optimizer.apply_gradients([(dz_dx, x)])
print(x)

<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=-0.3333333>
Reposted from blog.csdn.net/Smile_mingm/article/details/104513712