C = tf.add(A, B)     # compute the sum of matrices A and B
D = tf.matmul(A, B)  # compute the matrix product of A and B
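For reference, a runnable version of the two operations above; the concrete 2x2 values of A and B are illustrative assumptions, not from the original text:

import tensorflow as tf

A = tf.constant([[1., 2.], [3., 4.]])  # illustrative values
B = tf.constant([[5., 6.], [7., 8.]])  # illustrative values
C = tf.add(A, B)                       # element-wise sum
D = tf.matmul(A, B)                    # matrix product
print(C.numpy())  # [[ 6.  8.] [10. 12.]]
print(D.numpy())  # [[19. 22.] [43. 50.]]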
3. Automatic differentiation
import tensorflow as tf

x = tf.Variable(initial_value=3.)
with tf.GradientTape() as tape:  # inside the tf.GradientTape() context, all computation steps are recorded for differentiation
    y = tf.square(x)
y_grad = tape.gradient(y, x)     # compute the derivative of y with respect to x
print([y, y_grad])
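Since y = x^2, the derivative at x = 3 is dy/dx = 2x = 6, so the program prints y = 9 and y_grad = 6. When higher-order derivatives are needed, GradientTape contexts can be nested; a minimal sketch, not part of the original example:

import tensorflow as tf

x = tf.Variable(3.)
with tf.GradientTape() as outer_tape:
    with tf.GradientTape() as inner_tape:
        y = tf.square(x)
    dy_dx = inner_tape.gradient(y, x)    # first derivative: 2x = 6
d2y_dx2 = outer_tape.gradient(dy_dx, x)  # second derivative: 2
print(dy_dx.numpy(), d2y_dx2.numpy())    # 6.0 2.0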
4. Automatic partial derivatives
X = tf.constant([[1., 2.], [3., 4.]])
y = tf.constant([[1.], [2.]])
w = tf.Variable(initial_value=[[1.], [2.]])
b = tf.Variable(initial_value=1.)
with tf.GradientTape() as tape:
    L = 0.5 * tf.reduce_sum(tf.square(tf.matmul(X, w) + b - y))
w_grad, b_grad = tape.gradient(L, [w, b])  # compute the partial derivatives of L(w, b) with respect to w and b
print([L.numpy(), w_grad.numpy(), b_grad.numpy()])
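These results can be checked analytically: with L(w, b) = 0.5 * ||Xw + b - y||^2, the partial derivatives are dL/dw = X^T (Xw + b - y) and dL/db = sum(Xw + b - y). For the values above the residual Xw + b - y is [[5.], [10.]], so L = 62.5, w_grad = [[35.], [50.]] and b_grad = 15. A NumPy sketch of the same check:

import numpy as np

X = np.array([[1., 2.], [3., 4.]])
y = np.array([[1.], [2.]])
w = np.array([[1.], [2.]])
b = 1.
r = X @ w + b - y         # residual: [[5.], [10.]]
L = 0.5 * np.sum(r ** 2)  # 62.5
w_grad = X.T @ r          # [[35.], [50.]]
b_grad = np.sum(r)        # 15.0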
5. Linear regression
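The code below assumes X and y already exist as NumPy arrays from an earlier preparation step. A minimal, hypothetical version of that step (the raw values and the min-max normalization are illustrative assumptions):

import numpy as np
import tensorflow as tf

X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)       # illustrative data
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)  # illustrative data
# min-max normalize so that plain gradient descent converges reliably
X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())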
X = tf.constant(X)
y = tf.constant(y)
a = tf.Variable(initial_value=0.)
b = tf.Variable(initial_value=0.)
variables = [a, b]
num_epoch = 10000
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
for e in range(num_epoch):
    # use tf.GradientTape() to record the gradient information of the loss function
    with tf.GradientTape() as tape:
        y_pred = a * X + b
        loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))
    # TensorFlow automatically computes the gradients of the loss with respect to the independent variables (the model parameters)
    grads = tape.gradient(loss, variables)
    # TensorFlow automatically updates the parameters according to the gradients
    optimizer.apply_gradients(grads_and_vars=zip(grads, variables))
print(a, b)
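The same model can be written more compactly with the Keras API; a sketch under the assumption that X and y are shaped as above (the epoch count is illustrative):

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(units=1)])  # one weight a and one bias b
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),
              loss='mse')
model.fit(tf.reshape(X, [-1, 1]), tf.reshape(y, [-1, 1]), epochs=100, verbose=0)
print(model.layers[0].get_weights())  # learned kernel [[a]] and bias [b]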