Conquer TensorFlow 2.0 in 30 Days - Day 08: Low-level API Demonstration

TensorFlow's Hierarchical Structure

In this chapter we introduce the five levels in TensorFlow's hierarchy: the hardware layer, the kernel layer, the low-level API, the mid-level API, and the high-level API. Using linear regression as an example, we give an intuitive, side-by-side look at what implementing a model at each level is like.

From low to high, TensorFlow's hierarchy can be divided into the following five levels.

The bottom level is the hardware layer: TensorFlow supports adding CPUs, GPUs, or TPUs to its pool of compute resources.

The second level is the kernels, implemented in C++; the kernels can run distributed across platforms.

The third level consists of operators implemented in Python: low-level API primitives that wrap the C++ kernels, mainly covering tensor operations, computation graphs, and automatic differentiation.
Examples: tf.Variable, tf.constant, tf.function, tf.GradientTape, tf.nn.softmax, ...
If we compare a model to a house, the third-level API is the "bricks" of the model.
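To get a quick feel for these bricks, here is a minimal, illustrative sketch (the values are arbitrary, not from the original example):

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # an immutable tensor
v = tf.Variable([1.0, 0.0])                # mutable, trainable state
tf.print(tf.nn.softmax(v))                 # [0.731... 0.268...]

@tf.function  # traces the Python function into a static computation graph
def double(x):
    return 2 * x

tf.print(double(a))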

The fourth level consists of model components implemented in Python: function-level wrappers around the low-level API, mainly covering model layers, loss functions, metrics, optimizers, data pipelines, feature columns, and so on.
Examples: tf.keras.layers, tf.keras.losses, tf.keras.metrics, tf.keras.optimizers, tf.data.Dataset, tf.feature_column, ...
If we compare a model to a house, the fourth-level API is the "walls" of the model.
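A minimal sketch of stacking these walls together (tensors and hyperparameters here are illustrative):

import tensorflow as tf

# Data pipeline: batches of (features, labels)
ds = tf.data.Dataset.from_tensor_slices(
    (tf.random.normal([8, 2]), tf.random.normal([8, 1]))).batch(4)

layer = tf.keras.layers.Dense(1)               # a model layer
loss_fn = tf.keras.losses.MeanSquaredError()   # a loss function
optimizer = tf.keras.optimizers.SGD(0.01)      # an optimizer

for x, y in ds:
    with tf.GradientTape() as tape:
        loss = loss_fn(y, layer(x))
    grads = tape.gradient(loss, layer.trainable_variables)
    optimizer.apply_gradients(zip(grads, layer.trainable_variables))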

The fifth level consists of finished models implemented in Python: high-level APIs, generally packaged in OOP style, chiefly the model class interfaces provided by tf.keras.models.
If we compare a model to a house, the fifth-level API is the model itself, the "house".
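And a minimal sketch of the finished house (layer sizes and data are illustrative):

import tensorflow as tf

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(1, input_shape=(2,))
])
model.compile(optimizer="sgd", loss="mse")

X = tf.random.normal([100, 2])
Y = tf.random.normal([100, 1])
model.fit(X, Y, epochs=3, verbose=0)  # compile/fit handle the training loop for us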

Low-level API Demonstration

The example below implements a linear regression model using TensorFlow's low-level API.

The low-level API mainly consists of tensor operations, computation graphs, and automatic differentiation.
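As a warm-up before the regression example, here is a minimal sketch of automatic differentiation with tf.GradientTape (the function y = x^2 + 2x is just an illustration):

import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x**2 + 2.0*x  # y = x^2 + 2x

# dy/dx = 2x + 2, which is 8 at x = 3
tf.print(tape.gradient(y, x))  # 8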

import tensorflow as tf

# Print a divider line with a timestamp
@tf.function
def printbar():
    ts = tf.timestamp()
    today_ts = ts % (24 * 60 * 60)  # seconds elapsed since midnight UTC

    # +8 converts UTC to Beijing time (UTC+8)
    hour = tf.cast(today_ts // 3600 + 8, tf.int32) % tf.constant(24)
    minute = tf.cast((today_ts % 3600) // 60, tf.int32)
    second = tf.cast(tf.floor(today_ts % 60), tf.int32)

    def timeformat(m):
        # Pad single-digit numbers with a leading zero
        if tf.strings.length(tf.strings.format("{}", m)) == 1:
            return tf.strings.format("0{}", m)
        else:
            return tf.strings.format("{}", m)

    timestring = tf.strings.join([timeformat(hour), timeformat(minute),
                timeformat(second)], separator=":")
    tf.print("==========" * 8, end="")
    tf.print(timestring)
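Calling printbar() prints a divider followed by the current wall-clock time in HH:MM:SS (Beijing time, hence the +8 hour offset), as seen in the training logs below.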
# Number of samples
n = 400

# Generate a toy dataset
X = tf.random.uniform([n, 2], minval=-10, maxval=10)
w0 = tf.constant([[2.0], [-1.0]])
b0 = tf.constant(3.0)
Y = X @ w0 + b0 + tf.random.normal([n, 1], mean=0.0, stddev=2.0)  # @ is matrix multiplication; add Gaussian noise
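As a sanity check (not part of the original walkthrough), the closed-form least-squares solution can be computed directly with tf.linalg.lstsq; the gradient-descent estimates below should land near it:

# Augment X with a column of ones so the bias is estimated jointly
A = tf.concat([X, tf.ones([n, 1])], axis=1)  # shape [n, 3]
p = tf.linalg.lstsq(A, Y)                    # least-squares estimate of [w; b]
tf.print(p)                                  # close to [[2], [-1], [3]] up to noise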

# Debug in eager (dynamic graph) mode

w = tf.Variable(tf.random.normal(w0.shape))
b = tf.Variable(0.0)

def train(epochs):
    for epoch in tf.range(1, epochs + 1):
        with tf.GradientTape() as tape:
            # Forward pass: compute the loss
            Y_hat = X @ w + b
            # Mean squared error: (Y - Y_hat)^T (Y - Y_hat) / (2n);
            # squeeze turns the resulting 1x1 matrix into a scalar
            loss = tf.squeeze(tf.transpose(Y - Y_hat) @ (Y - Y_hat)) / (2.0 * n)

        # Backward pass: compute the gradients
        dloss_dw, dloss_db = tape.gradient(loss, [w, b])
        # Update the parameters by gradient descent
        w.assign(w - 0.001 * dloss_dw)
        b.assign(b - 0.001 * dloss_db)
        if epoch % 1000 == 0:
            printbar()
            tf.print("epoch =", epoch, " loss =", loss)
            tf.print("w =", w)
            tf.print("b =", b)
            tf.print("")

train(5000)
================================================================================16:12:30
epoch = 1000  loss = 2.51871967
w = [[1.98677838]
 [-1.00795388]]
b = 1.93567574

================================================================================16:12:32
epoch = 2000  loss = 1.97233057
w = [[1.98137867]
 [-1.01008439]]
b = 2.64583397

================================================================================16:12:34
epoch = 3000  loss = 1.89814222
w = [[1.97938931]
 [-1.01086855]]
b = 2.90751576

================================================================================16:12:36
epoch = 4000  loss = 1.8880688
w = [[1.9786551]
 [-1.01115859]]
b = 3.00394082

================================================================================16:12:38
epoch = 5000  loss = 1.88670135
w = [[1.97838593]
 [-1.01126552]]
b = 3.03947139
## Use the autograph mechanism to convert to a static graph for speedup

w = tf.Variable(tf.random.normal(w0.shape))
b = tf.Variable(0.0)

@tf.function
def train(epochs):
    for epoch in tf.range(1, epochs + 1):
        with tf.GradientTape() as tape:
            # Forward pass: compute the loss
            Y_hat = X @ w + b
            loss = tf.squeeze(tf.transpose(Y - Y_hat) @ (Y - Y_hat)) / (2.0 * n)

        # Backward pass: compute the gradients
        dloss_dw, dloss_db = tape.gradient(loss, [w, b])
        # Update the parameters by gradient descent
        w.assign(w - 0.001 * dloss_dw)
        b.assign(b - 0.001 * dloss_db)
        if epoch % 1000 == 0:
            printbar()
            tf.print("epoch =", epoch, " loss =", loss)
            tf.print("w =", w)
            tf.print("b =", b)
            tf.print("")

train(5000)
================================================================================16:13:44
epoch = 1000  loss = 2.52051115
w = [[1.98679066]
 [-1.00794923]]
b = 1.93408406

================================================================================16:13:44
epoch = 2000  loss = 1.972574
w = [[1.98138297]
 [-1.0100826]]
b = 2.64524698

================================================================================16:13:45
epoch = 3000  loss = 1.89817536
w = [[1.97939098]
 [-1.01086783]]
b = 2.90729809

================================================================================16:13:45
epoch = 4000  loss = 1.88807368
w = [[1.9786557]
 [-1.01115835]]
b = 3.00385976

================================================================================16:13:45
epoch = 5000  loss = 1.88670206
w = [[1.97838616]
 [-1.01126552]]
b = 3.03944039



Reposted from blog.csdn.net/Elenstone/article/details/105405697