TF2 study notes

Main topics of chapter 2

  • classification example

  • regression example

    • prepare the data: train, test, valid splits
    • model = keras.models.Sequential()
    • model.compile(loss=, optimizer=, metrics=)
    • model.fit(x_train,y_train,validation_data = (x_valid_scaled,y_valid),epochs = 100,callbacks = callbacks)
    • model.evaluate(x_test,y_test)
  • remedies for overfitting

    • dropout
  • remedies for vanishing gradients

    • BatchNormalization
    • activation="selu"
  • ways to build a model

    • Approach 1: model = keras.models.Sequential([keras.layers.Dense(30,input_shape,activation)])
    • Approach 2: model = keras.models.Sequential();model.add(keras.layers.Dense(30,input_shape,activation))
    • Approach 3: the functional API
      • input = keras.layers.Input(shape = x_train.shape[1:])
      • hidden1 = keras.layers.Dense(30,activation="relu")(input)
      • hidden2 = keras.layers.Dense(30,activation="relu")(hidden1)
      • concat = keras.layers.concatenate([input,hidden2])
      • output = keras.layers.Dense(1)(concat)
      • model = keras.models.Model(inputs=[input],outputs = [output]) # solidify the graph into a Model
    • Approach 4: subclassing
      • class WideDeepModel(keras.models.Model):

      • def __init__(self):

        • super(WideDeepModel, self).__init__()
        • """define the model's layers"""
        • self.hidden1_layer = keras.layers.Dense(30, activation="relu")
        • self.hidden2_layer = keras.layers.Dense(30, activation="relu")
        • self.output_layer = keras.layers.Dense(1)
      • def call(self,input):

        • """the model's forward pass"""
        • hidden1 = self.hidden1_layer(input)
        • hidden2 = self.hidden2_layer(hidden1)
        • concat = keras.layers.concatenate([input, hidden2])
        • output = self.output_layer(concat)
        • return output
      • model = keras.models.Sequential([WideDeepModel()])

      • model.build(input_shape=(None,8))

  • hyperparameter search

    • grid search
    • convert the model to a sklearn model and search with sklearn's RandomizedSearchCV
  • using callbacks:

    • TensorBoard # needs a log directory
      • to view the TensorBoard logs: run tensorboard --logdir=callbacks in a terminal
    • EarlyStopping
    • ModelCheckpoint # needs a file path

classification example

import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import time
import sys
import tensorflow as tf

from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl,np,pd,sklearn,tf,keras:
    print(module.__name__,module)
2.1.0
sys.version_info(major=3, minor=6, micro=10, releaselevel='final', serial=0)
matplotlib <module 'matplotlib' from '/Users/stacy/anaconda3/envs/tf2_py3/lib/python3.6/site-packages/matplotlib/__init__.py'>
numpy <module 'numpy' from '/Users/stacy/anaconda3/envs/tf2_py3/lib/python3.6/site-packages/numpy/__init__.py'>
pandas <module 'pandas' from '/Users/stacy/anaconda3/envs/tf2_py3/lib/python3.6/site-packages/pandas/__init__.py'>
sklearn <module 'sklearn' from '/Users/stacy/anaconda3/envs/tf2_py3/lib/python3.6/site-packages/sklearn/__init__.py'>
tensorflow <module 'tensorflow' from '/Users/stacy/anaconda3/envs/tf2_py3/lib/python3.6/site-packages/tensorflow/__init__.py'>
tensorflow_core.keras <module 'tensorflow_core.keras' from '/Users/stacy/anaconda3/envs/tf2_py3/lib/python3.6/site-packages/tensorflow_core/python/keras/api/_v2/keras/__init__.py'>
# load the dataset
mnist = keras.datasets.mnist

(x_train_all,y_train_all),(x_test,y_test) = mnist.load_data()  # load the data; everything comes back as numpy arrays
x_train_all,x_test = x_train_all / 255.0,x_test / 255.0  # scale pixel values to [0,1]
x_valid,x_train = x_train_all[:5000],x_train_all[5000:]
y_valid,y_train = y_train_all[:5000],y_train_all[5000:]
# plot a single image from the dataset
def show_single_image(img_arr):
    plt.imshow(img_arr,cmap="binary")
    plt.show()
show_single_image(x_train[1])

(figure: one digit from x_train, drawn by show_single_image)

# display several images
def show_imgs(n_rows,n_cols,x_data,y_data,class_names):
    assert len(x_data) == len(y_data)
    assert n_rows * n_cols < len(x_data)
    plt.figure(figsize = (n_cols * 1.4,n_rows * 1.6))
    for row in range(n_rows):
        for col in range(n_cols):
            index = n_cols * row + col
            plt.subplot(n_rows,n_cols,index+1)
            plt.imshow(x_data[index],cmap="binary")
            plt.axis("off")
            plt.title(class_names[y_data[index]])
    plt.show()
class_names = ["0","1","2","3","4","5","6","7","8","9"]
show_imgs(1,10,x_train,y_train,class_names)

(figure: the first 10 training digits with their class-name titles)

# tf.keras.models.Sequential()
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28,28]))
model.add(keras.layers.Dense(300,activation="relu"))
model.add(keras.layers.Dense(100,activation="relu"))
model.add(keras.layers.Dense(10,activation="softmax"))

# relu: y = max(0,x)
# softmax: turns a vector into a probability distribution: for x = [x1,x2,x3],
# y = [e^x1/sum,e^x2/sum,e^x3/sum] where sum = e^x1+e^x2+e^x3
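A quick numeric check of the softmax formula (a standalone sketch, not part of the model being built):

def softmax(x):
    e = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return e / e.sum()
print(softmax(np.array([1.0, 2.0, 3.0])))  # -> [0.09003057 0.24472847 0.66524096], sums to 1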


# Approach 2:
model = keras.models.Sequential([
    keras.layers.Dense(30,activation="relu",input_shape=x_train.shape[1:]),
    keras.layers.Dense(1)
])

# Approach 3:
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28,28]))
for _ in range(20):
    model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dense(10,activation="softmax"))



# for networks that are too deep:
# 1. many parameters, so training may be insufficient
# 2. vanishing gradients --> chain rule --> derivatives of composite functions f(g(x))

# remedy: add batch normalization
# option 1: add batch normalization before the activation
model.add(keras.layers.Dense(100))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation("relu"))

# option 2: add batch normalization after the activation
model.add(keras.layers.Dense(100,activation="relu"))
model.add(keras.layers.BatchNormalization())

# remedy: use selu as the activation function; it is self-normalizing and can mitigate vanishing gradients to some extent
model.add(keras.layers.Dense(100, activation="selu"))  

# remedy: add dropout
model.add(keras.layers.AlphaDropout(rate=0.5))  # rate is usually 0.5; AlphaDropout is a stronger variant of Dropout, usually added only to the last few layers
# what makes it stronger:
# 1. the mean and variance of the inputs stay unchanged
# 2. so the self-normalizing property is preserved as well
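A small sanity check of those two properties (a standalone sketch; the layer is applied directly to random data, outside any model):

x = tf.random.normal([10000, 100])  # roughly zero mean, unit variance
dropped = keras.layers.AlphaDropout(rate=0.5)(x, training=True)  # training=True activates dropout
print(tf.reduce_mean(x).numpy(), tf.math.reduce_std(x).numpy())
print(tf.reduce_mean(dropped).numpy(), tf.math.reduce_std(dropped).numpy())  # both should stay close to (0, 1)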
model.layers
[<tensorflow.python.keras.layers.core.Flatten at 0x1054922e8>,
 <tensorflow.python.keras.layers.core.Dense at 0x13d577b38>,
 <tensorflow.python.keras.layers.core.Dense at 0x1108cfda0>,
 <tensorflow.python.keras.layers.core.Dense at 0x139ee9160>]
model.summary()

# how the parameter counts are computed
# e.g. dense (Dense):
# [None,784] -> [None,300]: W has shape (784,300) plus a bias b of 300, i.e. 784*300+300 = 235500
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten (Flatten)            (None, 784)               0         
_________________________________________________________________
dense (Dense)                (None, 300)               235500    
_________________________________________________________________
dense_1 (Dense)              (None, 100)               30100     
_________________________________________________________________
dense_2 (Dense)              (None, 10)                1010      
=================================================================
Total params: 266,610
Trainable params: 266,610
Non-trainable params: 0
_________________________________________________________________
model.compile(loss = "sparse_categorical_crossentropy",
              optimizer = 'sgd',
              metrics = ["accuracy"]
             )

# y holds integer class indices, so loss = sparse_categorical_crossentropy
# if y were one-hot encoded, loss = categorical_crossentropy
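For reference, a sketch of the one-hot variant (hypothetical here; this notebook keeps the integer labels):

y_train_onehot = keras.utils.to_categorical(y_train, num_classes=10)
model.compile(loss = "categorical_crossentropy",
              optimizer = 'sgd',
              metrics = ["accuracy"]
             )
# model.fit(x_train, y_train_onehot, ...) would then take the one-hot labels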

history = model.fit(x_train,y_train,
                    epochs = 10,
                    validation_data = (x_valid,y_valid)
                   )
type(history)

# history is a keras.callbacks.History object
history.history  # a dict of per-epoch metric lists, with keys such as "loss", "accuracy", "val_loss", "val_accuracy"
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8,5))
    plt.grid(True)
    plt.ylim(0,1)
    plt.show()

plot_learning_curves(history)

regression example

from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
x_train_all,x_test,y_train_all,y_test = train_test_split(housing.data,housing.target,random_state=7)
x_train,x_valid,y_train,y_valid = train_test_split(x_train_all,y_train_all,random_state=11)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)  # fit the scaler on the training set only
x_valid_scaled = scaler.transform(x_valid)      # reuse the training-set statistics
x_test_scaled = scaler.transform(x_test)
model = keras.models.Sequential([
    keras.layers.Dense(30,activation="relu",input_shape=x_train.shape[1:]),
    keras.layers.Dense(1)
])
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_3 (Dense)              (None, 30)                270       
_________________________________________________________________
dense_4 (Dense)              (None, 1)                 31        
=================================================================
Total params: 301
Trainable params: 301
Non-trainable params: 0
_________________________________________________________________
model.compile(loss="mean_squared_error",optimizer="sgd")  #  也可以自己写函数进行映射
# callbacks
# commonly used callbacks: TensorBoard, EarlyStopping, ModelCheckpoint
# TensorBoard needs a log directory
logdir = "./callbacks"
if not os.path.exists(logdir):
    os.mkdir(logdir)

# ModelCheckpoint needs a file path
output_model_file = os.path.join(logdir,"fashion_mnist_model.h5")



# using the callbacks
callbacks = [
    keras.callbacks.TensorBoard(logdir),
    keras.callbacks.ModelCheckpoint(output_model_file,save_best_only=True),  # save_best_only=True keeps the best model; by default the most recent is saved
    keras.callbacks.EarlyStopping(patience = 5,min_delta = 1e-3)
]

# to view the TensorBoard logs,
# run in a terminal: tensorboard --logdir=callbacks
history = model.fit(
    x_train,y_train,  # note: x_train is unscaled while validation uses x_valid_scaled; this mismatch is the likely cause of the nan losses below, so x_train_scaled should be used here
    validation_data = (x_valid_scaled,y_valid),
    epochs = 100,
    callbacks = callbacks
)
Train on 11610 samples, validate on 3870 samples
Epoch 1/100
11610/11610 [==============================] - 1s 65us/sample - loss: nan - val_loss: nan
Epoch 2/100
11610/11610 [==============================] - 0s 38us/sample - loss: nan - val_loss: nan
Epoch 3/100
11610/11610 [==============================] - 0s 38us/sample - loss: nan - val_loss: nan
Epoch 4/100
11610/11610 [==============================] - 0s 39us/sample - loss: nan - val_loss: nan
Epoch 5/100
11610/11610 [==============================] - 0s 38us/sample - loss: nan - val_loss: nan
model.evaluate(x_test_scaled,y_test)
5160/5160 [==============================] - 0s 22us/sample - loss: nan

nan

building the wide & deep model

# Approach 1: the functional API
input = keras.layers.Input(shape = x_train.shape[1:])
hidden1 = keras.layers.Dense(30,activation="relu")(input)
hidden2 = keras.layers.Dense(30,activation="relu")(hidden1)
concat = keras.layers.concatenate([input,hidden2])
output = keras.layers.Dense(1)(concat)

model = keras.models.Model(inputs=[input],outputs = [output])  # solidify the graph into a Model


# multi-input, multi-output variant
input_wide = keras.layers.Input(shape = [5])  # the first 5 features go to the wide part
input_deep = keras.layers.Input(shape = [6])  # the last 6 features go to the deep part
hidden1 = keras.layers.Dense(30,activation="relu")(input_deep)
hidden2 = keras.layers.Dense(30,activation="relu")(hidden1)
concat = keras.layers.concatenate([input_wide,hidden2])
output = keras.layers.Dense(1)(concat)
output2 = keras.layers.Dense(1)(hidden2)
model = keras.models.Model(inputs=[input_wide,input_deep],outputs = [output,output2])
# the corresponding training call:
history = model.fit([x_train_scaled_wide,x_train_scaled_deep],
                    [y_train,y_train],
                    validation_data = ([x_valid_scaled_wide,x_valid_scaled_deep],
                                      [y_valid,y_valid]),
                    epochs = 100,
                    callbacks=callbacks
                   )
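The wide/deep split arrays above are never defined in this notebook; a minimal sketch following the comments (first 5 and last 6 of the 8 scaled features, overlapping in the middle):

x_train_scaled_wide = x_train_scaled[:, :5]
x_train_scaled_deep = x_train_scaled[:, 2:]
x_valid_scaled_wide = x_valid_scaled[:, :5]
x_valid_scaled_deep = x_valid_scaled[:, 2:]
x_test_scaled_wide = x_test_scaled[:, :5]
x_test_scaled_deep = x_test_scaled[:, 2:]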
# Approach 2: the subclassing API

class WideDeepModel(keras.models.Model):
    def __init__(self):
        super(WideDeepModel, self).__init__()
        """定义模型的层次"""
        self.hidden1_layer = keras.layers.Dense(30, activation="relu")
        self.hidden2_layer = keras.layers.Dense(30, activation="relu")
        self.ouput_layer = keras.layers.Dense(1)
        
    def call(self, input):
        """完成模型的正向计算"""
        hidden1 = self.hidden1_layer(input)
        hidden2 = self.hidden2_layer(hidden1)
        concat = keras.layers.concatenate([input, hidden2])
        output = self.output_layer(concat)
        return output
model = keras.models.Sequential([
    WideDeepModel(),
])
model.build(input_shape=(None,8))
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_2 (InputLayer)            [(None, 8)]          0                                            
__________________________________________________________________________________________________
dense_8 (Dense)                 (None, 30)           270         input_2[0][0]                    
__________________________________________________________________________________________________
dense_9 (Dense)                 (None, 30)           930         dense_8[0][0]                    
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 38)           0           input_2[0][0]                    
                                                                 dense_9[0][0]                    
__________________________________________________________________________________________________
dense_10 (Dense)                (None, 1)            39          concatenate_1[0][0]              
==================================================================================================
Total params: 1,239
Trainable params: 1,239
Non-trainable params: 0
__________________________________________________________________________________________________
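Equivalently (a sketch), the subclassed model can be used on its own instead of being wrapped in a Sequential:

model = WideDeepModel()
model.build(input_shape=(None,8))  # build() creates the weights so summary()/fit() can run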
# hyperparameter search: trying each learning rate by hand
learning_rates = [1e-4,3e-4,1e-3,3e-3,1e-2,3e-2]
histories = []
for lr in learning_rates:
    model = keras.models.Sequential([
        keras.layers.Dense(30,activation="relu",input_shape = x_train.shape[1:]),
        keras.layers.Dense(1)
    ])
    
    optimizer = keras.optimizers.Adam(lr)  # the learning rate is set here
    
    model.compile(loss = "mean_squared_error",optimizer=optimizer)  # 也可以自己写函数进行映射
    callbacks = [keras.callbacks.EarlyStopping(patience = 5,min_delta = 1e-3)]
    
    history = model.fit(x_train_scaled,y_train,
                        validation_data = (x_valid_scaled,y_valid),
                        epochs = 100,
                        callbacks = callbacks
                       )
    histories.append(history)
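One way to compare the runs is to reuse plot_learning_curves from the classification example (a sketch):

for lr, history in zip(learning_rates, histories):
    print("learning rate:", lr)
    plot_learning_curves(history)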
# hyperparameter search with sklearn's RandomizedSearchCV
# 1. wrap the model as a sklearn model
# 2. define the parameter distributions
# 3. run the search

def build_model(hidden_layers = 1,layer_size = 30,learning_rate = 3e-3):
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(layer_size, activation="relu",input_shape=x_train.shape[1:]))
    for _ in range(hidden_layers - 1):
        model.add(keras.layers.Dense(layer_size, activation="relu"))
    model.add(keras.layers.Dense(1))
    optimizer = keras.optimizers.Adam(learning_rate)
    model.compile(loss="mse",optimizer=optimizer)
    return model
sklearn_model = keras.wrappers.scikit_learn.KerasRegressor(build_model)
history = sklearn_model.fit(x_train_scaled,y_train,epochs = 100,
                           validation_data = (x_valid_scaled,y_valid))
from scipy.stats import reciprocal  # the reciprocal (log-uniform) distribution
# pdf: f(x) = 1/(x*log(b/a)) for a <= x <= b  # the probability density of each value

param_distribution = {
    "hidden_layers":[1,2,3,4],
    "layer_size":np.arange(1,3),
    "learning_rate":np.linspace(0.01,20)
}
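The reciprocal distribution imported above would plug straight into the search space; a sketch of that common alternative for the learning rate:

param_distribution = {
    "hidden_layers":[1,2,3,4],
    "layer_size":np.arange(1,3),
    "learning_rate":reciprocal(1e-4,1e-2)  # RandomizedSearchCV can sample from any scipy distribution with an rvs method
}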

from sklearn.model_selection import RandomizedSearchCV
random_search_cv = RandomizedSearchCV(
    sklearn_model,
    param_distribution,
    n_iter = 2,  # how many parameter settings to sample from the distributions
    n_jobs = 1
)
random_search_cv.fit(x_train_scaled,y_train,epochs = 100)
random_search_cv.best_params_
random_search_cv.best_score_
random_search_cv.best_estimator_
model = random_search_cv.best_estimator_.model  # retrieve the underlying tensorflow model
model.evaluate(x_test_scaled,y_test)