04-07 TensorFlow 2.0: deep neural network + batch normalization + SELU activation + Dropout

1. Deep neural network (DNN)

A DNN here simply means a network built by stacking many fully connected layers (a minimal sketch follows the list below). Adding more layers does not always improve performance; beyond a certain depth the accuracy can actually drop, because:

  • More layers mean far more parameters, so the model is harder to train and easier to overfit
  • Gradients vanish as they are back-propagated through many layers
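
A minimal sketch of such a stack (assuming the tf/keras imports from the full code further down; the depth of 10 and the width of 100 are simply the values used there):

model = tf.keras.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(10):  # 10 fully connected hidden layers
    model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))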

2. Batch normalization

  • Normalizes each layer's inputs to roughly zero mean and unit variance, which alleviates vanishing gradients (see the sketch below)
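
A minimal sketch of the two common placements, both of which also appear in the full code below (again assuming the keras import from that code; the layer width of 100 is illustrative):

# placement 1: batch normalization after the activation
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.BatchNormalization())

# placement 2: batch normalization between the linear layer and the activation
model.add(keras.layers.Dense(100))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation('relu'))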

3. New activation function: SELU

  • Shorter training time than ReLU for deep networks
  • Better training performance, because SELU is self-normalizing: it keeps the activations close to zero mean and unit variance as they pass through the layers (see the sketch below)
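
For reference, SELU is a scaled exponential linear unit. Below is a small sketch of its definition (the two constants are the standard published values, rounded) and of how it is selected by name in Keras (assuming the keras import from the full code below):

import numpy as np

def selu(x, scale=1.0507, alpha=1.6733):  # standard SELU constants, rounded
    return scale * np.where(x > 0, x, alpha * (np.exp(x) - 1))

# in Keras it can simply be selected by name:
layer = keras.layers.Dense(100, activation='selu')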

4. Dropout prevents overfitting

Dropout is usually applied only to the last few fully connected layers (as in the sketch after this list and in the full code below)

  • Dropout: randomly zeroes a fraction of the activations during training
  • AlphaDropout: performs better in combination with SELU, because
    1. the mean and variance of the activations remain unchanged
    2. so the self-normalizing property of the network is preserved
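
A minimal sketch of AlphaDropout placed just before the output layer, matching the rate of 0.5 used in the full code below:

model.add(keras.layers.AlphaDropout(rate=0.5))  # dropout only near the end of the network
model.add(keras.layers.Dense(10, activation='softmax'))

The complete Fashion-MNIST example below puts the DNN, batch normalization, SELU and AlphaDropout together.
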
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
# Print the Python version and each library's name and version
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
 
# Select the GPU and let its memory usage grow on demand
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
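# In pure TF 2.x, the same memory-growth behaviour can also be enabled without the compat API:
# for gpu in tf.config.experimental.list_physical_devices('GPU'):
#     tf.config.experimental.set_memory_growth(gpu, True)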

# Load the Fashion-MNIST dataset and split off a validation set
fashion_mnist = keras.datasets.fashion_mnist
(x_train_all,y_train_all),(x_test_all,y_test_all) = fashion_mnist.load_data()
x_valid , x_train = x_train_all[:5000],x_train_all[5000:]
y_valid , y_train = y_train_all[:5000],y_train_all[5000:]
x_test , y_test = x_test_all,y_test_all

print(x_train.shape,y_train.shape)
print(x_valid.shape,y_valid.shape)
print(x_test.shape,y_test.shape)

# Standardize the data: fit the scaler on the training set only, then reuse it for validation/test.
# reshape(-1,1) flattens the images into one column for StandardScaler; reshape(-1,28,28) restores them.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(
    x_train.astype(np.float32).reshape(-1,1)).reshape(-1,28,28)

x_valid_scaled = scaler.transform(
    x_valid.astype(np.float32).reshape(-1,1)).reshape(-1,28,28)

x_test_scaled = scaler.transform(
    x_test.astype(np.float32).reshape(-1,1)).reshape(-1,28,28)


# Build the model
model = tf.keras.Sequential()
model.add(keras.layers.Flatten(input_shape=[28,28]))
for _ in range(10): # DNN: stack multiple fully connected layers
    model.add(keras.layers.Dense(100,activation='selu')) # use the SELU activation instead of ReLU
    model.add(keras.layers.BatchNormalization()) # batch normalization: just add this layer here
    '''
    The activation can also be placed after batch normalization:
    model.add(keras.layers.Dense(100))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Activation('relu'))
    '''
model.add(keras.layers.AlphaDropout(rate=0.5)) # apply AlphaDropout only before the output layer
model.add(keras.layers.Dense(10, activation='softmax'))
model.add(keras.layers.Dense(10, activation='softmax'))


# Compile the model
model.compile(loss='sparse_categorical_crossentropy',
               optimizer='sgd',
               metrics=['accuracy'])

# Train the model; history.history holds the per-epoch loss and metric values
# callbacks: TensorBoard, ModelCheckpoint, EarlyStopping
logdir = './07-callbacks'
if not os.path.exists(logdir):
    os.makedirs(logdir)
output_model_file = os.path.join(logdir,'fashion_mnist_model.h5')
callbacks = [
    keras.callbacks.TensorBoard(logdir),
    keras.callbacks.ModelCheckpoint(output_model_file,
                                    save_best_only = True), # only keep the best model (lowest validation loss)
    keras.callbacks.EarlyStopping(patience=5,min_delta=1e-3) # stop when val_loss improves by < 1e-3 for 5 epochs in a row
]

history = model.fit(x_train_scaled,y_train,
                    epochs=10,
                    validation_data=(x_valid_scaled,y_valid),
                    callbacks = callbacks)

# Plot the learning curves stored in history.history
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8,5))
    plt.grid(True)
    plt.gca().set_ylim(0,1)
    plt.show()
plot_learning_curves(history)

# Evaluate on the test set
model.evaluate(x_test_scaled,y_test)
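
Since ModelCheckpoint wrote the best model to disk, it can be reloaded later and evaluated the same way; a minimal sketch using the path defined above:

best_model = keras.models.load_model(output_model_file)
best_model.evaluate(x_test_scaled, y_test)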

Origin blog.csdn.net/qq_44783177/article/details/108088777