conv_output_length: getting the output dimensions of the defined convolution and pooling layers before the fully connected layer

1. Problem description

When writing a custom Keras layer class, the convolution, pooling, and fully connected layers are all defined in the build method. Because the fully connected layer has to be given its input dimension, which depends on the shape left after each convolution-pooling step, we want to obtain the output size of the defined convolution and pooling layers while still inside build.

Alternatively, the fully connected layer can be defined inside call, so its size does not have to be known in advance, but the model then has to be trained with a custom training loop; a minimal sketch of this lazy approach follows the link below.
For custom-loop training, see: https://blog.csdn.net/qq_42363032/article/details/122703588?spm=1001.2014.3001.5501
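For reference, here is a minimal sketch of that lazy alternative. The class name and shapes are illustrative, not taken from the linked post: the Dense layer is created on the first call, once the pooled shape is known, so build needs no shape arithmetic at all.

import tensorflow as tf
from tensorflow.keras import layers

class LazyRecombination(layers.Layer):
    # illustrative sketch only: defer creating the Dense layer until the
    # first call; this requires custom-loop training rather than model.fit
    def __init__(self, dnn_map):
        super(LazyRecombination, self).__init__()
        self.dnn_map = dnn_map
        self.flatten = layers.Flatten()
        self.dense = None

    def call(self, x):  # x: [None, rows, k, filters], e.g. a pooling output
        rows, k = x.shape[1], x.shape[2]
        if self.dense is None:
            # the unit count is only known here, from the actual input shape
            self.dense = layers.Dense(rows * k * self.dnn_map, activation='tanh')
        out = self.dense(self.flatten(x))
        return tf.reshape(out, (-1, out.shape[1] // k, k))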

Since I am implementing FGCNN here, the demo data is structured (tabular) data.

2. Defining Dense in build by computing the convolution/pooling output sizes in advance

Key code: build

def build(self, input_shape):
    embedding_size = int(input_shape[-1])
    pooling_shape = input_shape.as_list() + [1, ]  # [None, n, k, 1]

    self.conv_layers = []
    self.pooling_layers = []
    self.dense_layers = []
    for i in range(len(self.filters)):
        conv_output_shape = self._conv_output_shape(pooling_shape, (self.kernel_with[i], 1), self.filters[i])
        pooling_shape = self._pooling_output_shape(conv_output_shape, (self.pooling_width[i], 1))

        self.conv_layers.append(
            layers.Conv2D(filters=self.filters[i], kernel_size=(self.kernel_with[i], 1), strides=(1, 1), padding='same', activation='tanh')
        )
        self.pooling_layers.append(
            layers.MaxPooling2D(pool_size=(self.pooling_width[i], 1))
        )
        # units = pooled rows * embedding size * number of generated feature maps
        self.dense_layers.append(layers.Dense(pooling_shape[1] * embedding_size * self.dnn_maps[i], activation='tanh', use_bias=True))
    self.flatten_layer = layers.Flatten()

def _conv_output_shape(self, input_shape, kernel_size, filters):
    space = input_shape[1:-1]  # [n, k]
    new_space = []
    for i in range(len(space)):
        new_dim = utils.conv_output_length(space[i], kernel_size[i], padding='same', stride=1, dilation=1)
        new_space.append(new_dim)
    return [input_shape[0]] + new_space + [filters]  # [None, n', k', filters]

def _pooling_output_shape(self, input_shape, pool_size):
    rows = input_shape[1]
    cols = input_shape[2]
    rows = utils.conv_output_length(rows, pool_size[0], 'valid', pool_size[0])
    cols = utils.conv_output_length(cols, pool_size[1], 'valid', pool_size[1])
    return [input_shape[0], rows, cols, input_shape[3]]
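As a quick sanity check of what utils.conv_output_length returns here, the numbers below assume the setup used later in this post (n = 26 sparse fields, pooling width 2 in both layers):

from tensorflow.python.layers import utils

rows = 26  # number of feature fields n
for pool_width in [2, 2]:
    # the 'same' convolution with stride 1 leaves the row count unchanged,
    # so only the 'valid' pooling shrinks it
    rows = utils.conv_output_length(rows, pool_width, 'valid', pool_width)
    print(rows)  # 13 after the first pooling, 6 after the second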

Key code: call

def call(self, inputs, **kwargs):
    k = inputs.shape[-1]
    new_feature_list = []
    x = tf.expand_dims(inputs, axis=-1)  # [None, n, k, 1], the last dim is the channel
    for i in range(len(self.filters)):
        x = self.conv_layers[i](x)     # [None, n, k, filters[i]]
        x = self.pooling_layers[i](x)  # [None, n/poolwidth[i], k, filters[i]]
        out = self.flatten_layer(x)    # [None, n/poolwidth[i] * k * filters[i]]
        out = self.dense_layers[i](out)

        # fold the Dense output back into k-dimensional embeddings: [None, units/k, k]
        out = tf.reshape(out, shape=(-1, out.shape[1] // k, k))

        new_feature_list.append(out)
    output = tf.concat(new_feature_list, axis=1)
    return output
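Tracing the shapes by hand with the default hyper-parameters (filters=[14, 16], pooling_width=[2, 2], dnn_maps=[3, 3]) and the Criteo setup used below (n = 26 fields, k = 8): the first round pools 26 rows down to 13, so its Dense emits 13 * 8 * 3 = 312 units, reshaped to [None, 39, 8]; the second round pools 13 down to 6 and emits 6 * 8 * 3 = 144 units, reshaped to [None, 18, 8]; the concatenation is therefore [None, 57, 8]. A throwaway sketch of that arithmetic:

n, k = 26, 8                              # fields and embedding size used below
pooling_width, dnn_maps = [2, 2], [3, 3]

rows, new_rows = n, 0
for i in range(len(pooling_width)):
    rows = rows // pooling_width[i]       # rows after pooling: 26 -> 13 -> 6
    dense_units = rows * k * dnn_maps[i]  # 312, then 144
    new_rows += dense_units // k          # 39, then 18 generated rows
print(new_rows)                           # 57, i.e. fgcnn_out is [None, 57, 8]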

Full code

# coding:utf-8
import time
import numpy as np, pandas as pd
import tensorflow as tf
from tensorflow.python.layers import utils
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras import optimizers
from tensorflow.keras import metrics
from tensorflow.keras import regularizers
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, roc_auc_score
from sklearn.utils import shuffle

# local helper modules: denseFeature/sparseFeature and criteo_sampled_data_path
from tools import *
from settings import *

class DNN_layers(layers.Layer):

    def __init__(self, hidden_units, activation, dropout):
        super(DNN_layers, self).__init__()
        self.DNN = tf.keras.Sequential()
        for hidden in hidden_units:
            # self.DNN.add(layers.Dense(hidden, kernel_regularizer=regularizers.l2()))
            self.DNN.add(layers.Dense(hidden))
            self.DNN.add(layers.BatchNormalization())
            self.DNN.add(layers.Activation(activation))
            self.DNN.add(layers.Dropout(dropout))

    def call(self, inputs, **kwargs):
        return self.DNN(inputs)

class FGCNN_layers(layers.Layer):
    def __init__(self, filters=[14, 16], kernel_with=[7, 7], dnn_maps=[3, 3], pooling_width=[2, 2]):
        super(FGCNN_layers, self).__init__()
        self.filters = filters
        self.kernel_with = kernel_with
        self.dnn_maps = dnn_maps
        self.pooling_width = pooling_width

    def build(self, input_shape):
        embedding_size = int(input_shape[-1])
        pooling_shape = input_shape.as_list() + [1, ]  # [None, n, k, 1]

        self.conv_layers = []
        self.pooling_layers = []
        self.dense_layers = []
        for i in range(len(self.filters)):
            conv_output_shape = self._conv_output_shape(pooling_shape, (self.kernel_with[i], 1), self.filters[i])
            pooling_shape = self._pooling_output_shape(conv_output_shape, (self.pooling_width[i], 1))

            self.conv_layers.append(
                layers.Conv2D(filters=self.filters[i], kernel_size=(self.kernel_with[i], 1), strides=(1, 1), padding='same', activation='tanh')
            )
            self.pooling_layers.append(
                layers.MaxPooling2D(pool_size=(self.pooling_width[i], 1))
            )
            # units = pooled rows * embedding size * number of generated feature maps
            self.dense_layers.append(layers.Dense(pooling_shape[1] * embedding_size * self.dnn_maps[i], activation='tanh', use_bias=True))
        self.flatten_layer = layers.Flatten()

    def call(self, inputs, **kwargs):
        k = inputs.shape[-1]
        new_feature_list = []
        x = tf.expand_dims(inputs, axis=-1)  # [None, n, k, 1], the last dim is the channel
        for i in range(len(self.filters)):
            x = self.conv_layers[i](x)     # [None, n, k, filters[i]]
            x = self.pooling_layers[i](x)  # [None, n/poolwidth[i], k, filters[i]]
            out = self.flatten_layer(x)    # [None, n/poolwidth[i] * k * filters[i]]
            out = self.dense_layers[i](out)

            # fold the Dense output back into k-dimensional embeddings: [None, units/k, k]
            out = tf.reshape(out, shape=(-1, out.shape[1] // k, k))

            new_feature_list.append(out)
        output = tf.concat(new_feature_list, axis=1)
        return output

    def _conv_output_shape(self, input_shape, kernel_size, filters):
        space = input_shape[1:-1]  # [n, k]
        new_space = []
        for i in range(len(space)):
            new_dim = utils.conv_output_length(space[i], kernel_size[i], padding='same', stride=1, dilation=1)
            new_space.append(new_dim)
        return [input_shape[0]] + new_space + [filters]  # [None, n', k', filters]

    def _pooling_output_shape(self, input_shape, pool_size):
        rows = input_shape[1]
        cols = input_shape[2]
        rows = utils.conv_output_length(rows, pool_size[0], 'valid', pool_size[0])
        cols = utils.conv_output_length(cols, pool_size[1], 'valid', pool_size[1])
        return [input_shape[0], rows, cols, input_shape[3]]

class FGCNN(Model):
    def __init__(self, dense_feature_columns, sparse_feature_columns, hidden_units, activation, dropout,
                 filters=[14, 16], kernel_with=[7, 7], dnn_maps=[3, 3], pooling_width=[2, 2]):
        super(FGCNN, self).__init__()

        self.sparse_feature_columns = sparse_feature_columns
        self.dense_feature_columns = dense_feature_columns

        self.emb_layers = [layers.Embedding(feat['vocabulary_size'], feat['embed_dim']) for i, feat in enumerate(self.sparse_feature_columns)]

        self.fgcnn_layers = FGCNN_layers(filters=filters, kernel_with=kernel_with, dnn_maps=dnn_maps, pooling_width=pooling_width)
        self.dnn_layers = DNN_layers(hidden_units, activation, dropout)
        self.out_layers = layers.Dense(1)

    def call(self, inputs, training=None, mask=None):
        dense_inputs, sparse_inputs = inputs[:, :13], inputs[:, 13:]

        sparse_embed = [layer(sparse_inputs[:, i]) for i, layer in enumerate(self.emb_layers)]  # 26 * [None, k]
        # the convolution expects a 4-D input; the channel dim is added inside FGCNN_layers
        sparse_embed = tf.transpose(tf.convert_to_tensor(sparse_embed), [1, 0, 2])  # [None, 26, k]

        fgcnn_out = self.fgcnn_layers(sparse_embed)  # [None, new_n, k]

        sparse_embed = tf.concat([sparse_embed, fgcnn_out], axis=1)
        sparse_embed = tf.reshape(sparse_embed, shape=[-1, sparse_embed.shape[1] * sparse_embed.shape[2]])

        dnn_input = tf.concat([dense_inputs, sparse_embed], axis=1)

        out = self.dnn_layers(dnn_input)
        return tf.nn.sigmoid(self.out_layers(out))


if __name__ == '__main__':
    data = pd.read_csv(criteo_sampled_data_path)

    data = shuffle(data, random_state=42)

    data_X = data.iloc[:, 1:]
    data_y = data['label'].values

    # I1-I13: 13 numeric (dense) features in total
    # C1-C26: 26 categorical (sparse) features in total
    dense_features = ['I' + str(i) for i in range(1, 14)]
    sparse_features = ['C' + str(i) for i in range(1, 27)]

    dense_feature_columns = [denseFeature(feat) for feat in dense_features]
    sparse_feature_columns = [sparseFeature(feat, data_X[feat].nunique(), 8) for feat in sparse_features]

    feature_columns = dense_feature_columns + sparse_feature_columns

    tmp_X, test_X, tmp_y, test_y = train_test_split(data_X, data_y, test_size=0.05, random_state=42, stratify=data_y)
    train_X, val_X, train_y, val_y = train_test_split(tmp_X, tmp_y, test_size=0.05, random_state=42, stratify=tmp_y)
    print(len(train_y))
    print(len(val_y))
    print(len(test_y))

    model = FGCNN(dense_feature_columns=dense_feature_columns, sparse_feature_columns=sparse_feature_columns,
                  hidden_units=(64, 128, 256), activation='relu', dropout=0.0)

    callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-8, patience=3, verbose=1)]

    # adam = optimizers.Adam(lr=config['train']['adam_lr'], beta_1=0.95, beta_2=0.96, decay=config['train']['adam_lr'] / config['train']['epochs'])
    adam = optimizers.Adam(learning_rate=1e-4, beta_1=0.95, beta_2=0.96)

    model.compile(
        optimizer=adam,
        loss='binary_crossentropy',
        metrics=[metrics.AUC(), metrics.Precision(), metrics.Recall(), 'accuracy'],
        # metrics=[metrics.AUC(), 'accuracy'],
        # run_eagerly=True,
    )
    model.fit(
        train_X.values, train_y,
        validation_data=(val_X.values, val_y),
        batch_size=2000,
        epochs=10,
        verbose=2,
        shuffle=True,
        # callbacks=callbacks,
    )

    scores = model.evaluate(test_X.values, test_y, verbose=2)
    print(' %s: %.4f' % (model.metrics_names[0], scores[0]))
    print(' %s: %.4f' % (model.metrics_names[1], scores[1]))
    print(' %s: %.4f' % (model.metrics_names[2], scores[2]))
    print(' %s: %.4f' % (model.metrics_names[3], scores[3]))
    print(' %s: %.4f' % ('F1', (2 * scores[2] * scores[3]) / (scores[2] + scores[3])))
    print(' %s: %.4f' % (model.metrics_names[4], scores[4]))
    # y_pre_sc = model.predict(test_X.values, batch_size=256)
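The script above depends on denseFeature, sparseFeature, and criteo_sampled_data_path from the author's local tools and settings modules, which the post does not show. Below is a minimal sketch of what they would need to provide, inferred from how they are used here; everything beyond the 'vocabulary_size' and 'embed_dim' keys (including the 'feat' key and the file path) is an assumption:

# assumed stand-ins for the local tools/settings modules; only the
# 'vocabulary_size' and 'embed_dim' keys are actually read by the model
def denseFeature(feat):
    return {'feat': feat}

def sparseFeature(feat, vocabulary_size, embed_dim):
    return {'feat': feat, 'vocabulary_size': vocabulary_size, 'embed_dim': embed_dim}

criteo_sampled_data_path = './criteo_sampled_data.csv'  # assumed local path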

Reposted from blog.csdn.net/qq_42363032/article/details/122836932