[Study Notes] L1 Regularization Programming

This exercise imposes two constraints: the total number of weights + biases must not exceed 600, and the log loss must not exceed 0.35. Now let's process the dataset.

Here we use median_house_value as the target: values above the 75th percentile are set to 1, and values below it to 0. Instead of the earlier approach, this time we bucketize every feature.
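The 265000 cutoff used below comes from the data itself; a quick check I added (not in the original) reproduces it:

import pandas as pd

df = pd.read_csv('california_housing_train.csv')
# Should print roughly 265000, the 75th percentile of median_house_value.
print(df['median_house_value'].quantile(0.75))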

import numpy as np
import pandas as pd
import tensorflow as tf

def gets_onehot(inputs, features, buckets):
    """Bucketize a pandas Series into `buckets` equal-width bins and
    return the resulting one-hot tensor (one column per bucket)."""
    _inputs = {features: inputs.values}
    df_fc = tf.feature_column.numeric_column(features)
    # buckets+1 evenly spaced points span the value range; dropping the two
    # endpoints leaves buckets-1 interior boundaries, i.e. `buckets` bins.
    _range = np.linspace(inputs.min(), inputs.max(), buckets + 1)
    _range = _range[1:-1]
    _column = tf.feature_column.bucketized_column(df_fc, list(_range))
    _tensor = tf.feature_column.input_layer(_inputs, [_column])
    return _tensor
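
As a quick sanity check (toy data I made up, not part of the exercise), bucketizing eight evenly spaced values into 4 buckets should yield one one-hot row of width 4 per value:

toy = pd.Series([0., 1., 2., 3., 4., 5., 6., 7.])
with tf.Session() as toy_sess:
    print(toy_sess.run(gets_onehot(toy, 'toy', 4)))  # shape (8, 4), one-hot rows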


df = pd.read_csv('california_housing_train.csv')
df['rooms_per_person'] = df['total_rooms'] / df['population']
# Note: sort_index() restores the original row order and undoes the shuffle
# on the previous line; drop it if you want a truly random train/val split.
df = df.reindex(np.random.permutation(df.index))
df = df.sort_index()
sess = tf.Session()
# Location features get 50 buckets each; all other features get 10.
df_longitude = gets_onehot(df['longitude'], 'longitude', 50)
df_latitude = gets_onehot(df['latitude'], 'latitude', 50)
df_housing_median_age = gets_onehot(df['housing_median_age'], 'housing_median_age', 10)
df_households = gets_onehot(df['households'], 'households', 10)
df_total_rooms = gets_onehot(df['total_rooms'], 'total_rooms', 10)
df_total_bedrooms = gets_onehot(df['total_bedrooms'], 'total_bedrooms', 10)
df_population = gets_onehot(df['population'], 'population', 10)
df_median_income = gets_onehot(df['median_income'], 'median_income', 10)
df_rooms_per_person = gets_onehot(df['rooms_per_person'], 'rooms_per_person', 10)
# Binary target: 1 if above 265000 (roughly the 75th percentile), else 0.
np_targets = np.array(((df['median_house_value'] > 265000).astype('float32')))[:, np.newaxis]
# Evaluate every one-hot tensor and stack them side by side:
# 50 + 50 + 7 * 10 = 170 columns in total.
np_concat = np.concatenate(sess.run([df_longitude, df_latitude, df_housing_median_age,
                                 df_households, df_total_rooms, df_total_bedrooms, df_population,
                                 df_median_income, df_rooms_per_person]), axis=1).copy()


np.save('california_one_hot.npy', np_concat)
np.save('california_targets.npy', np_targets)

Because setting up feature columns every time is too much hassle, I simply turn the data into a matrix, one column per feature bucket, and store it with numpy.
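A quick shape check of the saved matrices (my expectation given the bucket counts above):

features = np.load('california_one_hot.npy')
targets = np.load('california_targets.npy')
print(features.shape)  # expected (17000, 170): one column per bucket
print(targets.shape)   # expected (17000, 1): one binary label per row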

Now let's design the neural network. The design is similar to what we used before:

import tensorflow as tf
from tensorflow.data import Dataset

class _dnn():
    def my_input_fn(self, inputs, targets, batch_size=1, shuffle=False, num_epochs=None):
        ds = Dataset.from_tensor_slices((inputs, targets))
        # Shuffle individual examples before batching, not whole batches.
        if shuffle:
            ds = ds.shuffle(10000)
        ds = ds.batch(batch_size).repeat(num_epochs)
        features, labels = ds.make_one_shot_iterator().get_next()
        return features, labels

    def add_layers(self, n_layers, inputs, inputs_size, outputs_size, activation_function=None):
        layer_name = 'Layer_%s' % n_layers
        with tf.name_scope(layer_name):
            with tf.name_scope('Weights'):
                weights = tf.Variable(tf.random_normal([inputs_size, outputs_size], stddev=0.1))
                # Register this layer's L1 penalty in a collection so that
                # loss() can sum all the penalties with tf.add_n.
                tf.add_to_collection('losses', tf.contrib.layers.l1_regularizer(0.005)(weights))
                tf.summary.histogram(layer_name + 'weights', weights)
            with tf.name_scope('biases'):
                biases = tf.Variable(tf.zeros(outputs_size) + 0.1)
                tf.summary.histogram(layer_name + 'biases', biases)
            wx_b = tf.matmul(inputs, weights) + biases
            if activation_function is None:
                outputs = wx_b
            else:
                outputs = activation_function(wx_b)
            return weights, biases, outputs

    def loss(self, pred, targets, regularizer=True):
        # Clip predictions away from exactly 0 and 1 so tf.log can never
        # return -inf (a small safety tweak added here).
        pred = tf.clip_by_value(pred, 1e-8, 1.0 - 1e-8)
        loss = tf.reduce_mean(-targets * tf.log(pred) - (1 - targets) * tf.log(1 - pred))
        # The L1 penalties are added only for training; validation loss
        # is reported without them (regularizer=False).
        if regularizer is True:
            loss_total = loss + tf.add_n(tf.get_collection('losses'))
        else:
            loss_total = loss
        tf.summary.scalar('loss', loss_total)
        return loss_total

    def train_step(self, learning_rate, loss):
        train = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        return train


I did not use Ftrl here but stuck with Adam. Note that the scalar logged to TensorBoard is the training loss, not the final reported loss. The regularizer flag controls whether the L1 penalty is added to the loss, because we want regularization in the training loss but not in the validation loss.
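For comparison, the course's own solution uses FtrlOptimizer, which applies L1 regularization inside the optimizer itself. A sketch of what train_step would look like with it (an untested alternative; 0.005 is just the same strength reused):

def train_step(self, learning_rate, loss):
    # FtrlOptimizer handles the L1 penalty internally, so the manual
    # 'losses' collection in add_layers would be unnecessary here.
    train = tf.train.FtrlOptimizer(
        learning_rate, l1_regularization_strength=0.005).minimize(loss)
    return train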

import l1_regularization_dnn
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt


def roc(pred, targets, biases):
    # `biases` here is the classification threshold: predictions above it
    # count as positive, predictions at or below it as negative.
    if len(pred) != len(targets):
        raise Exception('prediction length does not match target length')
    else:
        TP = 0  # true positives
        TN = 0  # true negatives
        FP = 0  # false positives
        FN = 0  # false negatives
        for i in range(len(pred)):
            if pred[i] > biases and targets[i] == 1:
                TP += 1
            elif pred[i] > biases and targets[i] == 0:
                FP += 1
            elif pred[i] <= biases and targets[i] == 1:
                FN += 1
            else:
                TN += 1
    accuracy = (TP+TN)/(TP+TN+FP+FN)
    TPR = TP/(TP+FN)  # true positive rate (recall)
    FPR = FP/(FP+TN)  # false positive rate
    return accuracy, TPR, FPR


dnn = l1_regularization_dnn._dnn()
features = np.load('california_one_hot.npy')
targets = np.load('california_targets.npy')
# First 12000 examples for training, the remaining 5000 for validation.
train_features = features[0:12000, :]
validation_features = features[12000:, :]
train_targets = targets[0:12000, :]
validation_targets = targets[12000:, :]

xs, ys = dnn.my_input_fn(train_features, train_targets, batch_size=100, num_epochs=500)
# 170 one-hot inputs -> 3 linear hidden units -> 1 sigmoid output.
w1, b1, layer1 = dnn.add_layers('1', xs, 170, 3, activation_function=None)
w2, b2, layer2 = dnn.add_layers('2', layer1, 3, 1, activation_function=tf.nn.sigmoid)
loss = dnn.loss(layer2, ys)
train = dnn.train_step(0.005, loss)

sess = tf.Session()
init = tf.global_variables_initializer()

sess.run(init)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('logs/', sess.graph)

# Build the validation graph once, outside the loop; creating these ops
# inside the loop would keep growing the graph on every iteration.
val_l1_output = tf.matmul(validation_features, w1) + b1
val_pred = tf.nn.sigmoid(tf.matmul(val_l1_output, w2) + b2)
val_mean = tf.reduce_mean(val_pred - validation_targets)
val_loss = dnn.loss(val_pred, validation_targets, regularizer=False)

for i in range(2000):
    sess.run(train)
    if i % 10 == 0:
        run = sess.run([val_mean, val_loss])
        print('steps:', i, 'mean:', run[0], 'loss:', run[1])
        result = sess.run(merged)
        writer.add_summary(result, i)


# Final predictions on the validation set, reusing the graph built above.
prediction = sess.run(val_pred).copy()

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.ion()
plt.show()
ax.set_xlabel('FPR')
ax.set_ylabel('TPR')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)


# Sweep the threshold from 0 to 1 and plot one (FPR, TPR) point per step.
for i in np.arange(0., 1., 0.001):
    accuracy, tpr, fpr = roc(prediction, validation_targets, i)
    print('accuracy:', accuracy, 'threshold:', i)
    ax.scatter(fpr, tpr)
    plt.pause(0.1)


# tf.metrics.auc is a streaming metric backed by local variables,
# so those must be initialized before the update op runs.
pred_tensor = tf.convert_to_tensor(prediction)
targets_tensor = tf.convert_to_tensor(validation_targets)
auc_value, auc_op = tf.metrics.auc(targets_tensor, pred_tensor, num_thresholds=1000)
sess.run(tf.local_variables_initializer())
sess.run(auc_op)
print('AUC:', sess.run(auc_value))

With 170 input features, a hidden layer of 4 units would already need 170 × 4 = 680 weights, over the 600 limit. So we use 3 hidden units, giving 170 × 3 + 3 + 3 × 1 + 1 = 517 parameters, which satisfies the constraint (and if L1 regularization drives some weights to exactly 0, the effective model becomes even smaller).
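To double-check the parameter count programmatically, a small snippet I added (run after the graph is built):

total_params = sum(int(np.prod(v.get_shape().as_list())) for v in tf.trainable_variables())
print('trainable parameters:', total_params)  # 170*3 + 3 + 3*1 + 1 = 517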

I trained for 2000 steps (in practice around 500 is enough). As mentioned before, batch sizes generally fall between 10 and 1000; the original exercise used 100, so we use 100 here too. The ROC curve is still hand-rolled; apparently there are built-in implementations, so in the future I may just use those.
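For reference, scikit-learn (an extra dependency, not used in this post) computes the whole curve in one call:

from sklearn.metrics import roc_curve

# One call returns FPR/TPR over all thresholds; plot them directly.
fpr, tpr, thresholds = roc_curve(validation_targets.ravel(), prediction.ravel())
plt.plot(fpr, tpr)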

Running sess.run(auc_value) before sess.run(auc_op) makes auc_value come out as 0, which causes problems.
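That's because tf.metrics.auc is a streaming metric: auc_op updates internal counters, while auc_value only reads them. A minimal demonstration of the ordering:

sess.run(tf.local_variables_initializer())
print(sess.run(auc_value))  # 0.0 -- the streaming counters are still empty
sess.run(auc_op)            # accumulates TP/FP/TN/FN at each threshold
print(sess.run(auc_value))  # now reflects the accumulated statistics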

When calling tf.metrics.auc(targets_tensor, pred_tensor, num_thresholds=...), remember to initialize the local variables first, or you will get an error. tf.convert_to_tensor is a very useful function: it can convert tensors, numpy arrays, Python lists, and Python scalars into tensors. To see exactly which scalar types count, we can print np.ScalarType:

for i in np.ScalarType:
    print(i)
    
<class 'int'>
<class 'float'>
<class 'complex'>
<class 'int'>
<class 'bool'>
<class 'bytes'>
<class 'str'>
<class 'memoryview'>
<class 'numpy.bool_'>
<class 'numpy.int8'>
<class 'numpy.uint8'>
<class 'numpy.int16'>
<class 'numpy.uint16'>
<class 'numpy.int32'>
<class 'numpy.uint32'>
<class 'numpy.int64'>
<class 'numpy.uint64'>
<class 'numpy.int64'>
<class 'numpy.uint64'>
<class 'numpy.float16'>
<class 'numpy.float32'>
<class 'numpy.float64'>
<class 'numpy.float128'>
<class 'numpy.complex64'>
<class 'numpy.complex128'>
<class 'numpy.complex256'>
<class 'numpy.object_'>
<class 'numpy.bytes_'>
<class 'numpy.str_'>
<class 'numpy.void'>
<class 'numpy.datetime64'>
<class 'numpy.timedelta64'>
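
For illustration, a few tf.convert_to_tensor calls on different input types (my own examples):

t_array = tf.convert_to_tensor(np.array([1.0, 2.0]))  # numpy array -> float64 tensor
t_list = tf.convert_to_tensor([[1, 2], [3, 4]])       # nested Python list -> int32 tensor
t_scalar = tf.convert_to_tensor(3.14)                 # Python float -> float32 tensor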

Once the network finishes training, the plotting starts; the final result looks something like this:

Admittedly, this plot isn't very meaningful (it comes down to how the threshold is chosen); if you don't feel like keeping it, just delete that block of code.

What really matters to us is the final AUC value, which tells us how good the model is. Running the program above should give an AUC of around 0.9, which is decent and much better than the earlier classification exercise. With that, we have completed our first classification neural network (sigmoid only works for binary classification).

The original course still has four chapters left on fully connected networks; I'll try to finish them over the next four days. Google has also released a CNN course.

I will translate the content and include code walkthroughs. Beyond the CNN course, nothing further has been released yet, but we will cover other topics, all TensorFlow-related of course.

Reposted from blog.csdn.net/Canon__/article/details/82914316