I implemented a linear model in TensorFlow myself, but its results were much worse than the model provided by sklearn, so I experimented with changing the optimization algorithm, the regularization, the loss function, and the normalization. This post records all of those attempts and my takeaways from the experiments.
import numpy as np
import tensorflow as tf
import sklearn
import pandas as pd
class Model:
    def __init__(self, sess, feature_size, step, learning_rate, regulation):
        self.sess = sess
        self.feature_size = feature_size
        self.step = step
        self.learning_rate = learning_rate
        self.regulation = regulation
        self.build_model()
        self.add_loss()  # switch to 'l1' or 'huber' to reproduce the loss experiments below
        self.add_optimizer()
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())

    def build_model(self):
        self.x = tf.placeholder(shape=[None, self.feature_size], dtype=tf.float32)
        self.y_true = tf.placeholder(shape=[None, 1], dtype=tf.float32)
        with tf.name_scope('linear_model'):
            l2_reg = tf.contrib.layers.l2_regularizer(0.1)
            self.w = tf.get_variable(name='w', shape=[self.feature_size, 1],
                                     initializer=tf.truncated_normal_initializer(), regularizer=l2_reg)
            self.b = tf.get_variable(name='b', shape=[1],
                                     initializer=tf.truncated_normal_initializer(stddev=1, seed=1))
            self.y_pred = tf.matmul(self.x, self.w) + self.b

    def add_loss(self, loss='l2'):
        reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if loss == 'l2':
            self.loss = tf.reduce_mean(tf.square(self.y_true - self.y_pred))
        if loss == 'l1':
            self.loss = tf.reduce_mean(tf.abs(self.y_true - self.y_pred))
        if loss == 'huber':
            # The larger delta is, the steeper the linear parts on both sides and the larger the loss.
            delta = tf.constant(0.25)
            self.loss = tf.reduce_mean(tf.multiply(
                tf.square(delta),
                tf.sqrt(1. + tf.square((self.y_true - self.y_pred) / delta)) - 1.))
        self.loss += tf.add_n(reg_variables)

    def add_optimizer(self):
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        self.train_step = self.optimizer.minimize(self.loss)

    def predict(self, x_test):
        # Reuse the graph built in build_model instead of adding new ops on every call.
        return self.sess.run(self.y_pred, feed_dict={self.x: x_test})

    def train(self, train_data, train_label):
        # Actually run the optimizer for `step` iterations, then report the final loss.
        feed = {self.x: train_data, self.y_true: train_label}
        for _ in range(self.step):
            self.sess.run(self.train_step, feed_dict=feed)
        loss, y_pred = self.sess.run([self.loss, self.y_pred], feed_dict=feed)
        return loss, y_pred
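A note on the 'huber' branch above: what it actually implements is the pseudo-Huber loss, a smooth approximation of the Huber loss:

$L_\delta(r) = \delta^2\left(\sqrt{1 + (r/\delta)^2} - 1\right), \quad r = y_{true} - y_{pred}$

For small residuals this behaves like $r^2/2$, and for large ones like $\delta\,|r|$, which is why a larger delta makes the linear parts steeper and the loss larger, as the code comment says.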
if __name__ == '__main__':
    feature_size, step, learning_rate, regulation = 100, 1000, 0.0001, 'L2'
    sample_size = 30
    x = [list(np.random.rand(feature_size)) for _ in range(sample_size)]
    y = [np.random.rand(1) for _ in range(sample_size)]
    # Mean normalization: center each feature and scale it by its range.
    x = pd.DataFrame(x).apply(lambda col: (col - np.mean(col)) / (np.max(col) - np.min(col))).values

    with tf.Session() as sess:
        model = Model(sess, feature_size, step, learning_rate, regulation)
        loss, _ = model.train(x, y)
        # print('loss is ', loss)
        pred_test = model.predict(x)
        # print('pred label\ttrue label')
        # for each in zip(pred_test, y):
        #     print(round(each[0][0], 6), '\t', round(each[1][0], 6))
        loss = sum([(each[0][0] - each[1][0]) ** 2 for each in zip(pred_test, y)])
        print('LR net loss ', loss)

        from sklearn import linear_model
        reg = linear_model.LinearRegression()
        reg.fit(x, y)
        pred_test1 = reg.predict(x)
        loss = sum([(each[0][0] - each[1][0]) ** 2 for each in zip(pred_test1, y)])
        print('sklearn loss ', loss)

        import matplotlib.pyplot as plt
        fig = plt.figure()
        idx = list(range(sample_size))  # sample indices for the x-axis (don't overwrite the data matrix x)
        plt.plot(idx, y, 'k*-', markersize=12)
        plt.plot(idx, [each[0] for each in pred_test], 'r.-', markersize=12)
        plt.plot(idx, [each[0] for each in pred_test1], 'b.-', markersize=12)
        plt.legend(('true', 'my', 'Linear Fit'), loc='lower right')
        plt.title('regression compare')
        plt.show()
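One loose end in the listing: the `regulation` argument is stored but never used, so the runs below that compare L1 and L2 regularization need the regularizer swapped manually in build_model. A minimal sketch of making that switch, assuming TF 1.x's tf.contrib.layers (the helper name is mine):

def pick_regularizer(regulation, scale=0.1):
    # 'L1' selects a lasso-style penalty; anything else keeps the L2 penalty used above.
    if regulation.lower() == 'l1':
        return tf.contrib.layers.l1_regularizer(scale)
    return tf.contrib.layers.l2_regularizer(scale)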
The experiment log follows.

Weight initialization matters a lot: initializing the weights to all zeros makes every prediction come out 0, and with the normal-distribution initialization the initial predictions all come out negative.
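For reference, a minimal sketch (TF 1.x API; the variable names and the stddev value are mine) of the two initializations being compared:

import tensorflow as tf

feature_size = 100  # matches the experiment above

# All-zero weights: every prediction starts at the bias alone.
w_zero = tf.get_variable('w_zero', shape=[feature_size, 1],
                         initializer=tf.zeros_initializer())

# Truncated normal: small random weights give non-degenerate starting
# predictions; stddev controls their initial spread.
w_norm = tf.get_variable('w_norm', shape=[feature_size, 1],
                         initializer=tf.truncated_normal_initializer(stddev=0.1))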
GradientDescentOptimizer
LR net loss [74.816734]
sklearn loss 4.22780141391886e-30

MomentumOptimizer
LR net loss [1.5802944]
sklearn loss 2.1308488904700377e-30
If the data is sparse, use one of the four algorithms below; to also get out of saddle points faster, use Adam, which adds momentum (a parameterized sketch follows the link below).
https://segmentfault.com/a/1190000012668819
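A minimal sketch of parameterizing add_optimizer so that each run below only changes a name (all six classes exist in tf.train in TF 1.x; the helper and the momentum value 0.9 are my choices):

def make_optimizer(name, learning_rate):
    # Map experiment names to tf.train optimizer instances.
    optimizers = {
        'sgd': tf.train.GradientDescentOptimizer(learning_rate),
        'momentum': tf.train.MomentumOptimizer(learning_rate, momentum=0.9),
        'adagrad': tf.train.AdagradOptimizer(learning_rate),
        'rmsprop': tf.train.RMSPropOptimizer(learning_rate),
        'adadelta': tf.train.AdadeltaOptimizer(learning_rate),
        'adam': tf.train.AdamOptimizer(learning_rate),
    }
    return optimizers[name]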
AdagradOptimizer (very friendly to sparse data)
LR net loss [19.184008]
sklearn loss 1.1571757477856268e-29

RMSPropOptimizer
LR net loss [1.3790985]
sklearn loss 5.1738182026018704e-30

AdadeltaOptimizer (results are very unstable)
LR net loss [16.51035]
sklearn loss 7.90786835165399e-30

AdamOptimizer
LR net loss [0.98462635]
sklearn loss 5.571330143123396e-30

AdamOptimizer + L2 regularization
LR net loss [0.0768552]
sklearn loss 4.7639803104362666e-30

AdamOptimizer + L1 regularization
LR net loss [5.0768552]
sklearn loss 4.7639803104362666e-30

AdamOptimizer + L2 regularization + Huber loss (previously the default L2 loss)
LR net loss [0.58679754]
sklearn loss 3.3163743270370446e-29

AdamOptimizer + L2 regularization + L2 loss + normalization
LR net loss [0.989549]
sklearn loss 1.8846380063795735e-29

AdamOptimizer + L2 regularization + L2 loss + normalization + changed inference method
LR net loss [1.4737219]
sklearn loss 8.079969451484434e-29
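One observation on why the sklearn losses sit at 1e-29 to 1e-30 in every run: LinearRegression solves ordinary least squares in closed form (a least-squares solve, not gradient descent), and with feature_size=100 greater than sample_size=30 the system is underdetermined, so an exact interpolating fit of the training data exists. A minimal numpy sketch of that closed form:

import numpy as np

n_samples, n_features = 30, 100           # same shapes as the experiment
X = np.random.rand(n_samples, n_features)
y = np.random.rand(n_samples, 1)
X_aug = np.hstack([X, np.ones((n_samples, 1))])  # append a bias column

# Minimum-norm least-squares solution; with more features than samples
# the training residual is zero up to floating-point error.
w, *_ = np.linalg.lstsq(X_aug, y, rcond=None)
print(((X_aug @ w - y) ** 2).sum())       # ~1e-30, i.e. machine precision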