Notes - A small TensorFlow example: linear regression (batch gradient descent)

import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler

# TODO: solve linear regression in TensorFlow with batch gradient descent

"""
思路:

导包
导数据集
定义梯度下降相关超参
处理梯度下降算法需要的数据格式

X
y
theta
y_pred
error
gradients
rmse 观察更新效果
更新theta的op

主体程序逻辑:开始计算图
"""

# Number of training iterations
n_epochs = 36500
# Learning rate
learning_rate = 0.001

housing = fetch_california_housing(data_home='C:/Users/Mcdonald/Documents/workplace/Pycharm/scikit_learn_data',
                                   download_if_missing=True)
train_data = housing.data
m, n = train_data.shape
# Standardize the features so a single learning rate suits every dimension
scaler = StandardScaler().fit(train_data)
scaled_train_data = scaler.transform(train_data)
""" 添加x0列 """
scaled_train_data_with_bias = np.c_[np.ones((m, 1)), scaled_train_data]
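# Shape sanity check (illustrative, not in the original post): the California
# housing set has m = 20640 rows and n = 8 features, so with the bias column
# the design matrix is (20640, 9).
# print(scaled_train_data_with_bias.shape)  # -> (20640, 9)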

# X and y hold the entire (scaled) training set as constants, so every update
# step uses the full batch -- this is what makes it *batch* gradient descent
X = tf.constant(scaled_train_data_with_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape((-1, 1)), dtype=tf.float32, name='y')

# Initialize theta with uniform random values in [-1, 1]
theta = tf.Variable(tf.random_uniform([n+1, 1], -1, 1), name='theta')
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
# RMSE is computed only to monitor progress; the gradient below uses the MSE
rmse = tf.sqrt(tf.reduce_mean(tf.square(error)), name='rmse')
""" 手算线性回归梯度公式,并编程实现 """
gradients = 1/m * tf.matmul(tf.transpose(X), error)  # 神奇的bug  如果不除以m的话会怎样
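# Optional sanity check (my addition, not in the original post): TensorFlow's
# autodiff reproduces the same direction. tf.gradients differentiates the MSE,
# so it returns (2/m) * X^T (X·theta - y) -- exactly twice the hand-coded
# expression above:
# mse = tf.reduce_mean(tf.square(error), name='mse')
# gradients = tf.gradients(mse, [theta])[0]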
""" 更新theta参数 """
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()

""" main程序 """

with tf.Session() as sess:
    init.run()
    for i in range(n_epochs):
        if i % 100 == 0:
            print("Epoch ", i, " RMSE: ", rmse.eval())

        sess.run(training_op)

    best_theta = theta.eval()
    print(best_theta)
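
# A quick follow-up sketch (my addition, not in the original post): after the
# session closes, best_theta is a plain NumPy array, so predicting needs no
# TensorFlow at all. `sample` and `preds` are illustrative names.
sample = scaled_train_data_with_bias[:5]   # first five scaled rows incl. bias
preds = sample.dot(best_theta)             # y_hat = X @ theta
print(preds.ravel())                       # compare against housing.target[:5]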


Reposted from blog.csdn.net/chen_holy/article/details/89880082