Notes - TensorFlow mini example: linear regression (mini-batch stochastic gradient descent + using an optimizer)

"""
线性回归 小批量梯度下降

sklearn数据集
标准归一化
划分训练数据集与测试数据集
tensorflow
numpy
"""

import tensorflow as tf
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split

housing = fetch_california_housing(data_home='C:/Users/Mcdonald/Documents/workplace/Pycharm/scikit_learn_data',
                                   download_if_missing=True)
data = housing.data[:1000]
m, n = data.shape
# reshape the targets into a column vector so they broadcast correctly against y_pred
target = housing.target[:1000].reshape(-1, 1)
scaler = StandardScaler().fit(data)
scaled_data = scaler.transform(data)
# prepend a bias column of ones
scaled_data_with_bias = np.c_[np.ones((m, 1)), scaled_data]
X_train, X_test, y_train, y_test = train_test_split(scaled_data_with_bias, target, random_state=0)

# 超参
learning_rate = 0.001
n_epochs = 100
batch_size = 200

# 参数
"""编程实现损失函数"""
theta = tf.Variable(tf.random_uniform([n+1, 1], -1, 1), name='theta')
X = tf.placeholder(dtype=tf.float32, name='X')
y = tf.placeholder(dtype=tf.float32, name='y')
y_pred = tf.matmul(X, theta)
error = y_pred - y
mse = 0.5 * tf.reduce_mean(tf.square(error))

"""使用优化器"""
training_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(mse)
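# For comparison, a minimal sketch of what the optimizer replaces: plain gradient descent
# can also be written by computing the gradient and applying the update step by hand
# (left commented out so it does not override training_op above):
# gradients = tf.gradients(mse, [theta])[0]
# training_op = tf.assign(theta, theta - learning_rate * gradients)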

init = tf.global_variables_initializer()

"""开始训练"""
with tf.Session() as sess:
    init.run()

    n_batch = int(len(X_train)/batch_size)

    for epoch in range(n_epochs):
        if epoch % 10 == 0:
            print("Epoch ", epoch)
            print("Train MSE =  ", sess.run(mse, feed_dict={
                X: X_train,
                y: y_train
            }))

            print("Test MSE = ", sess.run(mse, feed_dict={
                X: X_test,
                y: y_test
            }))

        """每一轮,打乱数据集"""
        arr = np.arange(len(X_train))
        np.random.shuffle(arr)
        X_train = X_train[arr]
        y_train = y_train[arr]

        for i in range(n_batch):
            sess.run(training_op, feed_dict={
                X: X_train[i*batch_size: i*batch_size+batch_size],
                y: y_train[i*batch_size: i*batch_size+batch_size]
            })
    best_theta = theta.eval()
    print(best_theta)

# Summary
"""
The fitted scaler (normalization object) must be saved.
At prediction time, new input data must first be normalized in the same way
before it is fed to the model.
"""

Reposted from blog.csdn.net/chen_holy/article/details/89968413