TensorFlow in Practice: House Price Prediction

import pandas as pd 
import numpy as np 



'''
##############################################################################################
########################               Analyze the data                ########################
##############################################################################################
from mpl_toolkits import mplot3d

# import pandas as pd 
import matplotlib.pyplot as plt 

df1 = pd.read_csv('data1.csv', names=['square', 'bedrooms', 'price'])


# Normalize the data: zero mean and unit standard deviation per column
def normalize_feature(df):
    return df.apply(lambda column: (column - column.mean()) / column.std())

df = normalize_feature(df1)

fig = plt.figure()  # First create an empty figure with plt
ax = plt.axes(projection='3d')  # Tell plt we want a 3D axes object.
                                # This sets up a 3D coordinate system, but so far the figure is still empty.
ax.set_xlabel('square')
ax.set_ylabel('bedrooms')
ax.set_zlabel('price')
# The first three arguments of scatter3D fill in the x, y and z data respectively.
# The argument c assigns each point a color intensity; here we tie it to price:
# the larger the price, the deeper the color.
# cmap specifies the colormap used to draw the scatter points.
ax.scatter3D(df['square'], df['bedrooms'], df['price'], c=df['price'], cmap='Reds')
plt.show()
'''



##############################################################################################
########################               Prepare the data                ########################
##############################################################################################
# The goal of this step is to get the data into the standard format the model expects.

# Normalize each column to zero mean and unit standard deviation
def normalize_feature(df):
    return df.apply(lambda column: (column - column.mean()) / column.std())


df = normalize_feature(pd.read_csv('data1.csv', names=['square', 'bedrooms', 'price']))
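
# A quick sanity check (optional, my own addition): after normalization every column
# should have mean close to 0 and standard deviation close to 1. Uncomment to verify:
# print(df.mean())  # expect values near 0
# print(df.std())   # expect values near 1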

# Create a DataFrame with n rows and 1 column, filled with ones.
# len(df) returns the length of df (here, the number of rows).
# It is built from a dict: 'ones' is the column name, and the value is the actual data.
ones = pd.DataFrame({'ones': np.ones(len(df))})

# Append the all-ones column to the existing DataFrame
df = pd.concat([ones, df], axis=1)  # axis=1 concatenates along columns
# How to tell which dimension axis refers to: axis=0 stacks DataFrames vertically
# (along rows), while axis=1 places them side by side (along columns).
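
# Optional: inspect the merged frame; the columns should now be
# ['ones', 'square', 'bedrooms', 'price'].
# print(df.head())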

# Represent the DataFrame's contents as matrices
X_data = np.array(df[df.columns[0:3]])  # note the slice is half-open, so this takes columns 0, 1 and 2
y_data = np.array(df[df.columns[-1]]).reshape(len(df), 1)

# print(X_data.shape, type(X_data))
# print(y_data.shape, type(y_data))
'''
Output:
(47, 3) <class 'numpy.ndarray'>
(47, 1) <class 'numpy.ndarray'>
'''




##############################################################################################
########################               Build the model                 ########################
##############################################################################################
import tensorflow as tf 
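
# Note: this script uses the TensorFlow 1.x API (tf.placeholder, tf.Session, etc.).
# If you are running TensorFlow 2.x, one common workaround (an assumption on my part,
# not part of the original post) is to go through the compatibility module instead:
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()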

alpha = 0.01  # learning rate
epoch = 500   # total number of training epochs

#-------------- Build a linear regression model --------------#
# First define the tensors the model needs
X = tf.placeholder(tf.float32, X_data.shape)   # input X, shape [47, 3]
y = tf.placeholder(tf.float32, y_data.shape)   # input y, shape [47, 1]
# Weight variable W, shape [3, 1]; constant_initializer() defaults to zeros
W = tf.get_variable('weights', (X_data.shape[1], 1), initializer=tf.constant_initializer())

# The model's hypothesis is h(x) = w0 * x0 + w1 * x1 + w2 * x2, where x0 is always 1,
# so w0 acts as the bias term. Expressed as a matrix product:
y_pred = tf.matmul(X, W)  # y_pred has shape [47, 1]

#------------- Next, define the loss function and the gradient descent strategy -------------#
# We use the L2 (squared-error) loss: J(W) = 1/(2m) * sum_i (h(x_i) - y_i)^2, with m = 47.
# Note that tf.matmul(a, b, transpose_a=True) computes a's transpose times b,
# here [1, 47] x [47, 1], so loss_op evaluates to a single number.
loss_op = 1 / (2 * len(X_data)) * tf.matmul((y_pred - y), (y_pred - y), transpose_a=True)
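
# Equivalent formulation (a sketch, not from the original post): the same loss can be
# written with reduce_mean, which averages the squared errors over the 47 samples:
# loss_op = tf.reduce_mean(tf.square(y_pred - y)) / 2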
# Use a gradient descent optimizer with learning rate alpha. (Since every step below
# feeds the entire dataset, this is full-batch gradient descent rather than true SGD.)
opt = tf.train.GradientDescentOptimizer(learning_rate=alpha)

# The training op for each step: minimize the loss
train_op = opt.minimize(loss_op)




##############################################################################################
########################                     Train                     ########################
##############################################################################################
# Create a session
with tf.Session() as sess:
    # Initialize all global variables
    sess.run(tf.global_variables_initializer())
    # Start training.
    # Because the dataset is small (only 47 samples), every step trains on the full dataset.
    for e in range(1, epoch + 1):
        sess.run(train_op, feed_dict={X: X_data, y: y_data})
        if e % 10 == 0:
            loss, w = sess.run([loss_op, W], feed_dict={X: X_data, y: y_data})
            log_str = "Epoch %d \t Loss=%.4g \t Model: y = %.4gx1 + %.4gx2 + %.4g"
            print(log_str % (e, loss, w[1], w[2], w[0]))
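
    # Optional sketch (not in the original post): after training, the learned weights
    # can be read out and used for a prediction on normalized inputs. Here we simply
    # reuse the first training sample as a placeholder input:
    # w_final = sess.run(W)                 # shape [3, 1]
    # sample = X_data[0].reshape(1, 3)      # a 1x3 row: [ones, square, bedrooms]
    # print('predicted (normalized) price:', sample.dot(w_final))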

The output is:

Epoch 10         Loss=0.4116     Model: y = 0.0791x1 + 0.03948x2 + 3.353e-10
Epoch 20         Loss=0.353      Model: y = 0.1489x1 + 0.07135x2 + -5.588e-11
Epoch 30         Loss=0.3087     Model: y = 0.2107x1 + 0.09676x2 + 3.912e-10
Epoch 40         Loss=0.2748     Model: y = 0.2655x1 + 0.1167x2 + -1.863e-11
Epoch 50         Loss=0.2489     Model: y = 0.3142x1 + 0.1321x2 + 1.77e-10
Epoch 60         Loss=0.2288     Model: y = 0.3576x1 + 0.1436x2 + -4.47e-10
Epoch 70         Loss=0.2131     Model: y = 0.3965x1 + 0.1519x2 + -8.103e-10
Epoch 80         Loss=0.2007     Model: y = 0.4313x1 + 0.1574x2 + -6.985e-10
Epoch 90         Loss=0.1908     Model: y = 0.4626x1 + 0.1607x2 + -4.936e-10
Epoch 100        Loss=0.1828     Model: y = 0.4909x1 + 0.1621x2 + -6.147e-10
Epoch 110        Loss=0.1763     Model: y = 0.5165x1 + 0.162x2 + -7.87e-10
Epoch 120        Loss=0.1709     Model: y = 0.5397x1 + 0.1606x2 + -5.821e-10
Epoch 130        Loss=0.1664     Model: y = 0.5609x1 + 0.1581x2 + -9.08e-10
Epoch 140        Loss=0.1625     Model: y = 0.5802x1 + 0.1549x2 + -9.965e-10
Epoch 150        Loss=0.1592     Model: y = 0.5979x1 + 0.1509x2 + -9.756e-10
Epoch 160        Loss=0.1564     Model: y = 0.6142x1 + 0.1465x2 + -4.144e-10
Epoch 170        Loss=0.1539     Model: y = 0.6292x1 + 0.1416x2 + -1.001e-10
Epoch 180        Loss=0.1518     Model: y = 0.643x1 + 0.1364x2 + -3.236e-10
Epoch 190        Loss=0.1498     Model: y = 0.6559x1 + 0.131x2 + -6.286e-11
Epoch 200        Loss=0.1481     Model: y = 0.6678x1 + 0.1255x2 + 2.119e-10
Epoch 210        Loss=0.1466     Model: y = 0.6789x1 + 0.1199x2 + -1.956e-10
Epoch 220        Loss=0.1452     Model: y = 0.6892x1 + 0.1142x2 + -1.758e-10
Epoch 230        Loss=0.1439     Model: y = 0.6989x1 + 0.1085x2 + -4.307e-11
Epoch 240        Loss=0.1428     Model: y = 0.708x1 + 0.1029x2 + 3.376e-10
Epoch 250        Loss=0.1418     Model: y = 0.7165x1 + 0.09736x2 + 2.841e-10
Epoch 260        Loss=0.1408     Model: y = 0.7245x1 + 0.09189x2 + 3.295e-10
Epoch 270        Loss=0.14       Model: y = 0.732x1 + 0.08653x2 + -8.033e-11
Epoch 280        Loss=0.1392     Model: y = 0.7391x1 + 0.08128x2 + 1.141e-10
Epoch 290        Loss=0.1385     Model: y = 0.7458x1 + 0.07616x2 + 1.321e-10
Epoch 300        Loss=0.1378     Model: y = 0.7522x1 + 0.07118x2 + 5.087e-10
Epoch 310        Loss=0.1372     Model: y = 0.7582x1 + 0.06634x2 + 7.398e-10
Epoch 320        Loss=0.1367     Model: y = 0.7639x1 + 0.06165x2 + 6.845e-10
Epoch 330        Loss=0.1362     Model: y = 0.7693x1 + 0.0571x2 + 8.423e-10
Epoch 340        Loss=0.1357     Model: y = 0.7744x1 + 0.0527x2 + 9.252e-10
Epoch 350        Loss=0.1353     Model: y = 0.7793x1 + 0.04845x2 + 1.104e-09
Epoch 360        Loss=0.1349     Model: y = 0.784x1 + 0.04435x2 + 1.145e-09
Epoch 370        Loss=0.1346     Model: y = 0.7884x1 + 0.0404x2 + 1.631e-09
Epoch 380        Loss=0.1343     Model: y = 0.7926x1 + 0.03658x2 + 1.446e-09
Epoch 390        Loss=0.134      Model: y = 0.7966x1 + 0.03291x2 + 1.429e-09
Epoch 400        Loss=0.1337     Model: y = 0.8004x1 + 0.02938x2 + 1.694e-09
Epoch 410        Loss=0.1334     Model: y = 0.8041x1 + 0.02598x2 + 1.697e-09
Epoch 420        Loss=0.1332     Model: y = 0.8076x1 + 0.02271x2 + 2.125e-09
Epoch 430        Loss=0.133      Model: y = 0.8109x1 + 0.01957x2 + 2.292e-09
Epoch 440        Loss=0.1328     Model: y = 0.8141x1 + 0.01655x2 + 2.913e-09
Epoch 450        Loss=0.1326     Model: y = 0.8171x1 + 0.01366x2 + 3.412e-09
Epoch 460        Loss=0.1325     Model: y = 0.82x1 + 0.01087x2 + 3.749e-09
Epoch 470        Loss=0.1323     Model: y = 0.8228x1 + 0.008204x2 + 3.499e-09
Epoch 480        Loss=0.1322     Model: y = 0.8254x1 + 0.005641x2 + 3.663e-09
Epoch 490        Loss=0.1321     Model: y = 0.828x1 + 0.003183x2 + 4.2e-09
Epoch 500        Loss=0.132      Model: y = 0.8304x1 + 0.0008239x2 + 4.138e-09

Reposted from www.cnblogs.com/tbgatgb/p/11442136.html