TensorFlow Mini-Example 02: Multivariate Linear Regression

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

#Source data

#200 evenly spaced points from -10 to 10
data_x = np.linspace(-10, 10, 200)

#Gaussian noise to make the data more realistic: mean 0, standard deviation 7
noise = np.random.normal(0, 7, data_x.shape)
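#For reproducible runs, one could seed NumPy first, e.g. np.random.seed(0)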

#Target function: y = 3x^2 - 10x - 20
data_y = np.square(data_x) * 3 + data_x * -10 - 20 + noise
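#Note: although y is quadratic in x, the model below is linear in the
#parameters W_1, W_2 and B; the two "variables" are the features x^2 and x,
#which is why this still counts as multivariate linear regression.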


#Define the variables

#X and Y are the inputs, fed in at run time
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
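#Shapes can optionally be pinned down for earlier error checking, e.g.:
#X = tf.placeholder(tf.float32, shape=[None])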

#W_1 and W_2 are the weights (analogous to slopes), B is the bias; all initialized to 0
W_1 = tf.Variable(tf.zeros([1]))
W_2 = tf.Variable(tf.zeros([1]))
B = tf.Variable(tf.zeros([1]))


#Predicted Y: Y = W_1 * X^2 + W_2 * X + B
Y_prediction = tf.add(tf.add(tf.multiply(tf.square(X), W_1), tf.multiply(X, W_2)), B)
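#Equivalent, more readable form (tf.Tensor overloads + and *):
#Y_prediction = W_1 * tf.square(X) + W_2 * X + B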

#Define the loss function (sum of squared errors)
Loss = tf.reduce_sum(tf.square(Y - Y_prediction))
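#An alternative is the mean squared error; it keeps Loss independent of the
#sample count, which would allow a much larger learning rate:
#Loss = tf.reduce_mean(tf.square(Y - Y_prediction))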

#Learning rate; it must be small here, otherwise training diverges: with
#reduce_sum over 200 samples and |x| up to 10, the gradients are large
rateLearning = 0.0000001

#Training op: minimize Loss via gradient descent (better optimizers exist
#and will be covered later; see the note below)
Train = tf.train.GradientDescentOptimizer(rateLearning).minimize(Loss)
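#A possible swap (not used here): Adam adapts the step size per parameter
#and typically converges in far fewer iterations, e.g.:
#Train = tf.train.AdamOptimizer(0.1).minimize(Loss)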


#Run the graph in a TensorFlow 1.x session
sess = tf.Session()

#Initialize all variables
init = tf.global_variables_initializer()
sess.run(init)

#Train for 100,000 iterations
for i in range(100000):
    sess.run(Train, feed_dict={X: data_x, Y: data_y})
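    #To monitor convergence, the loss could also be fetched periodically, e.g.:
    #if i % 10000 == 0:
    #    print(i, sess.run(Loss, feed_dict={X: data_x, Y: data_y}))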

#Fetch the learned w_1, w_2, b
w_1, w_2, b = sess.run([W_1, W_2, B])
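#The learned values should approach the true coefficients 3, -10 and -20;
#e.g. print(w_1, w_2, b) to check (the bias typically converges slowest)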


sess.close()

#Scatter plot of the source data
plt.scatter(data_x, data_y, color = 'b')
#Plot the fitted curve (a parabola, not a straight line)
plt.plot(data_x, np.square(data_x) * w_1 + data_x * w_2 + b, color = 'r')
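#(data_x from np.linspace is already sorted, so plt.plot draws a smooth curve;
#unsorted x values would need sorting before plotting the fit)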

plt.show()


Reposted from blog.csdn.net/iv__vi/article/details/82890252