Building a linear regression model with TensorFlow: the script below generates noisy points around the line y = 0.1x + 0.3 and fits W and b with gradient descent.

# -*- coding: utf-8 -*-
"""
Created on Sun Jul 22 10:39:10 2018

@author: Administrator
"""

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Randomly generate 1000 points scattered around the line y = 0.1x + 0.3
num_points = 1000
vectors_set = []

for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vectors_set.append([x1, y1])

# Split the generated samples into x and y lists

x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]

plt.scatter(x_data, y_data, c='r')
plt.show()

# Create a 1-D variable W, initialized with a random value in [-1, 1]
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')
# Create a 1-D variable b, initialized to 0
b = tf.Variable(tf.zeros([1]), name='b')
# Compute the predicted value y
y = W * x_data + b

# Use the mean squared error between the prediction y and the actual y_data as the loss
loss = tf.reduce_mean(tf.square(y - y_data), name='loss')
# Create a gradient descent optimizer with a learning rate of 0.5
optimizer = tf.train.GradientDescentOptimizer(0.5)
# Minimizing the loss with gradient descent is the whole training process
train = optimizer.minimize(loss, name='train')
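
# For intuition (an illustrative sketch, not part of the original post):
# optimizer.minimize(loss) is roughly equivalent to computing the gradients of
# the loss with respect to W and b and applying one gradient-descent update by hand.
grad_W, grad_b = tf.gradients(loss, [W, b])
manual_train = tf.group(tf.assign(W, W - 0.5 * grad_W),
                        tf.assign(b, b - 0.5 * grad_b))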

# Launch the graph in a session and initialize the variables
sess = tf.Session()

init = tf.global_variables_initializer()
sess.run(init)

# Print the initial W, b and loss before training
print("W=", sess.run(W), "b=", sess.run(b), "loss=", sess.run(loss))
# Run 20 training steps

for step in range(20):
    sess.run(train)  # train is the op defined above; each run performs one gradient descent step
    print("W=", sess.run(W), "b=", sess.run(b), "loss=", sess.run(loss))
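
# As a quick visual check (a sketch added for illustration), draw the fitted line
# over the generated points using the final values of W and b.
x_plot = np.array(x_data)
plt.scatter(x_data, y_data, c='r')
plt.plot(x_plot, sess.run(W) * x_plot + sess.run(b), c='b')
plt.show()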

Reposted from blog.csdn.net/qq_41858768/article/details/81152889