线性回归模型
by - YH
Date - 20/11/2019
-
从今天开始我的机器学习之路,以后会不定期的更新,敬请大家期待!
本文主要运用TensorFlow实现了机器学习中的线性回归模型,这里是在Jupyter Notebook上面实现的。
%matplotlib inline
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
np.random.seed(28) # Fix the seed so the same random numbers are produced on every run
N=100  # number of training samples
# Synthetic data: x spread over [0, 6] with Gaussian jitter; true model y = 14*x - 7 plus noise
train_x=np.linspace(0,6,N) + np.random.normal(loc=0.0,scale=2,size=N)
train_y=14*train_x-7+np.random.normal(loc=0.0,scale=5.0,size=N)
# Visualize the raw training data before fitting
plt.scatter(train_x,train_y)
plt.show()
def print_info(r_w, r_b, r_loss):
    """Print the current weight, bias, and training-loss values.

    Note: the original paste had lost the body's indentation, which is an
    IndentationError in Python; restored here.
    """
    print('w={},b={},loss={}'.format(r_w, r_b, r_loss))
Open a session, run the computation graph, and train the model.
# Build the computation graph (TensorFlow 1.x graph/session API).
# The original paste had lost all indentation; restored here.
with tf.Graph().as_default():
    with tf.name_scope('Input'):
        # Input placeholders for features and targets
        X = tf.placeholder(tf.float32, name='X')
        Y_true = tf.placeholder(tf.float32, name='Y_true')
    with tf.name_scope('Inference'):
        # Model parameter variables, initialized to zero
        w = tf.Variable(tf.zeros([1]), name='weight')
        b = tf.Variable(tf.zeros([1]), name='bias')
        y_pred = tf.add(tf.multiply(X, w), b)  # y = w*x + b
    with tf.name_scope('Loss'):
        # Mean-squared-error loss
        TrainLoss = tf.reduce_mean(tf.square(Y_true - y_pred))
    with tf.name_scope('Train'):
        # Plain gradient-descent optimizer
        Optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05)
        TrainOp = Optimizer.minimize(TrainLoss)
    # Initialization node for all Variable-type variables
    init_op = tf.global_variables_initializer()
    # Dump the graph so it can be inspected in TensorBoard
    writer = tf.summary.FileWriter(logdir='logs_linear_regression', graph=tf.get_default_graph())
    writer.close()
    # Build a session and run the training loop
    print('开启会话,运行计算图,训练模型')
    with tf.Session() as sess:
        sess.run(init_op)
        for step in range(N):
            # One gradient step over the full dataset; fetch the updated parameters and loss
            _, train_w, train_b, train_loss = sess.run(
                [TrainOp, w, b, TrainLoss],
                feed_dict={X: train_x, Y_true: train_y})
            print_info(train_w, train_b, train_loss)
    # BUG FIX: the original plotted r_w/r_b, which are never defined (NameError).
    # Use the final trained parameters train_w/train_b instead.
    plt.scatter(train_x, train_y)
    plt.plot(train_x, train_w * train_x + train_b, label='Fitted Line')
    plt.legend()
    plt.show()
Sample final output: w=[13.829246], b=[-6.4065833], loss=29.9353084564209