一、直线拟合：y = w*x + b
"""
Regression demo (TensorFlow 1.x): fit a straight line y = W*x + b
to synthetic data with plain gradient descent.
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Synthetic data: 100 random points lying exactly on y = 5x + 1.
x_data = np.random.rand(100)
y_data = x_data * 5 + 1

# Trainable parameters, both initialised to 0; training should drive
# them towards W=5, b=1.
W = tf.Variable(0.)
b = tf.Variable(0.)

# The model is built directly on the constant numpy data, so no
# placeholder is needed (the original declared one but never used it).
y_pred = W * x_data + b

# Mean squared error, minimised with SGD (learning rate 0.3).
loss = tf.reduce_mean(tf.square(y_data - y_pred))
optimizer = tf.train.GradientDescentOptimizer(0.3).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        sess.run(optimizer)
        if i % 10 == 0:
            # Log the current parameter estimates every 10 steps.
            W1, b1 = sess.run([W, b])
            print('step={},W={},b={}'.format(i, W1, b1))
    # Evaluate the fitted line and plot it against the raw data.
    prediction = sess.run(y_pred)
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction)
    plt.show()
二、二次拟合：y = w*x*x + b，一层 hidden layer（10 个节点），一层 output layer（1 个节点）
"""
Regression demo (TensorFlow 1.x): fit y = x^2 with a small network —
one hidden layer of 10 ReLU units followed by a linear output unit.
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Generate 200 evenly spaced points in [-1, 1] with small Gaussian noise.
x_data = np.linspace(-1, 1, 200).reshape([-1, 1]).astype(np.float32)
noise = np.random.normal(loc=0., scale=0.02, size=x_data.shape)
y_data = np.square(x_data) + noise

# Placeholders for input and target, shape [batch, 1].
x = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Hidden layer: 1 -> 10. stddev = sqrt(2/fan_in) (He-style init).
W1 = tf.Variable(tf.random_normal(shape=[1, 10], stddev=tf.sqrt(2.)), dtype=tf.float32)
b1 = tf.Variable(tf.zeros(shape=[1, 10]), dtype=tf.float32)
# Output layer: 10 -> 1, linear (no activation) since this is regression.
W2 = tf.Variable(tf.random_normal(shape=[10, 1], stddev=tf.sqrt(2. / 10)), dtype=tf.float32)
b2 = tf.Variable(tf.zeros(shape=[1, 1]), dtype=tf.float32)

# BUG FIX: the forward pass must be built on the placeholder `x`, not
# on the numpy array `x_data`; the original used x_data here, which
# made every feed_dict value silently ignored.
Z1 = tf.matmul(x, W1) + b1
A1 = tf.nn.relu(Z1)
y_pred = tf.matmul(A1, W2) + b2

# Per-sample sum of squared errors, averaged over the batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y - y_pred), axis=1))
optimizer = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    costs = []
    for i in range(2000):
        cost, _ = sess.run([loss, optimizer], feed_dict={x: x_data, y: y_data})
        if i % 100 == 0:
            # Record the loss every 100 steps for the learning curve.
            costs.append(cost)
    # Plot the final fit against the data, then the loss curve.
    prediction = sess.run(y_pred, feed_dict={x: x_data})
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction)
    plt.show()
    plt.plot(costs)
    plt.show()
打印结果: