TF0005: Implementing Nonlinear Regression

Train a neural network with a 1-neuron input layer, a 30-neuron hidden layer, and a 1-neuron output layer to fit y = x^{2}.
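Concretely, the 1-30-1 network built below computes, for an input x,

l1 = tanh(x·W1 + b1),    y_hat = tanh(l1·W2 + b2)

where W1 has shape 1×30, b1 has length 30, W2 has shape 30×1, and b2 is a scalar. The tanh nonlinearity between the layers is what lets this small network approximate the curve y = x^{2}; without it, the model would collapse to a single affine map, i.e. a straight line.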

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Generate 300 evenly spaced points in [-1, 1] with numpy
# (the commented-out line below would instead draw 300 uniform random points in [-0.5, 0.5))
np.random.seed(1)  # fix the seed for reproducibility
#x_data = np.random.rand(300)[:,np.newaxis]-0.5
x_data = np.linspace(-1,1,300)[:,np.newaxis]
# Print the first 10 points
print(x_data[:10])
# Add Gaussian noise (mean 0, std 0.05)
noise = np.random.normal(0,0.05,x_data.shape)
# y = x**2
y_data = np.square(x_data) + noise

plt.scatter(x_data, y_data)
plt.show()

Output: the first 10 sample points, followed by a scatter plot of the noisy training data.
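One detail worth noting: [:, np.newaxis] turns the flat array of 300 values into a column vector of shape (300, 1), which is exactly what the [None, 1] placeholders defined next expect. A quick sanity check (not in the original post):

print(x_data.shape)  # (300, 1) -- matches the [None, 1] placeholder shape
print(y_data.shape)  # (300, 1)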

# Define two placeholders: any number of rows, one column
x = tf.placeholder(tf.float32,[None,1])
y = tf.placeholder(tf.float32,[None,1])

# Build the network structure: 1-30-1
# 1. Input layer to hidden layer
w1 = tf.Variable(tf.random_normal([1,30]))
b1 = tf.Variable(tf.zeros([30]))
# compute x * w1 + b1
wx_plus_b_1 = tf.matmul(x,w1) + b1
# Activation function: hyperbolic tangent (tanh)
l1 = tf.nn.tanh(wx_plus_b_1)

# 2. Hidden layer to output layer
w2 = tf.Variable(tf.random_normal([30,1]))
b2 = tf.Variable(tf.zeros([1]))
wx_plus_b_2 = tf.matmul(l1,w2) + b2
# tanh on the output as well; this works here because the targets lie within tanh's (-1, 1) range
prediction = tf.nn.tanh(wx_plus_b_2)

# Quadratic cost: mean squared error
loss = tf.losses.mean_squared_error(y,prediction)
# Learning rate
lr = 0.1
# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(lr)
# Training op: minimize the loss
train = optimizer.minimize(loss)
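In formulas, tf.losses.mean_squared_error computes the quadratic cost

L = (1/N) Σ_i (y_i − y_hat_i)^{2}

over the N = 300 samples, and each run of the train op applies one plain gradient-descent update θ ← θ − 0.1 · ∂L/∂θ to every trainable variable (w1, b1, w2, b2).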

# Op to initialize all variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Run variable initialization
    sess.run(init)
    # 3000 full-batch gradient steps
    for _ in range(3000):
        sess.run(train,feed_dict={x:x_data,y:y_data})

    # Get the trained network's predictions
    prediction_value = sess.run(prediction,feed_dict={x:x_data})
    # Plot the fit
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction_value, 'r-')
    plt.show()

Output: the scatter of training data with the learned curve drawn in red.
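A final note on versions: the script above uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.train.GradientDescentOptimizer), which was removed from TensorFlow 2.x; under TF2 it only runs through the tf.compat.v1 module with eager execution disabled. As a minimal sketch of an equivalent model in TF2/Keras, assuming TensorFlow 2.x is installed (this rewrite is mine, not from the original post, and Keras's default weight initialization differs from tf.random_normal, so the fitted curve will not match exactly):

import numpy as np
import tensorflow as tf  # assumes TensorFlow 2.x

# Same data as above: 300 points of y = x^2 with Gaussian noise
np.random.seed(1)
x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)
y_data = np.square(x_data) + np.random.normal(0, 0.05, x_data.shape).astype(np.float32)

# 1-30-1 network with tanh on both layers, mirroring the graph built above
model = tf.keras.Sequential([
    tf.keras.Input(shape=(1,)),
    tf.keras.layers.Dense(30, activation='tanh'),
    tf.keras.layers.Dense(1, activation='tanh'),
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')

# 3000 full-batch gradient steps, like the Session loop above
model.fit(x_data, y_data, batch_size=300, epochs=3000, verbose=0)
prediction_value = model.predict(x_data, verbose=0)

Plotting prediction_value against x_data with matplotlib, as above, should reproduce the red fitted curve.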
