Hands-On Nonlinear Regression with TensorFlow

import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
 
 
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'  # suppress INFO and WARNING logs; only ERROR messages are shown
 
x_data = np.linspace(0, 1, 200)[:, np.newaxis]  # np.newaxis adds a second axis, giving shape (200, 1)
noise = np.random.normal(0, 0.01, x_data.shape)  # Gaussian noise: mean 0, standard deviation 0.01, same shape as x_data
y_data = noise - np.square(x_data)  # target function: y = -x^2 plus noise
 
# Build the neural network
# Architecture: 1 - 10 - 1
# Input layer: 1 neuron
# Hidden layer: 10 neurons
# Output layer: 1 neuron
 
# Define two placeholders, for the inputs and the targets
x = tf.placeholder(tf.float32, [None, 1], name='x')
y = tf.placeholder(tf.float32, [None, 1], name='y')
 
# Weight matrix between the input layer and the hidden layer
input_2_hidden_w = tf.Variable(tf.truncated_normal([1, 10]))
hidden_bias = tf.Variable(tf.zeros([1, 10]))
hidden_out = tf.matmul(x, input_2_hidden_w) + hidden_bias  # shape: (N, 10)
activation_hidden_out = tf.nn.tanh(hidden_out)
 
# Weight matrix between the hidden layer and the output layer
hidden_2_output_w = tf.Variable(tf.truncated_normal([10, 1]))
out_bias = tf.Variable(tf.zeros([1, 1]))
out_pure = tf.matmul(activation_hidden_out, hidden_2_output_w) + out_bias
prediction = tf.nn.tanh(out_pure)  # hyperbolic tangent activation
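# Note: tanh squashes the output into (-1, 1). That fits this dataset, since
# the targets y = -x^2 + noise lie roughly in [-1, 0]; for targets outside
# (-1, 1) the output layer would need a different (or no) activation.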
 
# Define the loss: mean squared error between targets and predictions
loss = tf.reduce_mean(tf.square(y - prediction))
# Minimize the loss with a gradient descent optimizer, learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
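# In equation form, each train_step performs one full-batch update:
#   L = (1/N) * sum_i (y_i - prediction_i)^2
#   theta <- theta - 0.1 * dL/dtheta   for every trainable variable theta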
 
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
 
    # Use a new name so the prediction tensor is not shadowed by its value
    prediction_value = sess.run(prediction, feed_dict={x: x_data})
    plt.figure()  # create the figure
    plt.scatter(x_data, y_data)  # scatter plot of the ground-truth data
    plt.plot(x_data, prediction_value, 'r-', lw=2)  # fitted curve: red solid line, width 2
    plt.show()  # display the plot
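
The script above targets the TensorFlow 1.x graph API (placeholders, Sessions). A minimal sketch for running it under TensorFlow 2.x, assuming the tf.compat.v1 shim bundled with 2.x, is to swap only the import at the top:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # restore graph-mode execution so placeholders and Sessions work

With that change, placeholder, Session, truncated_normal, and GradientDescentOptimizer all resolve to their 1.x implementations, so the rest of the script runs unchanged.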