TensorFlow - Nonlinear Regression

This example fits the quadratic relation y = x² (plus noise) with a small 1-10-1 feedforward network, using the TensorFlow 1.x graph API.

Code:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


#Use numpy to generate 200 evenly spaced points from -0.5 to 0.5
#np.newaxis: insert a new dimension so the shape becomes (200, 1)
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]
#add Gaussian noise (mean 0, std 0.2) to the quadratic target
noise = np.random.normal(0, 0.2, x_data.shape)
y_data = np.square(x_data) + noise
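#x_data and y_data both have shape (200, 1); the underlying relation is
#y = x^2 plus Gaussian noise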



#Define two placeholders
#[None, 1]: any number of rows (the batch size), exactly one column
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])

#define the middle layer of the neural network
#1 neuron in the input layer, 10 neurons in the middle layer
Weights_L1 = tf.Variable(tf.random_normal([1, 10]))
biases_L1 = tf.Variable(tf.zeros([1, 10]))
Wx_plus_b_L1 = tf.matmul(x, Weights_L1) + biases_L1
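#biases_L1 has shape (1, 10) and is broadcast over the batch dimension,
#so Wx_plus_b_L1 has shape (None, 10)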
#Hyperbolic tangent function as activation function
L1 = tf.nn.tanh(Wx_plus_b_L1)

#define output layer
#10 neurons in the middle layer and 1 neuron in the output layer
Weights_L2 = tf.Variable(tf.random_normal([10, 1]))
biases_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_b_L2 = tf.matmul(L1, Weights_L2) + biases_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)
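#Full forward pass: prediction = tanh(tanh(x*W1 + b1)*W2 + b2)
#tanh bounds the output to (-1, 1), which is acceptable here because the
#target y = x^2 + noise mostly stays inside that range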


#Quadratic (mean squared error) cost function
loss = tf.reduce_mean(tf.square(y-prediction))
#train with gradient descent, learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)



with tf.Session() as sess:
    #variable initialization
    sess.run(tf.global_variables_initializer())
    for _ in range(2001):
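        #full-batch gradient descent: each step feeds all 200 points at once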
        sess.run(train_step, feed_dict={x:x_data, y:y_data})

        
    # get the predicted value
    prediction_value = sess.run(prediction, feed_dict={x:x_data})
    #plot the training data and the fitted curve
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction_value, 'r-', lw=5)
    plt.show()
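Note that the listing above targets the TensorFlow 1.x graph API (tf.placeholder, tf.Session), which is no longer available at the top level of TensorFlow 2.x. A minimal workaround, assuming the v1 compatibility layer that TF 2.x ships as tf.compat.v1, is to import through it and disable eager execution:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  #restore 1.x graph-mode semantics
#the rest of the script then runs unchanged: tf.placeholder, tf.random_normal,
#tf.train.GradientDescentOptimizer and tf.Session all resolve through compat.v1

For comparison, here is a rough tf.keras sketch of the same 1-10-1 tanh network; this is an equivalent formulation under TF 2.x, not part of the original script:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='tanh', input_shape=(1,)),  #hidden layer
    tf.keras.layers.Dense(1, activation='tanh'),                     #output layer
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')
model.fit(x_data, y_data, epochs=2000, batch_size=200, verbose=0)  #full batch, as above
prediction_value = model.predict(x_data)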

Result: the plot shows the noisy training points as a scatter and the network's fit as a thick red curve tracking the parabola.

