TensorFlow non-linear regression based on a neural network

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

"" " 
1. 
Shape: * 2 matrix dimension. 3 
=================== 
2. 
[None, 1]: N row 1 
=================== 
3. 
numpy.random.normal (loc = 0.0, Scale = 1.0, size = None) 
normal 
loc : float 
    mean of the probability distribution of this (corresponding to the center of the entire distribution centre) 
Scale: float 
    standard deviation of the distribution of this probability (corresponding to the width of the distribution, scale chunky greater the smaller Scale, the tall, thin) 
size: int or INTS of tuple 
    Shape output, the default is None, only outputs a value 

"" " 
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]  # add a trailing dimension: shape (200,) becomes (200, 1)
noise = np.random.normal(0, 0.02, x_data.shape)      # add noise, keeping the dimensions consistent with x_data
y_data = np.square(x_data) + noise                   # y = x^2 + noise
# y_data = np.exp(x_data) + noise
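# Added sanity check (a small sketch, not part of the original code): confirm
# the shapes described above; np.newaxis turned the (200,) vector into a column.
assert x_data.shape == (200, 1)
assert noise.shape == x_data.shape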

# define two placeholders
x = tf.placeholder(tf.float32, [None, 1])  # N rows, 1 column
y = tf.placeholder(tf.float32, [None, 1])  # N rows, 1 column, defined to match the samples (x)
"""
input layer: x, each sample is a single point, so one input neuron is needed
intermediate layer: size is up to us (10 neurons below)
output layer: y, each output is a single point, so one neuron
"""

# define the intermediate (hidden) layer of the network
weight = tf.Variable(tf.random_normal([1, 10]))  # weights between the input and intermediate layers, random values from a normal distribution: 1 row (1 input) by 10 columns (10 hidden neurons)
biases = tf.Variable(tf.zeros([1, 10]))          # biases initialised to 0, one per hidden neuron
wx_plus_b_l1 = tf.matmul(x, weight) + biases     # weighted sum of the signals: x * weight + biases
L1 = tf.nn.tanh(wx_plus_b_l1)                    # tanh activation function (s-shaped)
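# A possible variation (my addition, not from the original post): ReLU is a
# common alternative hidden activation and would be swapped in as
#   L1 = tf.nn.relu(wx_plus_b_l1)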

# define the output layer
"""
The output L1 of the intermediate layer is the input of the output layer.
"""
weight1 = tf.Variable(tf.random_normal([10, 1]))  # intermediate layer to the output y: 10 -> 1
biases1 = tf.Variable(tf.zeros([1, 1]))           # bias for the single output neuron: 1 -> 1
wx_plus_b_l2 = tf.matmul(L1, weight1) + biases1   # output layer weighted sum
predict = tf.nn.tanh(wx_plus_b_l2)                # predicted result
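# Note (added): tanh bounds the output to (-1, 1). That works here because
# y = x^2 + noise stays inside that range on [-0.5, 0.5]; for targets outside
# it, a linear output (predict = wx_plus_b_l2, no activation) is the usual choice.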

# quadratic cost function
loss = tf.reduce_mean(tf.square(y - predict))  # mean of the squared errors
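# Equivalent built-in (added note): in TensorFlow 1.x the same cost can also be
# written as tf.losses.mean_squared_error(labels=y, predictions=predict).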

# gradient descent
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # learning rate 0.1
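# Alternative optimiser (my addition): Adam often converges in fewer steps on
# problems like this, e.g.
#   train_step = tf.train.AdamOptimizer(0.01).minimize(loss)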

# training
with tf.Session() as ses:
    # initialise the variables
    ses.run(tf.global_variables_initializer())
    for _ in range(5001):
        ses.run(train_step, feed_dict={x: x_data, y: y_data})  # one gradient-descent training step on the sample points

    # obtain the prediction values
    predict_value = ses.run(predict, feed_dict={x: x_data})
    # plot
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, predict_value, 'r-', lw=5)  # red solid line, line width 5
    plt.show()
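
The listing above uses the TensorFlow 1.x API (tf.placeholder, tf.Session, tf.train). A minimal sketch for running it unchanged under TensorFlow 2, assuming only the stock compatibility shim, is to replace the tensorflow import with:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode: placeholders, sessions, and tf.train optimizers

With that substitution the rest of the script runs as written.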

 

 
