Nonlinear regression with a single-hidden-layer neural network (TensorFlow 1.x)

# Nonlinear regression: fit y = x^2 + noise with a 1-10-1 feed-forward
# network trained by gradient descent (TensorFlow 1.x graph API).
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Generate 200 training points: x in [-0.5, 0.5] as a (200, 1) column
# vector, y = x^2 plus small Gaussian noise (sigma = 0.02).
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]
noise = np.random.normal(0, 0.02, x_data.shape)
y_data = np.square(x_data) + noise

# Placeholders for the input/target batches; None allows any batch size.
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])

# Network shape: 1 input neuron, 10 hidden neurons, 1 output neuron.
# Hidden layer: random-normal weights, zero biases, tanh activation.
weights_l1 = tf.Variable(tf.random_normal([1, 10]))
biases_l1 = tf.Variable(tf.zeros([1, 10]))
wx_plus_b_l1 = tf.matmul(x, weights_l1) + biases_l1
l1 = tf.nn.tanh(wx_plus_b_l1)

# Output layer: 10 -> 1, tanh activation.
weights_l2 = tf.Variable(tf.random_normal([10, 1]))
biases_l2 = tf.Variable(tf.zeros([1, 1]))
wx_plus_b_l2 = tf.matmul(l1, weights_l2) + biases_l2
prediction = tf.nn.tanh(wx_plus_b_l2)

# Quadratic (mean squared error) cost function.
loss = tf.reduce_mean(tf.square(y - prediction))
# Train the network with plain gradient descent, learning rate 0.1.
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    # Initialize all graph variables before any training step.
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
    # Obtain the trained network's predictions on the training inputs.
    prediction_value = sess.run(prediction, feed_dict={x: x_data})
    # Plot the noisy samples and the fitted curve.
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction_value, 'r-', lw=5)
    plt.show()

Published: 2019-05-30 10:58:12

Origin: www.cnblogs.com/direwolf22/p/10948367.html