[2-2] A simple neural network example

 

import tensorflow as tf
import numpy as np

# placeholder
# A placeholder node waits for input data to be supplied; when it is defined,
# only the data type (and optionally the shape) needs to be specified.
# When the node is executed, a dictionary is used to "feed" data into it:
# it is like declaring a variable first and passing the data in from outside later.
# Note that placeholder and feed_dict are used together:
# data is fed in as the feed_dict parameter of a run() call, and the feed is
# valid only for that call; once the call returns, the feed disappears.

# add a layer
def add_layer(inputs, in_size, out_size, activation_function=None):  # inputs, number of input/output nodes, activation function
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # omitting the [] raises an error; the list specifies the shape
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)           # likewise for zeros([1, out_size])
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# 1. Training data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# 2. Define the nodes that will receive the input
xs = tf.placeholder(tf.float32, [None, 1])  # shape = [None, 1]
ys = tf.placeholder(tf.float32, [None, 1])

# 3. Define the network layers: a hidden layer and an output layer
# input is xs, the hidden layer has 10 neurons
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# input is the hidden layer l1, the output layer gives the predicted value
prediction = add_layer(l1, 10, 1, activation_function=None)

# 4. Define the loss expression:
# the error between prediction and data (mean over examples of the
# per-example sum of squared errors; each example has one output here,
# so this is ordinary mean squared error)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                      reduction_indices=[1]))

# 5. Minimize the loss value, with learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.initialize_all_variables()  # deprecated in later TF 1.x in favor of tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
>>>
2.13953
0.0208985
0.00737973
0.00562527
0.00518565
0.00500644
0.00489249
0.00478653
0.00469118
0.00460924
0.00453592
0.00447866
0.00443341
0.00438404
0.00430098
0.0042153
0.00414793
0.00409648
0.00405469
0.00401441
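
The comments at the top of the listing describe how placeholder and feed_dict work together. As a minimal standalone sketch of that mechanism (my own illustration, assuming the same TensorFlow 1.x API used throughout this post):

import tensorflow as tf

# only the dtype is fixed when the placeholder is defined
p = tf.placeholder(tf.float32)
q = tf.placeholder(tf.float32)
product = p * q

with tf.Session() as sess:
    # the feed is supplied per run() call and disappears when the call returns
    print(sess.run(product, feed_dict={p: 3.0, q: 4.0}))  # prints 12.0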

When using TensorFlow with Python:
The reduction_indices parameter of tf.reduce_sum specifies the dimension along which the summation is performed.
When reduction_indices is left at its default value of None, all elements are summed and the result is a single scalar.
When reduction_indices is 0, values at the same position along the first dimension (axis 0, i.e. down each column) are added.
When reduction_indices is 1, values at the same position along the second dimension (axis 1, i.e. across each row) are added.

Reference: https://blog.csdn.net/u014772246/article/details/84973358

a = np.array([[1,2,3],[4,5,6]])
b = tf.reduce_sum(a, reduction_indices=[0])
c = tf.reduce_sum(a, reduction_indices=[1])
d = tf.reduce_sum(a)

with tf.Session() as sess:
    b_result, c_result, d_result = sess.run([b, c, d])
    print(a)
    print("shape of a:", a.shape)
    print("with reduction_indices = 0:")
    print(b_result)
    print("with reduction_indices = 1:")
    print(c_result)
    print("with reduction_indices left as None:")
    print(d_result)
>>>
[[1 2 3]
 [4 5 6]]
shape of a: (2, 3)
with reduction_indices = 0:
[5 7 9]
with reduction_indices = 1:
[ 6 15]
with reduction_indices left as None:
21
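
For reference, a small addition of my own (worth verifying against your TensorFlow 1.x version): reduction_indices is the older, deprecated name for the axis parameter of tf.reduce_sum, so these two calls should produce the same result:

b_old = tf.reduce_sum(a, reduction_indices=[0])  # deprecated parameter name
b_new = tf.reduce_sum(a, axis=0)                 # current parameter name; also [5 7 9]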
