[TensorFlow] Example of the activation function tf.nn.relu

 

Code:
import tensorflow as tf
import numpy as np
 # ## defined neural network layer functions added START ###

def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add one fully-connected neural network layer to the graph.

    :param inputs: input tensor for this layer
    :param in_size: number of neurons feeding into this layer
    :param out_size: number of neurons in this layer
    :param activation_function: optional activation applied to the linear
        output; ``None`` means a purely linear layer
    :return: the layer's output tensor
    """
    # "in_size x out_size" weight matrix, initialised from a normal distribution
    weights = tf.Variable(tf.random_normal([in_size, out_size]))

    # "1 x out_size" bias row; started at 0.1 rather than 0
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)

    # Linear transformation: inputs @ weights + biases
    wx_plus_b = tf.matmul(inputs, weights) + biases

    # Apply the activation function if one was supplied
    if activation_function is None:
        outputs = wx_plus_b
    else:
        outputs = activation_function(wx_plus_b)
    return outputs
 # ## defined neural network layer functions added END ###


# ## define a variable structure START ###

# Starting input: 300 evenly spaced values from -1 to 1, converted from a
# (300,) array into a (300, 1) column matrix.
#    Example:
#    x1 = np.array([1, 2, 3, 4, 5])
#    # the shape of x1 is (5,)
#    x1_new = x1[:, np.newaxis]
#    # now, the shape of x1_new is (5, 1)
#    array([[1],
#           [2],
#           [3],
#           [4],
#           [5]])
#    x1_new = x1[np.newaxis, :]
#    # now, the shape of x1_new is (1, 5)
#    array([[1, 2, 3, 4, 5]])

# Input data: 300 evenly spaced values in [-1, 1] as a (300, 1) column matrix
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]

# Gaussian noise with mean 0 and standard deviation 0.05, one value per
# element of x_data (same shape)
noise = np.random.normal(0, 0.05, x_data.shape)

# Target output: y = x^2 - 0.5, plus the noise
y_data = np.square(x_data) - 0.5 + noise


# Placeholders for the network's input and target values at run time;
# None in the first dimension allows any batch size
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# ## define the neural network architecture START ###

# Hidden layer 1: 1 input -> 10 neurons, ReLU activation
layer01 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Hidden layer 2: 10 -> 10 neurons, sigmoid activation
layer02 = add_layer(layer01, 10, 10, activation_function=tf.nn.sigmoid)
# Output layer: 10 -> 1 neuron, no activation (linear output)
prediction = add_layer(layer02, 10, 1, activation_function=None)

# Loss:
# 1. squared deviation between target and predicted outputs
#    (built from the ys placeholder, which is fed y_data below, so the
#    same graph works for any fed batch)
loss_square = tf.square(ys - prediction)
# 2. sum the squared error over each example's output dimension
reduce_sum_square = tf.reduce_sum(loss_square, reduction_indices=[1])
# 3. average over all examples
loss = tf.reduce_mean(reduce_sum_square)

# Train all samples with plain gradient descent, learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Variable initialisation op (tf.initialize_all_variables is deprecated)
init = tf.global_variables_initializer()

# Create a session and run the initialisation op
sess = tf.Session()
sess.run(init)

# ## define the neural network END ###

# Train for 2000 steps, printing the loss every 50 steps
for i in range(2000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))

 

Output:
> Executing task: python d:\Work\002_WorkSpace\VSCode\Tensorflow\cnn.py <

WARNING:tensorflow:From C:\Program Files\Python\Python37\lib\site-packages\tensorflow\python\framework
\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer. WARNING:tensorflow:From C:\Program Files\Python\Python37\lib\site-packages\tensorflow\python\ops
\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead. WARNING:tensorflow:From C:\Program Files\Python\Python37\lib\site-packages\tensorflow\python\util
\tf_should_use.py:193: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02. Instructions for updating: Use `tf.global_variables_initializer` instead. 2019-06-16 18:23:25.445771: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports
instructions that this TensorFlow binary was not compiled to use: AVX2
0.9150444 0.018474927 0.012227052 0.008430008 0.006330067 0.005174632 0.0045147026 0.004099458 0.0037936615 0.0035521714 0.0033668855 0.003235288 0.0031796119 0.003798308 0.011472862 0.011122204 0.0038715526 0.0029777498 0.00284954 0.0028072707 0.0027813027 0.0027617016 0.0027467846 0.0027342557 0.0027231644 0.0027126905 0.0027037202 0.0026956936 0.0026887206 0.0026827992 0.0026773391 0.0026706234 0.0026643125 0.0026575066 0.0026512532 0.00264405 0.0026368005 0.0026302505 0.0026243015 0.0026188325 Terminal will be reused by tasks, press any key to close it.

 

Guess you like

Origin www.cnblogs.com/Areas/p/11032577.html