Convolution model by Andrew Ng

# GRADED FUNCTION: model

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf                               # TensorFlow 1.x
from tensorflow.python.framework import ops
from cnn_utils import random_mini_batches            # helper provided with the assignment

# create_placeholders, initialize_parameters, forward_propagation and
# compute_cost are the graded functions defined earlier in the notebook.

def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
          num_epochs = 100, minibatch_size = 64, print_cost = True):
    """
    Implements a three-layer ConvNet in TensorFlow:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
    
    Arguments:
    X_train -- training set images, of shape (None, 64, 64, 3)
    Y_train -- training set labels, of shape (None, n_y = 6)
    X_test -- test set images, of shape (None, 64, 64, 3)
    Y_test -- test set labels, of shape (None, n_y = 6)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 5 epochs
    
    Returns:
    train_accuracy -- real number, accuracy on the train set (X_train)
    test_accuracy -- real number, testing accuracy on the test set (X_test)
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    
    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep results consistent (tensorflow seed)
    seed = 3                                          # to keep results consistent (numpy seed)
    (m, n_H0, n_W0, n_C0) = X_train.shape             # m examples of 64x64x3 images
    n_y = Y_train.shape[1]                            # number of classes (6)
    costs = []                                        # to keep track of the cost
    
    # Create Placeholders of the correct shape
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()              # initialize the conv filters
    ### END CODE HERE ###
    
    # Forward propagation: Build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###
    
    # Cost function: Add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)                        # softmax cross-entropy, averaged over the batch
    ### END CODE HERE ###
    
    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
    ### START CODE HERE ### (1 line)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    ### END CODE HERE ###
    
    # Initialize all the variables globally
    init = tf.global_variables_initializer()
     
    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        
        # Run the initialization
        sess.run(init)
        
        # Do the training loop
        for epoch in range(num_epochs):

            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost; the feed_dict should contain a minibatch for (X, Y).
                ### START CODE HERE ### (1 line)
                _ , temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                ### END CODE HERE ###
                
                minibatch_cost += temp_cost / num_minibatches
                

            # Print the cost every 5 epochs (and record it every epoch)
            if print_cost == True and epoch % 5 == 0:
                print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 1 == 0:
                costs.append(minibatch_cost)
        
        
        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('epochs')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # Calculate the correct predictions
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
        
        # Calculate accuracy on the train and test sets
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(accuracy)                               # prints the accuracy tensor itself, not its value
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)
                
        return train_accuracy, test_accuracy, parameters
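
For reference, this is roughly how the notebook calls the function. A minimal sketch, assuming the SIGNS dataset helpers load_dataset and convert_to_one_hot from the assignment's cnn_utils.py:

from cnn_utils import load_dataset, convert_to_one_hot

# Load the SIGNS dataset: 64x64x3 hand images, 6 classes (digits 0-5)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
X_train = X_train_orig / 255.                     # normalize pixel values to [0, 1]
X_test = X_test_orig / 255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T   # one-hot labels of shape (m, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6).T

_, _, parameters = model(X_train, Y_train, X_test, Y_test)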

Flow: create placeholders for X and Y; initialize the parameters (the conv-layer filter weights); build forward_propagation(X, parameters); add compute_cost(Z, Y) to the graph; define the backprop optimizer; call tf.global_variables_initializer(); run the init op and then the optimizer on minibatches; print the cost every few epochs; calculate the correct predictions; calculate the accuracy on the train and test sets. A sketch of the graph-building helpers follows below.
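
To make that flow concrete, here is a minimal sketch of the graph-building helpers, assuming the layer hyperparameters used in this assignment (stride-1 'SAME' convolutions, 8x8 pooling after the first conv, 4x4 pooling after the second, 6 output classes):

def create_placeholders(n_H0, n_W0, n_C0, n_y):
    # None in the first dimension lets the batch size vary at run time
    X = tf.placeholder(tf.float32, [None, n_H0, n_W0, n_C0])
    Y = tf.placeholder(tf.float32, [None, n_y])
    return X, Y

def forward_propagation(X, parameters):
    W1 = parameters['W1']
    W2 = parameters['W2']
    # CONV2D -> RELU -> MAXPOOL
    Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')
    A1 = tf.nn.relu(Z1)
    P1 = tf.nn.max_pool(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    # CONV2D -> RELU -> MAXPOOL
    Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding='SAME')
    A2 = tf.nn.relu(Z2)
    P2 = tf.nn.max_pool(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
    # FLATTEN -> FULLYCONNECTED (no activation here: compute_cost applies softmax)
    F = tf.contrib.layers.flatten(P2)
    Z3 = tf.contrib.layers.fully_connected(F, 6, activation_fn=None)
    return Z3

def compute_cost(Z3, Y):
    # softmax cross-entropy per example, then average over the batch
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y))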

Initialization: only the conv-layer weights (the filters) need to be initialized explicitly; the conv-layer biases and the fully connected layer's weights and biases do not, since TensorFlow creates and initializes those automatically. See the sketch below.
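
A minimal sketch of such an initializer, assuming the filter shapes from this assignment (W1: [4, 4, 3, 8], W2: [2, 2, 8, 16]). tf.contrib.layers.fully_connected manages its own weights and biases, and tf.nn.conv2d is used here without a bias term, so only the two filters need explicit variables:

def initialize_parameters():
    tf.set_random_seed(1)                             # so results are reproducible
    W1 = tf.get_variable("W1", [4, 4, 3, 8],
                         initializer=tf.contrib.layers.xavier_initializer(seed=0))
    W2 = tf.get_variable("W2", [2, 2, 8, 16],
                         initializer=tf.contrib.layers.xavier_initializer(seed=0))
    return {"W1": W1, "W2": W2}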

For the complete version, see: https://www.cnblogs.com/CZiFan/p/9481110.html

Reprinted from www.cnblogs.com/flipped415/p/11312936.html