Recognizing handwritten digit data with a convolutional neural network

The script below builds a network with two convolution/pooling blocks followed by a single fully connected layer, then trains it on MNIST with softmax cross-entropy loss and gradient descent.

Code:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# weight initialization helper
def weight_variables(shape):
    w = tf.Variable(tf.random_normal(shape=shape, mean=0.0, stddev=1.0))
    return w

# bias initialization helper
def bias_variables(shape):
    b = tf.Variable(tf.constant(0.0, shape=shape))
    return b

def model():
    """
    Define the custom convolutional model.
    :return: x, y_true, y_predict
    """
    # 1. Prepare the data placeholders: x [None, 784], y_true [None, 10]
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, 784])
        y_true = tf.placeholder(tf.int32, [None, 10])

    # 2. First convolutional layer: 5x5 kernels, 1 input channel, 32 filters,
    #    followed by tf.nn.relu activation and max pooling
    with tf.variable_scope("conv1"):
        # randomly initialized weights
        w_conv1 = weight_variables([5, 5, 1, 32])

        # bias, one value per filter
        b_conv1 = bias_variables([32])

        # reshape x from [None, 784] to the 4-D tensor [None, 28, 28, 1]
        x_reshape = tf.reshape(x, [-1, 28, 28, 1])

        # convolution + ReLU: [None, 28, 28, 1] -> [None, 28, 28, 32]
        x_relu = tf.nn.relu(tf.nn.conv2d(x_reshape, w_conv1, strides=[1, 1, 1, 1], padding="SAME") + b_conv1)

        # 2x2 max pooling with stride 2: [None, 28, 28, 32] -> [None, 14, 14, 32]
        x_pool1 = tf.nn.max_pool(x_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    # 3. Second convolutional layer
    with tf.variable_scope("conv2"):
        # randomly initialized weights [5, 5, 32, 64], bias [64]
        w_conv2 = weight_variables([5, 5, 32, 64])
        b_conv2 = bias_variables([64])

        # convolution + ReLU: [None, 14, 14, 32] -> [None, 14, 14, 64]
        x_relu2 = tf.nn.relu(tf.nn.conv2d(x_pool1, w_conv2, strides=[1, 1, 1, 1], padding="SAME") + b_conv2)

        # 2x2 max pooling with stride 2: [None, 14, 14, 64] -> [None, 7, 7, 64]
        x_pool2 = tf.nn.max_pool(x_relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    # 4. Fully connected layer: [None, 7, 7, 64] -> [None, 7*7*64] x [7*7*64, 10] + [10] = [None, 10]
    with tf.variable_scope("fc"):
        # randomly initialized weights and bias
        w_fc = weight_variables([7 * 7 * 64, 10])
        b_fc = bias_variables([10])

        # flatten: [None, 7, 7, 64] -> [None, 7*7*64]
        x_fc_reshape = tf.reshape(x_pool2, [-1, 7 * 7 * 64])

        # matrix multiplication gives 10 logits per sample
        y_predict = tf.matmul(x_fc_reshape, w_fc) + b_fc

    return x, y_true, y_predict

def conv_fc():
    # load the real MNIST data
    mnist = input_data.read_data_sets("./mnist/input_data/", one_hot=True)

    # build the model and get its output
    x, y_true, y_predict = model()

    # compute the loss, averaged over all samples
    with tf.variable_scope("soft_cross"):
        # mean softmax cross-entropy loss
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_predict))

    # minimize the loss with gradient descent
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # compute the accuracy
    with tf.variable_scope("acc"):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        # cast the booleans to float and average over the batch
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))

    # op that initializes all variables
    init_op = tf.global_variables_initializer()

    # open a session and run the graph
    with tf.Session() as sess:
        # initialize the variables
        sess.run(init_op)

        # iterate, updating the parameters at every training step
        for i in range(1000):
            # fetch a batch of features and targets
            mnist_x, mnist_y = mnist.train.next_batch(50)

            # run one training step
            sess.run(train_op, feed_dict={x: mnist_x, y_true: mnist_y})

            print("step %d, training accuracy %f" % (i, sess.run(accuracy, feed_dict={x: mnist_x, y_true: mnist_y})))


if __name__ == '__main__':
    conv_fc()
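
Note on newer TensorFlow versions: the listing above targets TensorFlow 1.x, and tensorflow.examples.tutorials.mnist is no longer shipped with TensorFlow 2.x. The sketch below is an assumption, not part of the original post: it shows one way to keep the graph/Session code running under TensorFlow 2 by importing the v1 compatibility API, loading MNIST through tf.keras.datasets, and batching with NumPy. The next_batch helper is a hypothetical stand-in for mnist.train.next_batch.

import numpy as np
import tensorflow.compat.v1 as tf  # v1 compatibility API (assumes TensorFlow 2.x is installed)

tf.disable_eager_execution()  # restore TF1-style graph/Session semantics

# load MNIST from tf.keras instead of tensorflow.examples.tutorials.mnist
(train_x, train_y), _ = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype("float32") / 255.0  # flatten to [N, 784], scale to [0, 1]
train_y = np.eye(10, dtype="float32")[train_y]                # one-hot encode the labels

def next_batch(batch_size=50):
    # hypothetical replacement for mnist.train.next_batch(batch_size)
    idx = np.random.choice(len(train_x), batch_size, replace=False)
    return train_x[idx], train_y[idx]

With these pieces in place, the input_data import goes away and the call to mnist.train.next_batch(50) inside conv_fc() becomes next_batch(50); everything else in the script can stay as written.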

 


Origin www.cnblogs.com/wen-kang/p/11127772.html