Convolutional neural network (CNN) with TensorFlow 2.1

It is a bit mysterious: without modifying any parameters, the test accuracy varies between about 98% and 99.1% across repeated runs.

Reference: the "Simple and Crude TensorFlow" handbook.
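The run-to-run spread is expected rather than mysterious: both the initial weights and the sampled batches are random. As an aside (a minimal sketch, not from the original post), fixing the seeds before building the model should make runs repeatable:

import numpy as np
import tensorflow as tf

np.random.seed(42)      # fixes the NumPy sampling used in MNISTLoader.get_batch below
tf.random.set_seed(42)  # fixes TensorFlow's random weight initialization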

import tensorflow as tf
import numpy as np

class MNISTLoader():
    def __init__(self):
        mnist = tf.keras.datasets.mnist
        (self.train_data, self.train_label), (self.test_data, self.test_label) = mnist.load_data()
        # MNIST images default to uint8 (0-255). The code below normalizes them
        # to floats in [0, 1] and appends a color-channel dimension at the end.
        self.train_data = np.expand_dims(self.train_data.astype(np.float32) / 255.0, axis=-1)  # [60000, 28, 28, 1]
        self.test_data = np.expand_dims(self.test_data.astype(np.float32) / 255.0, axis=-1)    # [10000, 28, 28, 1]
        self.train_label = self.train_label.astype(np.int32)  # [60000]
        self.test_label = self.test_label.astype(np.int32)    # [10000]
        self.num_train_data, self.num_test_data = self.train_data.shape[0], self.test_data.shape[0]

    def get_batch(self, batch_size):
        # Draw batch_size random samples from the training set and return them.
        index = np.random.randint(0, np.shape(self.train_data)[0], batch_size)
        return self.train_data[index, :], self.train_label[index]
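# Usage sketch (illustrative, not from the original post): the loader yields
# image batches of shape [batch_size, 28, 28, 1] and labels of shape [batch_size]:
#
#     loader = MNISTLoader()
#     images, labels = loader.get_batch(50)
#     print(images.shape, labels.shape)   # (50, 28, 28, 1) (50,)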


# tf.keras.layers.Conv2D(
#     filters, kernel_size, strides=(1, 1), padding='valid', data_format=None,
#     dilation_rate=(1, 1), activation=None, use_bias=True,
#     kernel_initializer='glorot_uniform', bias_initializer='zeros',
#     kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
#     kernel_constraint=None, bias_constraint=None, **kwargs
# )
# filters: integer, the dimensionality of the output space (i.e. the number of filters in the convolution).
# kernel_size: integer or tuple/list of 2 integers specifying the height and width of the 2D convolution window (the receptive field).
# strides: vertical and horizontal stride of the convolution.
# padding: 'valid' discards border regions smaller than the kernel; 'same' zero-pads them, so (with stride 1) the output has the same spatial shape as the input.
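# Concretely (illustrative numbers, not from the original post): for a 28x28
# input, a 5x5 kernel, and stride 1,
#   padding='valid' gives floor((28 - 5) / 1) + 1 = 24  ->  output [batch, 24, 24, filters]
#   padding='same'  keeps the spatial size at 28        ->  output [batch, 28, 28, filters]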

class CNN(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.conv1 = tf.keras.layers.Conv2D(
            filters=32,              # number of convolution kernels (output channels)
            kernel_size=[5, 5],      # receptive field size
            padding='same',          # padding policy ('valid' or 'same')
            activation=tf.nn.relu    # activation function
        )
        self.pool = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
        self.conv2 = tf.keras.layers.Conv2D(
            filters=64,
            kernel_size=[5, 5],
            padding='same',
            activation=tf.nn.relu
        )
        self.flatten = tf.keras.layers.Reshape(target_shape=(7 * 7 * 64,))
        self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
        self.dense2 = tf.keras.layers.Dense(units=10)  # raw logits; softmax is applied in call()

    def call(self, inputs):
        x = self.conv1(inputs)                  # [batch_size, 28, 28, 32]
        x = self.pool(x)                        # [batch_size, 14, 14, 32]
        x = self.conv2(x)                       # [batch_size, 14, 14, 64]
        x = self.pool(x)                        # [batch_size, 7, 7, 64]
        x = self.flatten(x)                     # [batch_size, 7 * 7 * 64]
        x = self.dense1(x)                      # [batch_size, 1024]
        x = self.dense2(x)                      # [batch_size, 10]
        output = tf.nn.softmax(x)
        return output
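# Quick sanity check (a sketch, not from the original post): a dummy batch
# should come out as a [batch_size, 10] probability distribution:
#
#     m = CNN()
#     probs = m(tf.zeros([1, 28, 28, 1]))
#     print(probs.shape)                  # (1, 10)
#     print(float(tf.reduce_sum(probs)))  # ~1.0, since softmax normalizes each row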

num_epochs = 5
batch_size = 50
learning_rate = 0.001

model = CNN()
data_loader = MNISTLoader()
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

num_batches = int(data_loader.num_train_data // batch_size * num_epochs)
for batch_index in range(num_batches):
    X, y = data_loader.get_batch(batch_size)    # randomly sample a batch from the training set
    with tf.GradientTape() as tape:
        y_pred = model(X)                       # forward pass to get predictions
        loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=y_pred)  # per-example loss
        loss = tf.reduce_mean(loss)             # average over the batch
        print("batch %d: loss %f" % (batch_index, loss.numpy()))
    grads = tape.gradient(loss, model.variables)                           # compute gradients
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))  # apply the update
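# The loop above is the explicit form; the same training can be expressed with
# Keras' built-in loop (a sketch, not from the original post):
#
#     model.compile(optimizer=optimizer,
#                   loss=tf.keras.losses.sparse_categorical_crossentropy,
#                   metrics=['sparse_categorical_accuracy'])
#     model.fit(data_loader.train_data, data_loader.train_label,
#               epochs=num_epochs, batch_size=batch_size)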

# tf.keras.metrics.SparseCategoricalAccuracy is a metric (evaluator) object.
# Not sure why the predictions have to be checked batch by batch; presumably it
# keeps memory usage bounded on larger test sets.
sparse_categorical_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
num_batches = int(data_loader.num_test_data // batch_size)
for batch_index in range(num_batches):
    start_index, end_index = batch_index * batch_size, (batch_index + 1) * batch_size
    # Feed a slice of the test data to model.predict and accumulate the metric.
    y_pred = model.predict(data_loader.test_data[start_index:end_index])
    sparse_categorical_accuracy.update_state(y_true=data_loader.test_label[start_index:end_index], y_pred=y_pred)
print("test accuracy: %f" % sparse_categorical_accuracy.result())

 


Origin: www.cnblogs.com/lalalatianlalu/p/12499618.html