import numpy as np

from keras.datasets import mnist
from keras.layers import Dense
from keras.layers.recurrent import SimpleRNN
from keras.models import Sequential
from keras.optimizers import Adam
from keras.utils import np_utils
# ----- Hyperparameters -----
input_size = 28   # data length: one row of 28 pixels
time_steps = 28   # sequence length: 28 rows per image
cell_size = 50    # number of hidden-layer (RNN) cells

# Load MNIST: x_train has shape (60000, 28, 28); scale pixels to [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0

# Convert integer labels to one-hot vectors (10 classes).
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# ----- Build the model: a simple RNN followed by a softmax output layer -----
model = Sequential()
model.add(SimpleRNN(
    units=cell_size,                       # output dimensionality of the RNN
    input_shape=(time_steps, input_size),  # (rows as time steps, pixels per row)
))
model.add(Dense(10, activation='softmax'))

# Adam optimizer with a small learning rate.
adam = Adam(lr=1e-4)

# Compile: categorical cross-entropy loss, track classification accuracy.
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# ----- Train -----
model.fit(x_train, y_train, batch_size=64, epochs=10)

# ----- Evaluate on the held-out test set -----
loss, accuracy = model.evaluate(x_test, y_test)
print('test loss', loss)
print('test accuracy', accuracy)