Building a CNN with TF 1.12 - Fashion MNIST Dataset

Copyright notice: this is the author's original post; please include the original link when reposting: https://blog.csdn.net/Dr_Guo/article/details/89375089
import tensorflow as tf
tf.enable_eager_execution()

from tensorflow import keras

import numpy as np
import matplotlib.pyplot as plt
import os
import subprocess

print(tf.__version__)
1.12.0
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# scale the values to 0.0 to 1.0
train_images = train_images / 255.0
test_images = test_images / 255.0

# reshape for feeding into the model
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

print('train_images.shape: {}, of {}'.format(train_images.shape, train_images.dtype))
print('test_images.shape: {}, of {}'.format(test_images.shape, test_images.dtype))
train_images.shape: (60000, 28, 28, 1), of float64
test_images.shape: (10000, 28, 28, 1), of float64
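
One optional tweak that is not in the original notebook: after the division the arrays are float64, while TensorFlow's default float type is float32, so casting roughly halves the memory the images take up:

# optional: cast to TensorFlow's default float dtype (assumption: results are otherwise unchanged)
train_images = train_images.astype(np.float32)
test_images = test_images.astype(np.float32)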
model = keras.Sequential([
  keras.layers.Conv2D(input_shape=(28,28,1), filters=8, kernel_size=3, 
                      strides=2, activation='relu', name='Conv1'),
  keras.layers.Flatten(),
  keras.layers.Dense(10, activation=tf.nn.softmax, name='Softmax')
])
model.summary()

epochs = 5

model.compile(optimizer=tf.train.AdamOptimizer(), 
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
Conv1 (Conv2D)               (None, 13, 13, 8)         80        
_________________________________________________________________
flatten (Flatten)            (None, 1352)              0         
_________________________________________________________________
Softmax (Dense)              (None, 10)                13530     
=================================================================
Total params: 13,610
Trainable params: 13,610
Non-trainable params: 0
_________________________________________________________________
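
As a sanity check, the numbers in the summary can be reproduced by hand; the snippet below is not part of the original notebook, it just repeats that arithmetic:

# Conv1: 3x3 kernel, 1 input channel, 8 filters, plus 8 biases
conv1_params = 3 * 3 * 1 * 8 + 8        # = 80
# 'valid' padding with stride 2: (28 - 3) // 2 + 1 = 13
conv1_out = (28 - 3) // 2 + 1           # = 13
flat = conv1_out * conv1_out * 8        # = 1352 values after Flatten
dense_params = flat * 10 + 10           # = 13530, so 13,610 parameters in total
print(conv1_params, flat, dense_params)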
model.fit(train_images, tf.to_int64(train_labels), epochs=epochs)

test_loss, test_acc = model.evaluate(test_images, tf.to_int64(test_labels))
print('\nTest accuracy: {}'.format(test_acc))
Epoch 1/5
1875/1875 [==============================] - 16s 9ms/step - loss: 0.5203 - acc: 0.8177
Epoch 2/5
1875/1875 [==============================] - 14s 7ms/step - loss: 0.3685 - acc: 0.8704
Epoch 3/5
1875/1875 [==============================] - 14s 7ms/step - loss: 0.3336 - acc: 0.8810
Epoch 4/5
1875/1875 [==============================] - 14s 7ms/step - loss: 0.3120 - acc: 0.8891
Epoch 5/5
1875/1875 [==============================] - 14s 7ms/step - loss: 0.2983 - acc: 0.8940
313/313 [==============================] - 1s 3ms/step

Test accuracy: 0.8828
model2 = keras.Sequential([
    keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu,
                           input_shape=(28, 28, 1)),
    # strides=2: stride of 2
    keras.layers.MaxPooling2D((2, 2), strides=2),
    keras.layers.Conv2D(64, (3,3), padding='same', activation=tf.nn.relu),
    keras.layers.MaxPooling2D((2, 2), strides=2),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10,  activation=tf.nn.softmax)
])
model2.summary()

epochs = 5

model2.compile(optimizer=tf.train.AdamOptimizer(), 
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model2.fit(train_images, tf.to_int64(train_labels), epochs=epochs)

test_loss, test_acc = model2.evaluate(test_images, tf.to_int64(test_labels))
print('\nTest accuracy: {}'.format(test_acc))
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 28, 28, 32)        320       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 14, 14, 32)        0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 14, 14, 64)        18496     
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 7, 7, 64)          0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 3136)              0         
_________________________________________________________________
dense (Dense)                (None, 128)               401536    
_________________________________________________________________
dense_1 (Dense)              (None, 10)                1290      
=================================================================
Total params: 421,642
Trainable params: 421,642
Non-trainable params: 0
_________________________________________________________________
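
The shapes in model2's summary follow the same logic: 'same' padding keeps the spatial size, and each pooling layer halves it. A quick check (again not from the original post):

size = 28 // 2                   # 28x28 after the 'same' conv, 14x14 after the first pool
size = size // 2                 # 7x7 after the second pool
flat = size * size * 64          # = 3136, the Flatten output
dense_params = flat * 128 + 128  # = 401,536 for the first Dense layer
print(size, flat, dense_params)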
Epoch 1/5
1875/1875 [==============================] - 21s 11ms/step - loss: 0.4057 - acc: 0.8557
Epoch 2/5
1875/1875 [==============================] - 20s 11ms/step - loss: 0.2649 - acc: 0.9037
Epoch 3/5
1875/1875 [==============================] - 20s 11ms/step - loss: 0.2201 - acc: 0.9189
Epoch 4/5
1875/1875 [==============================] - 21s 11ms/step - loss: 0.1889 - acc: 0.9297
Epoch 5/5
1875/1875 [==============================] - 20s 11ms/step - loss: 0.1586 - acc: 0.9412
313/313 [==============================] - 1s 3ms/step

Test accuracy: 0.9189
import matplotlib.pyplot as plt
def show(idx, title):
  plt.figure()
  plt.imshow(test_images[idx].reshape(28,28))
  plt.axis('off')
  plt.title('\n\n{}'.format(title), fontdict={'size': 16})

import random
rando = random.randint(0,len(test_images)-1)
show(rando, 'An Example Image: {}'.format(class_names[test_labels[rando]]))

[Figure: a randomly selected test image shown with its class name as the title]

import matplotlib.pyplot as plt

plt.figure(figsize=(10, 10))
for i in range(10):
    plt.subplot(5, 5, i+1)
    # plt.axis('off')
    plt.xticks([])
    plt.yticks([])
    plt.imshow(test_images[i].reshape(28,28))
    plt.xlabel("{} {}".format(class_names[test_labels[i]], test_labels[i]))

[Figure: grid of the first 10 test images, each labeled with its class name and numeric label]

print(type(test_images[:1]))
print(test_images[:1].shape)
# print(test_images[:1])
<class 'numpy.ndarray'>
(1, 28, 28, 1)
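
Keeping that leading batch dimension is the point of the reshape above: predict expects a batch, so a single image must stay shaped (1, 28, 28, 1). A small illustrative check, not in the original notebook:

probs = model2.predict(test_images[:1])    # shape (1, 10): one probability per class
print(class_names[np.argmax(probs[0])])    # e.g. 'Ankle boot' for the first test image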
keras.models.save_model(
    model2,
    "model/tf_cnn_fashion_mnist.h5",
    overwrite=True,
    include_optimizer=True
)
WARNING:tensorflow:TensorFlow optimizers do not make it possible to access optimizer attributes or optimizer state after instantiation. As a result, we cannot save the optimizer as part of the model save file.You will have to compile your model again after loading it. Prefer using a Keras optimizer instead (see keras.io/optimizers).
load_model = keras.models.load_model("model/tf_cnn_fashion_mnist.h5")
WARNING:tensorflow:No training configuration found in save file: the model was *not* compiled. Compile it manually.
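
As the warning says, the optimizer was not serialized, so the loaded model must be compiled again before any further fit or evaluate call (predict works without it). A minimal sketch, using a Keras optimizer as the warning recommends:

# re-compile manually; a Keras optimizer (unlike tf.train.AdamOptimizer) can be saved with the model
load_model.compile(optimizer=keras.optimizers.Adam(),
                   loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])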
pre = load_model.predict_classes(test_images[:10])
test_labels[:10]
array([9, 2, 1, 1, 6, 1, 4, 6, 5, 7], dtype=uint8)
pre
array([9, 2, 1, 1, 6, 1, 4, 6, 5, 7], dtype=int64)
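
The first ten predictions match the labels exactly. As a small addition that is not in the original notebook, the same comparison over the whole test set can be done with NumPy and should come out close to the 0.9189 reported by evaluate above:

all_pre = load_model.predict_classes(test_images)
print('accuracy: {:.4f}'.format(np.mean(all_pre == test_labels)))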
!jupyter nbconvert --to markdown "使用TF1.12建立CNN模型-Fashion MNIST Dataset.ipynb"
[NbConvertApp] Converting notebook 使用TF1.12建立CNN模型-Fashion MNIST Dataset.ipynb to markdown
[NbConvertApp] Writing 11852 bytes to 使用TF1.12建立CNN模型-Fashion MNIST Dataset.md

References:
https://tensorflow.google.cn/tfx/tutorials/serving/rest_simple
https://tensorflow.google.cn/versions/r1.12/api_docs/python
