tf.keras starter example: LeNet handwritten digit classification (extension of part 2), loading data with a Sequence generator and training in eager mode

Copyright notice: this is an original post by the author and may not be reposted without permission. https://blog.csdn.net/u010472607/article/details/82592260

Source code

# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time
import argparse
import math

import tensorflow as tf
from tensorflow.keras.utils import Sequence
import tensorflow.contrib.eager as tfe
import numpy as np

"""两种方式加载的数据集不同图像部分数据是不同的,
official.mnist: 加载的图像是uint8数据类型编码, /255. 需要归一化
tensorflow.examples.tutorials.mnist 是float类型编码, 无需归一化操作
"""
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Model
from tensorflow.keras.layers import MaxPooling2D, Conv2D, Input, Flatten, Dense

# Enable eager execution (must run before any other TF op)
tf.enable_eager_execution()

"""
Sequence生成器的方法
__iter__()
__getitem__(index)
支持batch
"""
class DataGenerator(Sequence):
    # Custom dataset loader.
    # The input can be a list of files or any other format; just implement the
    # corresponding loading and preprocessing below.
    def __init__(self, x, y, batch_size=32, shuffle=True):
        self.batch_size = batch_size
        self.x, self.y = x, y
        # Index array over the samples, reshuffled between epochs
        self.indexes = np.arange(len(self.x))
        self.shuffle = shuffle

    def __len__(self):
        # Number of batches per epoch
        return int(math.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, index):
        # Produce one batch; adapt this to however your data is stored.
        # Slice out batch_size indexes...
        batch_indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # ...and gather the corresponding samples from the dataset
        x, y = self.data_generation(batch_indexes)
        return x, y

    def on_epoch_end(self):
        # Optionally reshuffle the indexes at the end of each epoch
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def data_generation(self, batch_indexes):
        # Gather the samples selected for this batch
        x = self.x[batch_indexes]
        y = self.y[batch_indexes]
        return x, y
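
# Illustrative usage of DataGenerator (a sketch, not part of the original script):
#   gen = DataGenerator(np.zeros((100, 28, 28, 1), np.float32),
#                       np.zeros((100, 10), np.float32), batch_size=32)
#   len(gen)         # -> 4 batches per epoch, i.e. ceil(100 / 32)
#   xb, yb = gen[0]  # -> arrays of shape (32, 28, 28, 1) and (32, 10)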

class LeNet(Model):
    def __init__(self, input_shape=(28, 28, 1), num_classes=10):
        self.num_classes = num_classes
        ''' Define the layers '''
        # Input layer
        img_input = Input(shape=input_shape)

        # Conv => ReLU => Pool
        x = Conv2D(filters=20, kernel_size=5, padding="same", activation="relu", name='block1_conv1')(img_input)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool')(x)
        # Conv => ReLU => Pool
        x = Conv2D(filters=50, kernel_size=5, padding="same", activation="relu", name='block1_conv2')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool2')(x)
        # Flatten to 1-D
        x = Flatten(name='flatten')(x)
        # Fully connected layer
        x = Dense(units=500, activation="relu", name="f1")(x)
        # Softmax classifier
        x = Dense(units=num_classes, activation="softmax", name="prediction")(x)

        # Invoke the functional Model(inputs, outputs, name=...) constructor
        super(LeNet, self).__init__(img_input, x, name="LeNet")
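
# Being a functional Model under the hood, the network can be inspected with the
# usual Keras tools, e.g. LeNet().summary() prints the per-layer output shapes
# (an illustrative call, not part of the original run).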

# Custom loss function
def loss(logits, labels):
    # softmax_cross_entropy_with_logits_v2 computes one loss per example
    # (a tensor of length N = batch size); reduce_mean averages over the batch.
    # Note: the model's last layer already applies softmax, so its output is
    # probabilities rather than raw logits; passing it here applies softmax
    # twice, which trains but converges slowly (visible in the run output below).
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
    # For integer (non one-hot) labels, use sparse_softmax_cross_entropy_with_logits instead.

# Custom evaluation metric
def compute_accuracy(logits, labels):
    predictions = tf.argmax(input=logits, axis=1, output_type=tf.int64)
    # Labels are one-hot encoded, so reduce them with argmax as well
    labels = tf.argmax(input=labels, axis=1, output_type=tf.int64)
    batch_size = int(logits.shape[0])
    return tf.reduce_sum(tf.cast(tf.equal(predictions, labels), dtype=tf.float32)) / batch_size
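
# A quick illustrative check in eager mode (not part of the original script):
#   logits = tf.constant([[0.1, 0.9], [0.8, 0.2]])
#   labels = tf.constant([[0., 1.], [1., 0.]])
#   compute_accuracy(logits, labels)  # -> 1.0, both argmaxes match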



def run_mnist_eager(cfg):
    # Pick a device automatically
    (device, data_format) = ('/gpu:0', 'channels_first')
    if not tf.test.is_gpu_available():
        (device, data_format) = ('/cpu:0', 'channels_last')

    print('Using device %s, and data format %s.' % (device, data_format))
    # Load the dataset: tuples of (images, one-hot labels)
    train_ds, test_ds = load_mnist()  # shapes (N, 28, 28, 1) / (N, 10)
    # tf.data alternative (see the sketch after this listing):
    # train_ds = train_ds.shuffle(60000, reshuffle_each_iteration=True).batch(cfg.batch_size)
    # test_ds = test_ds.batch(cfg.batch_size)

    # Create the model and optimizer
    model = LeNet()
    optimizer = tf.train.MomentumOptimizer(cfg.lr, cfg.momentum)
    model.compile(optimizer=optimizer,
                  loss=loss,  # or the built-in 'categorical_crossentropy'
                  metrics=[compute_accuracy])  # or the built-in 'accuracy'

    # Create and restore checkpoint (if one exists on the path)
    checkpoint_prefix = os.path.join(cfg.model_dir, 'ckpt')
    step_counter = tf.train.get_or_create_global_step()
    checkpoint = tfe.Checkpoint(model=model, optimizer=optimizer, step_counter=step_counter)
    # Restore the model parameters from the checkpoint file, if it exists
    checkpoint.restore(tf.train.latest_checkpoint(cfg.model_dir))

    # Feed the data through Sequence generators; this works in eager mode
    train_ds = DataGenerator(train_ds[0], train_ds[1], batch_size=200)
    test_ds = DataGenerator(test_ds[0], test_ds[1], batch_size=200)
    # Train and evaluate for a set number of epochs.
    with tf.device(device):  # required for the computation to run on the GPU
        for _ in range(cfg.train_epochs):
            start = time.time()

            model.fit_generator(generator=train_ds, epochs=1)
            # Non-generator alternative:
            # model.fit(train_ds[0], train_ds[1], batch_size=200, epochs=1)

            # Evaluate with the generator (verbose=0 would suppress the progress bar)
            _loss, _acc = model.evaluate_generator(generator=test_ds, verbose=1)
            print("test dataset loss: %f acc: %f" % (_loss, _acc))
            end = time.time()
            print('\nTrain time for epoch #%d (%d total steps): %f' %
                    (checkpoint.save_counter.numpy() + 1, step_counter.numpy(), end - start))

            checkpoint.save(checkpoint_prefix)

def arg_parse():
    """参数定义"""
    parser = argparse.ArgumentParser(description="Lenet-5 MNIST 模型")
    parser.add_argument("--lr", dest="lr", help="学习率", default=0.01, type=float)
    parser.add_argument("--momentum", dest="momentum", help="SGD momentum.", default=0.5)

    parser.add_argument("--data_dir", dest="data_dir", help="数据集下载/保存目录", default="data/mnist/input_data/")
    parser.add_argument("--model_dir", dest="model_dir", help="模型保存目录", default="data/mnist/checkpoints/")
    parser.add_argument("--batch_size", dest="batch_size", help="训练或测试时 Batch Size", default=100, type=int)
    parser.add_argument("--train_epochs", dest="train_epochs", help="训练时epoch迭代次数", default=4, type=int)
    parser.add_argument("--log_interval", dest="log_interval", help="日志打印间隔", default=10, type=int)

    # Return the parsed arguments
    return parser.parse_args()

def load_mnist():
    # Load the data, cast to float32, and normalize to [0, 1]
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype("float32") / 255.
    x_test = x_test.astype("float32") / 255.

    # Add a channel dimension (equivalent to np.expand_dims(x, -1))
    x_train = x_train[:, :, :, None]
    x_test = x_test[:, :, :, None]

    print(x_train.shape, "train shape")
    print(x_test.shape, "test shape")

    # One-hot encode the labels
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    train_ds = (x_train, y_train)
    test_ds = (x_test, y_test)

    return train_ds, test_ds

if __name__ == '__main__':
    args = arg_parse()
    run_mnist_eager(args)
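
The commented-out lines in run_mnist_eager hint at the tf.data alternative to the Sequence generator. Below is a minimal sketch of that path, assuming the same TF 1.x eager setup; make_datasets and the steps_per_epoch value are illustrative, not part of the original script.

def make_datasets(cfg):
    # Build tf.data pipelines from the same arrays that load_mnist() returns
    (x_train, y_train), (x_test, y_test) = load_mnist()
    train = tf.data.Dataset.from_tensor_slices((x_train, y_train)) \
                           .shuffle(60000, reshuffle_each_iteration=True) \
                           .batch(cfg.batch_size)
    test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(cfg.batch_size)
    return train, test

# In eager mode the datasets can be iterated directly, or passed to
# fit/evaluate with an explicit step count, e.g.:
# model.fit(train, epochs=1, steps_per_epoch=60000 // cfg.batch_size)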

Run output

2018-09-10 16:50:41.274575: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2018-09-10 16:50:42.035425: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1405] Found device 0 with properties: 
name: TITAN Xp major: 6 minor: 1 memoryClockRate(GHz): 1.582
pciBusID: 0000:85:00.0
totalMemory: 11.90GiB freeMemory: 11.74GiB
2018-09-10 16:50:42.035472: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1484] Adding visible gpu devices: 0
2018-09-10 16:50:42.494465: I tensorflow/core/common_runtime/gpu/gpu_device.cc:965] Device interconnect StreamExecutor with strength 1 edge matrix:
2018-09-10 16:50:42.494531: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971]      0 
2018-09-10 16:50:42.494539: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] 0:   N 
2018-09-10 16:50:42.498417: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1097] Created TensorFlow device (/device:GPU:0 with 11355 MB memory) -> physical GPU (device: 0, name: TITAN Xp, pci bus id: 0000:85:00.0, compute capability: 6.1)
Using device /gpu:0, and data format channels_first.
(60000, 28, 28, 1) train shape
(10000, 28, 28, 1) test shape
2018-09-10 16:50:43.133449: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1484] Adding visible gpu devices: 0
2018-09-10 16:50:43.133544: I tensorflow/core/common_runtime/gpu/gpu_device.cc:965] Device interconnect StreamExecutor with strength 1 edge matrix:
2018-09-10 16:50:43.133554: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971]      0 
2018-09-10 16:50:43.133578: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] 0:   N 
2018-09-10 16:50:43.133817: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1097] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 11355 MB memory) -> physical GPU (device: 0, name: TITAN Xp, pci bus id: 0000:85:00.0, compute capability: 6.1)
Epoch 1/1
  1/300 [..............................] - ETA: 6:29 - loss: 2.3625 - compute_accuracy: 0.0850
  6/300 [..............................] - ETA: 1:06 - loss: 2.3624 - compute_accuracy: 0.0883
 10/300 [>.............................] - ETA: 40s - loss: 2.3619 - compute_accuracy: 0.1030 
 16/300 [>.............................] - ETA: 26s - loss: 2.3620 - compute_accuracy: 0.0934
 21/300 [=>............................] - ETA: 20s - loss: 2.3619 - compute_accuracy: 0.0967
 27/300 [=>............................] - ETA: 15s - loss: 2.3619 - compute_accuracy: 0.0989
 32/300 [==>...........................] - ETA: 13s - loss: 2.3617 - compute_accuracy: 0.1027
 37/300 [==>...........................] - ETA: 11s - loss: 2.3617 - compute_accuracy: 0.1022
 42/300 [===>..........................] - ETA: 10s - loss: 2.3615 - compute_accuracy: 0.1060
 47/300 [===>..........................] - ETA: 9s - loss: 2.3615 - compute_accuracy: 0.1086 
 53/300 [====>.........................] - ETA: 8s - loss: 2.3613 - compute_accuracy: 0.1125
 59/300 [====>.........................] - ETA: 7s - loss: 2.3612 - compute_accuracy: 0.1154
 64/300 [=====>........................] - ETA: 7s - loss: 2.3611 - compute_accuracy: 0.1176
 70/300 [======>.......................] - ETA: 6s - loss: 2.3610 - compute_accuracy: 0.1211
 74/300 [======>.......................] - ETA: 6s - loss: 2.3609 - compute_accuracy: 0.1239
 79/300 [======>.......................] - ETA: 5s - loss: 2.3608 - compute_accuracy: 0.1278
 84/300 [=======>......................] - ETA: 5s - loss: 2.3607 - compute_accuracy: 0.1322
 89/300 [=======>......................] - ETA: 5s - loss: 2.3606 - compute_accuracy: 0.1347
 94/300 [========>.....................] - ETA: 5s - loss: 2.3605 - compute_accuracy: 0.1389
100/300 [=========>....................] - ETA: 4s - loss: 2.3604 - compute_accuracy: 0.1428
106/300 [=========>....................] - ETA: 4s - loss: 2.3603 - compute_accuracy: 0.1461
110/300 [==========>...................] - ETA: 4s - loss: 2.3602 - compute_accuracy: 0.1485
114/300 [==========>...................] - ETA: 4s - loss: 2.3601 - compute_accuracy: 0.1507
119/300 [==========>...................] - ETA: 3s - loss: 2.3601 - compute_accuracy: 0.1536
123/300 [===========>..................] - ETA: 3s - loss: 2.3600 - compute_accuracy: 0.1563
129/300 [===========>..................] - ETA: 3s - loss: 2.3599 - compute_accuracy: 0.1605
134/300 [============>.................] - ETA: 3s - loss: 2.3598 - compute_accuracy: 0.1644
140/300 [=============>................] - ETA: 3s - loss: 2.3597 - compute_accuracy: 0.1671
145/300 [=============>................] - ETA: 3s - loss: 2.3596 - compute_accuracy: 0.1708
151/300 [==============>...............] - ETA: 2s - loss: 2.3594 - compute_accuracy: 0.1750
156/300 [==============>...............] - ETA: 2s - loss: 2.3593 - compute_accuracy: 0.1785
162/300 [===============>..............] - ETA: 2s - loss: 2.3591 - compute_accuracy: 0.1820
168/300 [===============>..............] - ETA: 2s - loss: 2.3590 - compute_accuracy: 0.1868
174/300 [================>.............] - ETA: 2s - loss: 2.3588 - compute_accuracy: 0.1917
180/300 [=================>............] - ETA: 2s - loss: 2.3587 - compute_accuracy: 0.1958
185/300 [=================>............] - ETA: 2s - loss: 2.3586 - compute_accuracy: 0.1999
191/300 [==================>...........] - ETA: 1s - loss: 2.3584 - compute_accuracy: 0.2055
196/300 [==================>...........] - ETA: 1s - loss: 2.3583 - compute_accuracy: 0.2090
201/300 [===================>..........] - ETA: 1s - loss: 2.3582 - compute_accuracy: 0.2123
207/300 [===================>..........] - ETA: 1s - loss: 2.3581 - compute_accuracy: 0.2165
212/300 [====================>.........] - ETA: 1s - loss: 2.3579 - compute_accuracy: 0.2199
216/300 [====================>.........] - ETA: 1s - loss: 2.3579 - compute_accuracy: 0.2221
222/300 [=====================>........] - ETA: 1s - loss: 2.3577 - compute_accuracy: 0.2261
228/300 [=====================>........] - ETA: 1s - loss: 2.3576 - compute_accuracy: 0.2296
232/300 [======================>.......] - ETA: 1s - loss: 2.3575 - compute_accuracy: 0.2324
236/300 [======================>.......] - ETA: 1s - loss: 2.3573 - compute_accuracy: 0.2350
241/300 [=======================>......] - ETA: 0s - loss: 2.3572 - compute_accuracy: 0.2379
247/300 [=======================>......] - ETA: 0s - loss: 2.3571 - compute_accuracy: 0.2411
253/300 [========================>.....] - ETA: 0s - loss: 2.3569 - compute_accuracy: 0.2454
257/300 [========================>.....] - ETA: 0s - loss: 2.3568 - compute_accuracy: 0.2478
260/300 [=========================>....] - ETA: 0s - loss: 2.3567 - compute_accuracy: 0.2490
266/300 [=========================>....] - ETA: 0s - loss: 2.3565 - compute_accuracy: 0.2524
272/300 [==========================>...] - ETA: 0s - loss: 2.3563 - compute_accuracy: 0.2555
277/300 [==========================>...] - ETA: 0s - loss: 2.3562 - compute_accuracy: 0.2576
282/300 [===========================>..] - ETA: 0s - loss: 2.3560 - compute_accuracy: 0.2598
286/300 [===========================>..] - ETA: 0s - loss: 2.3559 - compute_accuracy: 0.2617
290/300 [============================>.] - ETA: 0s - loss: 2.3557 - compute_accuracy: 0.2634
295/300 [============================>.] - ETA: 0s - loss: 2.3555 - compute_accuracy: 0.2649
300/300 [==============================] - 4s 15ms/step - loss: 2.3553 - compute_accuracy: 0.2668
 1/50 [..............................] - ETA: 0s
10/50 [=====>........................] - ETA: 0s
21/50 [===========>..................] - ETA: 0s
31/50 [=================>............] - ETA: 0s
42/50 [========================>.....] - ETA: 0s
50/50 [==============================] - 0s 5ms/step
test dataset loss: 2.343393 acc: 0.370100

Train time for epoch #1 (0 total steps): 4.813967
Epoch 1/1
  1/300 [..............................] - ETA: 5s - loss: 2.3444 - compute_accuracy: 0.3750
  6/300 [..............................] - ETA: 3s - loss: 2.3428 - compute_accuracy: 0.3792
 10/300 [>.............................] - ETA: 3s - loss: 2.3428 - compute_accuracy: 0.3675
 14/300 [>.............................] - ETA: 3s - loss: 2.3422 - compute_accuracy: 0.3614
 19/300 [>.............................] - ETA: 3s - loss: 2.3418 - compute_accuracy: 0.3508
 24/300 [=>............................] - ETA: 3s - loss: 2.3416 - compute_accuracy: 0.3412
 30/300 [==>...........................] - ETA: 3s - loss: 2.3409 - compute_accuracy: 0.3407
 36/300 [==>...........................] - ETA: 2s - loss: 2.3400 - compute_accuracy: 0.3392
 41/300 [===>..........................] - ETA: 2s - loss: 2.3394 - compute_accuracy: 0.3357
 47/300 [===>..........................] - ETA: 2s - loss: 2.3385 - compute_accuracy: 0.3290
 52/300 [====>.........................] - ETA: 2s - loss: 2.3376 - compute_accuracy: 0.3224
 57/300 [====>.........................] - ETA: 2s - loss: 2.3369 - compute_accuracy: 0.3169
 63/300 [=====>........................] - ETA: 2s - loss: 2.3359 - compute_accuracy: 0.3123
 69/300 [=====>........................] - ETA: 2s - loss: 2.3347 - compute_accuracy: 0.3064
 74/300 [======>.......................] - ETA: 2s - loss: 2.3340 - compute_accuracy: 0.2997
 79/300 [======>.......................] - ETA: 2s - loss: 2.3329 - compute_accuracy: 0.2946
 85/300 [=======>......................] - ETA: 2s - loss: 2.3316 - compute_accuracy: 0.2901
 91/300 [========>.....................] - ETA: 2s - loss: 2.3300 - compute_accuracy: 0.2848
 97/300 [========>.....................] - ETA: 2s - loss: 2.3279 - compute_accuracy: 0.2820
103/300 [=========>....................] - ETA: 2s - loss: 2.3263 - compute_accuracy: 0.2792
110/300 [==========>...................] - ETA: 1s - loss: 2.3240 - compute_accuracy: 0.2769
116/300 [==========>...................] - ETA: 1s - loss: 2.3220 - compute_accuracy: 0.2750
121/300 [===========>..................] - ETA: 1s - loss: 2.3196 - compute_accuracy: 0.2747
126/300 [===========>..................] - ETA: 1s - loss: 2.3175 - compute_accuracy: 0.2743
130/300 [============>.................] - ETA: 1s - loss: 2.3157 - compute_accuracy: 0.2749
135/300 [============>.................] - ETA: 1s - loss: 2.3136 - compute_accuracy: 0.2754
139/300 [============>.................] - ETA: 1s - loss: 2.3119 - compute_accuracy: 0.2757
144/300 [=============>................] - ETA: 1s - loss: 2.3094 - compute_accuracy: 0.2760
150/300 [==============>...............] - ETA: 1s - loss: 2.3064 - compute_accuracy: 0.2766
156/300 [==============>...............] - ETA: 1s - loss: 2.3026 - compute_accuracy: 0.2797
162/300 [===============>..............] - ETA: 1s - loss: 2.2988 - compute_accuracy: 0.2827
168/300 [===============>..............] - ETA: 1s - loss: 2.2948 - compute_accuracy: 0.2858
174/300 [================>.............] - ETA: 1s - loss: 2.2904 - compute_accuracy: 0.2899
179/300 [================>.............] - ETA: 1s - loss: 2.2864 - compute_accuracy: 0.2941
184/300 [=================>............] - ETA: 1s - loss: 2.2827 - compute_accuracy: 0.2983
190/300 [==================>...........] - ETA: 1s - loss: 2.2773 - compute_accuracy: 0.3047
195/300 [==================>...........] - ETA: 1s - loss: 2.2735 - compute_accuracy: 0.3088
200/300 [===================>..........] - ETA: 1s - loss: 2.2689 - compute_accuracy: 0.3142
206/300 [===================>..........] - ETA: 0s - loss: 2.2633 - compute_accuracy: 0.3201
212/300 [====================>.........] - ETA: 0s - loss: 2.2570 - compute_accuracy: 0.3267
218/300 [====================>.........] - ETA: 0s - loss: 2.2509 - compute_accuracy: 0.3325
224/300 [=====================>........] - ETA: 0s - loss: 2.2450 - compute_accuracy: 0.3377
230/300 [======================>.......] - ETA: 0s - loss: 2.2393 - compute_accuracy: 0.3425
236/300 [======================>.......] - ETA: 0s - loss: 2.2336 - compute_accuracy: 0.3473
242/300 [=======================>......] - ETA: 0s - loss: 2.2273 - compute_accuracy: 0.3524
248/300 [=======================>......] - ETA: 0s - loss: 2.2215 - compute_accuracy: 0.3574
253/300 [========================>.....] - ETA: 0s - loss: 2.2164 - compute_accuracy: 0.3626
259/300 [========================>.....] - ETA: 0s - loss: 2.2098 - compute_accuracy: 0.3691
265/300 [=========================>....] - ETA: 0s - loss: 2.2036 - compute_accuracy: 0.3748
270/300 [==========================>...] - ETA: 0s - loss: 2.1986 - compute_accuracy: 0.3793
275/300 [==========================>...] - ETA: 0s - loss: 2.1934 - compute_accuracy: 0.3839
281/300 [===========================>..] - ETA: 0s - loss: 2.1871 - compute_accuracy: 0.3893
287/300 [===========================>..] - ETA: 0s - loss: 2.1811 - compute_accuracy: 0.3943
294/300 [============================>.] - ETA: 0s - loss: 2.1742 - compute_accuracy: 0.4002
298/300 [============================>.] - ETA: 0s - loss: 2.1705 - compute_accuracy: 0.4031
300/300 [==============================] - 3s 10ms/step - loss: 2.1686 - compute_accuracy: 0.4046
 1/50 [..............................] - ETA: 0s
11/50 [=====>........................] - ETA: 0s
18/50 [=========>....................] - ETA: 0s
27/50 [===============>..............] - ETA: 0s
36/50 [====================>.........] - ETA: 0s
46/50 [==========================>...] - ETA: 0s
50/50 [==============================] - 0s 6ms/step
test dataset loss: 1.871826 acc: 0.650700

Train time for epoch #2 (0 total steps): 3.449475
Epoch 1/1
  1/300 [..............................] - ETA: 5s - loss: 1.8883 - compute_accuracy: 0.6350
  5/300 [..............................] - ETA: 4s - loss: 1.8871 - compute_accuracy: 0.6280
 10/300 [>.............................] - ETA: 3s - loss: 1.8768 - compute_accuracy: 0.6360
 16/300 [>.............................] - ETA: 3s - loss: 1.8804 - compute_accuracy: 0.6353
 22/300 [=>............................] - ETA: 3s - loss: 1.8771 - compute_accuracy: 0.6395
 27/300 [=>............................] - ETA: 3s - loss: 1.8742 - compute_accuracy: 0.6404
 32/300 [==>...........................] - ETA: 3s - loss: 1.8694 - compute_accuracy: 0.6464
 36/300 [==>...........................] - ETA: 3s - loss: 1.8647 - compute_accuracy: 0.6521
 41/300 [===>..........................] - ETA: 3s - loss: 1.8601 - compute_accuracy: 0.6593
 46/300 [===>..........................] - ETA: 2s - loss: 1.8593 - compute_accuracy: 0.6605
 51/300 [====>.........................] - ETA: 2s - loss: 1.8554 - compute_accuracy: 0.6644
 56/300 [====>.........................] - ETA: 2s - loss: 1.8498 - compute_accuracy: 0.6700
 61/300 [=====>........................] - ETA: 2s - loss: 1.8462 - compute_accuracy: 0.6739
 67/300 [=====>........................] - ETA: 2s - loss: 1.8425 - compute_accuracy: 0.6775
 73/300 [======>.......................] - ETA: 2s - loss: 1.8385 - compute_accuracy: 0.6814
 79/300 [======>.......................] - ETA: 2s - loss: 1.8361 - compute_accuracy: 0.6828
 85/300 [=======>......................] - ETA: 2s - loss: 1.8329 - compute_accuracy: 0.6852
 91/300 [========>.....................] - ETA: 2s - loss: 1.8300 - compute_accuracy: 0.6873
 95/300 [========>.....................] - ETA: 2s - loss: 1.8281 - compute_accuracy: 0.6890
 99/300 [========>.....................] - ETA: 2s - loss: 1.8271 - compute_accuracy: 0.6898
105/300 [=========>....................] - ETA: 2s - loss: 1.8251 - compute_accuracy: 0.6914
111/300 [==========>...................] - ETA: 2s - loss: 1.8228 - compute_accuracy: 0.6928
117/300 [==========>...................] - ETA: 1s - loss: 1.8204 - compute_accuracy: 0.6949
123/300 [===========>..................] - ETA: 1s - loss: 1.8183 - compute_accuracy: 0.6966
128/300 [===========>..................] - ETA: 1s - loss: 1.8161 - compute_accuracy: 0.6985
133/300 [============>.................] - ETA: 1s - loss: 1.8146 - compute_accuracy: 0.6994
138/300 [============>.................] - ETA: 1s - loss: 1.8119 - compute_accuracy: 0.7017
143/300 [=============>................] - ETA: 1s - loss: 1.8111 - compute_accuracy: 0.7020
149/300 [=============>................] - ETA: 1s - loss: 1.8088 - compute_accuracy: 0.7039
154/300 [==============>...............] - ETA: 1s - loss: 1.8079 - compute_accuracy: 0.7043
158/300 [==============>...............] - ETA: 1s - loss: 1.8055 - compute_accuracy: 0.7066
163/300 [===============>..............] - ETA: 1s - loss: 1.8045 - compute_accuracy: 0.7075
167/300 [===============>..............] - ETA: 1s - loss: 1.8027 - compute_accuracy: 0.7096
172/300 [================>.............] - ETA: 1s - loss: 1.8008 - compute_accuracy: 0.7119
177/300 [================>.............] - ETA: 1s - loss: 1.7992 - compute_accuracy: 0.7136
182/300 [=================>............] - ETA: 1s - loss: 1.7979 - compute_accuracy: 0.7153
187/300 [=================>............] - ETA: 1s - loss: 1.7962 - compute_accuracy: 0.7169
192/300 [==================>...........] - ETA: 1s - loss: 1.7942 - compute_accuracy: 0.7189
197/300 [==================>...........] - ETA: 1s - loss: 1.7922 - compute_accuracy: 0.7208
202/300 [===================>..........] - ETA: 1s - loss: 1.7908 - compute_accuracy: 0.7222
207/300 [===================>..........] - ETA: 0s - loss: 1.7890 - compute_accuracy: 0.7237
213/300 [====================>.........] - ETA: 0s - loss: 1.7870 - compute_accuracy: 0.7255
218/300 [====================>.........] - ETA: 0s - loss: 1.7854 - compute_accuracy: 0.7269
224/300 [=====================>........] - ETA: 0s - loss: 1.7835 - compute_accuracy: 0.7287
230/300 [======================>.......] - ETA: 0s - loss: 1.7814 - compute_accuracy: 0.7305
236/300 [======================>.......] - ETA: 0s - loss: 1.7790 - compute_accuracy: 0.7329
240/300 [=======================>......] - ETA: 0s - loss: 1.7776 - compute_accuracy: 0.7343
245/300 [=======================>......] - ETA: 0s - loss: 1.7759 - compute_accuracy: 0.7357
249/300 [=======================>......] - ETA: 0s - loss: 1.7749 - compute_accuracy: 0.7365
254/300 [========================>.....] - ETA: 0s - loss: 1.7731 - compute_accuracy: 0.7381
259/300 [========================>.....] - ETA: 0s - loss: 1.7715 - compute_accuracy: 0.7394
264/300 [=========================>....] - ETA: 0s - loss: 1.7704 - compute_accuracy: 0.7401
269/300 [=========================>....] - ETA: 0s - loss: 1.7688 - compute_accuracy: 0.7416
273/300 [==========================>...] - ETA: 0s - loss: 1.7676 - compute_accuracy: 0.7426
278/300 [==========================>...] - ETA: 0s - loss: 1.7663 - compute_accuracy: 0.7438
283/300 [===========================>..] - ETA: 0s - loss: 1.7650 - compute_accuracy: 0.7449
289/300 [===========================>..] - ETA: 0s - loss: 1.7631 - compute_accuracy: 0.7468
293/300 [============================>.] - ETA: 0s - loss: 1.7621 - compute_accuracy: 0.7476
298/300 [============================>.] - ETA: 0s - loss: 1.7607 - compute_accuracy: 0.7487
300/300 [==============================] - 3s 11ms/step - loss: 1.7603 - compute_accuracy: 0.7491
 1/50 [..............................] - ETA: 0s
10/50 [=====>........................] - ETA: 0s
19/50 [==========>...................] - ETA: 0s
27/50 [===============>..............] - ETA: 0s
38/50 [=====================>........] - ETA: 0s
48/50 [===========================>..] - ETA: 0s
50/50 [==============================] - 0s 6ms/step
test dataset loss: 1.678678 acc: 0.812900

Train time for epoch #3 (0 total steps): 3.654161
Epoch 1/1
  1/300 [..............................] - ETA: 6s - loss: 1.7017 - compute_accuracy: 0.7900
  6/300 [..............................] - ETA: 3s - loss: 1.6759 - compute_accuracy: 0.8167
 11/300 [>.............................] - ETA: 3s - loss: 1.6847 - compute_accuracy: 0.8064
 17/300 [>.............................] - ETA: 3s - loss: 1.6871 - compute_accuracy: 0.8056
 22/300 [=>............................] - ETA: 3s - loss: 1.6872 - compute_accuracy: 0.8061
 28/300 [=>............................] - ETA: 2s - loss: 1.6866 - compute_accuracy: 0.8057
 33/300 [==>...........................] - ETA: 2s - loss: 1.6869 - compute_accuracy: 0.8052
 37/300 [==>...........................] - ETA: 2s - loss: 1.6865 - compute_accuracy: 0.8055
 42/300 [===>..........................] - ETA: 2s - loss: 1.6835 - compute_accuracy: 0.8086
 47/300 [===>..........................] - ETA: 2s - loss: 1.6834 - compute_accuracy: 0.8080
 52/300 [====>.........................] - ETA: 2s - loss: 1.6802 - compute_accuracy: 0.8113
 56/300 [====>.........................] - ETA: 2s - loss: 1.6800 - compute_accuracy: 0.8108
 61/300 [=====>........................] - ETA: 2s - loss: 1.6800 - compute_accuracy: 0.8105
 66/300 [=====>........................] - ETA: 2s - loss: 1.6797 - compute_accuracy: 0.8105
 71/300 [======>.......................] - ETA: 2s - loss: 1.6784 - compute_accuracy: 0.8118
 77/300 [======>.......................] - ETA: 2s - loss: 1.6778 - compute_accuracy: 0.8119
 82/300 [=======>......................] - ETA: 2s - loss: 1.6773 - compute_accuracy: 0.8124
 87/300 [=======>......................] - ETA: 2s - loss: 1.6757 - compute_accuracy: 0.8139
 93/300 [========>.....................] - ETA: 2s - loss: 1.6741 - compute_accuracy: 0.8152
 99/300 [========>.....................] - ETA: 2s - loss: 1.6739 - compute_accuracy: 0.8151
105/300 [=========>....................] - ETA: 2s - loss: 1.6733 - compute_accuracy: 0.8158
111/300 [==========>...................] - ETA: 2s - loss: 1.6735 - compute_accuracy: 0.8153
115/300 [==========>...................] - ETA: 1s - loss: 1.6740 - compute_accuracy: 0.8147
120/300 [===========>..................] - ETA: 1s - loss: 1.6735 - compute_accuracy: 0.8149
125/300 [===========>..................] - ETA: 1s - loss: 1.6725 - compute_accuracy: 0.8158
131/300 [============>.................] - ETA: 1s - loss: 1.6720 - compute_accuracy: 0.8161
135/300 [============>.................] - ETA: 1s - loss: 1.6710 - compute_accuracy: 0.8170
140/300 [=============>................] - ETA: 1s - loss: 1.6707 - compute_accuracy: 0.8171
145/300 [=============>................] - ETA: 1s - loss: 1.6702 - compute_accuracy: 0.8173
150/300 [==============>...............] - ETA: 1s - loss: 1.6700 - compute_accuracy: 0.8174
155/300 [==============>...............] - ETA: 1s - loss: 1.6699 - compute_accuracy: 0.8173
161/300 [===============>..............] - ETA: 1s - loss: 1.6696 - compute_accuracy: 0.8178
167/300 [===============>..............] - ETA: 1s - loss: 1.6696 - compute_accuracy: 0.8175
173/300 [================>.............] - ETA: 1s - loss: 1.6698 - compute_accuracy: 0.8171
179/300 [================>.............] - ETA: 1s - loss: 1.6694 - compute_accuracy: 0.8174
184/300 [=================>............] - ETA: 1s - loss: 1.6693 - compute_accuracy: 0.8175
190/300 [==================>...........] - ETA: 1s - loss: 1.6687 - compute_accuracy: 0.8179
196/300 [==================>...........] - ETA: 1s - loss: 1.6682 - compute_accuracy: 0.8185
201/300 [===================>..........] - ETA: 1s - loss: 1.6677 - compute_accuracy: 0.8188
206/300 [===================>..........] - ETA: 0s - loss: 1.6677 - compute_accuracy: 0.8187
211/300 [====================>.........] - ETA: 0s - loss: 1.6672 - compute_accuracy: 0.8190
216/300 [====================>.........] - ETA: 0s - loss: 1.6671 - compute_accuracy: 0.8189
222/300 [=====================>........] - ETA: 0s - loss: 1.6669 - compute_accuracy: 0.8190
228/300 [=====================>........] - ETA: 0s - loss: 1.6664 - compute_accuracy: 0.8192
233/300 [======================>.......] - ETA: 0s - loss: 1.6664 - compute_accuracy: 0.8192
238/300 [======================>.......] - ETA: 0s - loss: 1.6662 - compute_accuracy: 0.8194
244/300 [=======================>......] - ETA: 0s - loss: 1.6661 - compute_accuracy: 0.8194
250/300 [========================>.....] - ETA: 0s - loss: 1.6659 - compute_accuracy: 0.8194
256/300 [========================>.....] - ETA: 0s - loss: 1.6664 - compute_accuracy: 0.8186
261/300 [=========================>....] - ETA: 0s - loss: 1.6662 - compute_accuracy: 0.8186
267/300 [=========================>....] - ETA: 0s - loss: 1.6657 - compute_accuracy: 0.8190
271/300 [==========================>...] - ETA: 0s - loss: 1.6653 - compute_accuracy: 0.8193
276/300 [==========================>...] - ETA: 0s - loss: 1.6653 - compute_accuracy: 0.8192
281/300 [===========================>..] - ETA: 0s - loss: 1.6652 - compute_accuracy: 0.8193
285/300 [===========================>..] - ETA: 0s - loss: 1.6653 - compute_accuracy: 0.8191
290/300 [============================>.] - ETA: 0s - loss: 1.6656 - compute_accuracy: 0.8188
293/300 [============================>.] - ETA: 0s - loss: 1.6654 - compute_accuracy: 0.8189
297/300 [============================>.] - ETA: 0s - loss: 1.6649 - compute_accuracy: 0.8193
300/300 [==============================] - 3s 11ms/step - loss: 1.6646 - compute_accuracy: 0.8195
 1/50 [..............................] - ETA: 0s
10/50 [=====>........................] - ETA: 0s
21/50 [===========>..................] - ETA: 0s
31/50 [=================>............] - ETA: 0s
41/50 [=======================>......] - ETA: 0s
50/50 [==============================] - 0s 5ms/step
test dataset loss: 1.645696 acc: 0.831400

Train time for epoch #4 (0 total steps): 3.597641

Process finished with exit code 0
