Accurate Classification of 12,500 Cat and Dog Images from Kaggle 2013 Dogs vs. Cats with Keras

【Download the Dataset】

A sample of the dog images in the dataset

【Organize the Dataset】

  • Split the original training data into training, validation, and test sets; the resulting directory structure is shown after the split script below.

  • Create a new project in PyCharm and add split_dataset.py:

import os, shutil

# Directory where the original dataset was unpacked
original_dataset_dir = r'D:\kaggle\dogsvscats\train'
# Directory that will hold the smaller dataset
base_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small'
os.mkdir(base_dir)

# Create the train, validation and test directories
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# Create cat and dog subdirectories inside each split
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)

train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)

validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)

validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)

test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)

test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)

# Copy the images into the splits
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dat = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dat)

fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dat = os.path.join(validation_cats_dir, fname)
    shutil.copyfile(src, dat)

fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dat = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dat)

fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dat = os.path.join(train_dogs_dir, fname)
    shutil.copyfile(src, dat)

fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dat = os.path.join(validation_dogs_dir, fname)
    shutil.copyfile(src, dat)

fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dat = os.path.join(test_dogs_dir, fname)
    shutil.copyfile(src, dat)
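
After the script has run, the small dataset has the following layout (reconstructed from the copy loops above):

cats_and_dogs_small/
    train/
        cats/          cat.0.jpg ... cat.999.jpg
        dogs/          dog.0.jpg ... dog.999.jpg
    validation/
        cats/          cat.1000.jpg ... cat.1499.jpg
        dogs/          dog.1000.jpg ... dog.1499.jpg
    test/
        cats/          cat.1500.jpg ... cat.1999.jpg
        dogs/          dog.1500.jpg ... dog.1999.jpg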

【Build a Simple CNN Model】

from keras import layers
from keras import models
from keras import optimizers
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])
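
As an optional check (not in the original post), printing the model summary shows how the feature maps shrink with 150x150 inputs: the last MaxPool2D output is (None, 7, 7, 128), so the Flatten layer feeds 6,272 values into the Dense(512) layer.

model.summary()   # expect the final pooling output shape to be (None, 7, 7, 128)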

【Preprocess the Image Data】

  • Read the image files;
  • Decode the JPEG content into RGB grids of pixels;
  • Convert these pixels into floating-point tensors;
  • Rescale the pixel values from [0, 255] to [0, 1], since CNNs prefer small input values.
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    directory=train_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    directory=validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
  • Feed the data to the model with fit_generator
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)
  • Save the model
model.save('cats_and_dogs_small_1.h5')
  • Plot the training and validation loss and accuracy curves
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

【Complete Code for the Simple CNN】

from keras import layers
from keras import models
import matplotlib.pyplot as plt
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator

train_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\train'
validation_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\validation'


model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])



# Rescale pixel values
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    directory=train_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    directory=validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)

model.save('cats_and_dogs_small_1.h5')

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

【Training Results and Analysis】

  • Training results

  • Analysis
    The dominant feature of these curves is overfitting. Training accuracy increases almost linearly and approaches 100%, while validation accuracy stalls at 70%~72%. Likewise, the training loss decreases steadily towards 0, while the validation loss starts rising after about 5 epochs.
    Because only 2,000 training samples were used, lack of data is the main cause of the overfitting. There are many ways to mitigate it, such as dropout and L2 regularization (a small sketch of both follows below). Here we will try data augmentation, the standard approach for image classification.
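
As a side note (not from the original post), this is a minimal sketch of how those two remedies would look in Keras, applied only to the densely connected classifier; the input_shape is assumed to match the (7, 7, 128) output of the convolutional base defined above.

from keras import layers, models, regularizers

clf = models.Sequential()
clf.add(layers.Flatten(input_shape=(7, 7, 128)))    # assumed shape of the conv base output
clf.add(layers.Dropout(0.5))                         # dropout: randomly zero 50% of activations while training
clf.add(layers.Dense(512, activation='relu',
                     kernel_regularizer=regularizers.l2(1e-4)))  # L2 penalty on the layer weights
clf.add(layers.Dense(1, activation='sigmoid'))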

【Optimization 1: Data Augmentation】

  • Data augmentation
    Overfitting happens because there are too few training samples, so the model does not generalize well to new data. Data augmentation is the best way to get more training data out of the existing samples: it randomly transforms images the model would otherwise simply "memorize" (scaling, cropping, stretching, and so on), so the model never sees exactly the same picture twice.
    In Keras this is done with ImageDataGenerator. An example:
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,     # horizontal shift
    height_shift_range=0.2,    # vertical shift
    shear_range=0.2,           # shearing
    zoom_range=0.2,            # zooming
    horizontal_flip=True,
    fill_mode='nearest')       # how newly created pixels are filled

The effect is shown in the figure below: the first image is the original, the second is shifted vertically, the third is stretched vertically, and the fourth has newly filled-in pixels.
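
To generate such a figure yourself, the following sketch (not from the original post; it assumes the train\cats folder created by the split script) pushes one training image through an augmenting generator and plots a few variants:

import os
import matplotlib.pyplot as plt
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator

train_cats_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\train\cats'   # assumed path
fname = os.path.join(train_cats_dir, os.listdir(train_cats_dir)[3])

img = image.load_img(fname, target_size=(150, 150))
x = image.img_to_array(img)        # array of shape (150, 150, 3)
x = x.reshape((1,) + x.shape)      # flow() expects a batch dimension

datagen = ImageDataGenerator(
    rotation_range=40, width_shift_range=0.2, height_shift_range=0.2,
    shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')

i = 0
for batch in datagen.flow(x, batch_size=1):
    plt.figure(i)
    plt.imshow(image.array_to_img(batch[0]))
    i += 1
    if i % 4 == 0:                 # stop after four augmented samples
        break
plt.show()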

  • A remaining risk
    Even with augmentation, the inputs are still strongly correlated, because they all come from the same small set of original pictures; no genuinely new information is added. To keep fighting overfitting, we add a Dropout layer to the model, raise batch_size to 32, and increase the number of epochs to 100, and then see how it performs.

  • Complete model code
from keras import layers
from keras import models
import matplotlib.pyplot as plt
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator

train_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\train'
validation_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\validation'


model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPool2D((2, 2)))

model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])



# Rescale pixel values and augment the training data
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    directory=train_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    directory=validation_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=100,
    validation_data=validation_generator,
    validation_steps=50)

model.save('cats_and_dogs_small_2.h5')

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
  • Optimization 1: training results and analysis
  • Training results

  • Analysis

We can see a clear improvement in accuracy: both training and validation accuracy sit at roughly 85%~86%. Likewise, both losses drop below about 0.35, with no severe overfitting. To push the accuracy above 90%, a good next step is to use a pretrained network.

【Optimization 2: Feature Extraction with a Pretrained Network】

  • Pretrained convolutional networks
    This is my first time using a pretrained model to improve a network. Since our dataset is small, getting high accuracy calls for a network pretrained on a large image-classification dataset such as ImageNet. Because ImageNet is huge (about 1.4 million labelled images across 1,000 classes), a network pretrained on it can extract features that help with most computer-vision classification problems. ImageNet also contains many animal categories, including various breeds of cats and dogs, so we can expect it to noticeably improve accuracy on Dogs vs. Cats.
    We use the VGG16 architecture because of its strong results on ImageNet; it is simple and effective and does not require introducing new concepts. Other well-known models with equally elegant names include VGG, ResNet, Inception-ResNet, Xception, and so on.
  • Ways to use a pretrained network
    Feature extraction and fine-tuning.
  • Feature extraction
    Feature extraction uses a previously trained network, with the parameters it has already learned, to extract interesting features from new samples. These features are then fed into a new classifier. The convolutional part of the pretrained model is called the convolutional base; it is already trained and is frozen, so it is not modified. What we need to do is define the new fully connected layers, i.e. a new classifier.
  • In this second optimized version the strategy is: no data augmentation + a pretrained network. Let's try it.
  • Using VGG16 as the convolutional base
from keras.applications import VGG16
conv_base = VGG16(weights='imagenet',
                  include_top=False,    # exclude the fully connected classifier; ImageNet's 1000-way head is not needed here
                  input_shape=(150, 150, 3))
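
As an optional check (not in the original post), printing the base's summary shows why the feature arrays below are allocated with shape (samples, 4, 4, 512): for 150x150 inputs, the output of VGG16's final block5_pool layer is a 4x4x512 feature map.

conv_base.summary()   # last layer block5_pool, output shape (None, 4, 4, 512)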
  • Extracting features with the pretrained convolutional base
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
base_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20


def extract_features(directory, sample_count):
    features = np.zeros(shape=(sample_count, 4, 4, 512))
    labels = np.zeros(shape=(sample_count))
    generator = datagen.flow_from_directory(
        directory,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')

    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size : (i + 1) * batch_size] = features_batch
        labels[i * batch_size : (i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break

    return features, labels


train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)

train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
  • Optimization 2: model code
from keras.applications import VGG16
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt


conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(150, 150, 3))

base_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20


def extract_features(directory, sample_count):
    features = np.zeros(shape=(sample_count, 4, 4, 512))
    labels = np.zeros(shape=(sample_count))
    generator = datagen.flow_from_directory(
        directory,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')

    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size : (i + 1) * batch_size] = features_batch
        labels[i * batch_size : (i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break

    return features, labels


train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)

train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))

model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='binary_crossentropy',
              metrics=['acc'])

history = model.fit(train_features, train_labels,
                    epochs=30,
                    batch_size=20,
                    validation_data=(validation_features, validation_labels))


acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
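
The test features extracted above are never used in the original script; as a small addition, they can be evaluated directly, since the classifier takes the flattened features as input:

test_loss, test_acc = model.evaluate(test_features, test_labels)
print('test acc:', test_acc)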
  • Optimization 2: training results and analysis
  • Training results

  • Analysis
    Yes, the accuracy now reaches about 90%, better than anything we got by training from scratch on the small dataset. The curves also show that overfitting is still present: training accuracy climbs above 95%, even though we used dropout and a small learning rate (2e-5). A likely reason is that this version does not use data augmentation, which is an effective remedy. Let's add it and see whether it pushes the accuracy further.

【Optimization 3: Feature Extraction with Data Augmentation】

  • Feature extraction with data augmentation
    The change from Optimization 2 is that we add data augmentation and remove the Dropout layer.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')


test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    directory=train_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    directory=validation_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])

history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)
  • Optimization 3: model code
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt

train_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\train'
validation_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\validation'

conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(150, 150, 3))
conv_base.trainable = False


model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dense(1, activation='sigmoid'))

train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')


test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    directory=train_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    directory=validation_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])

history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)

model.save('cats_and_dogs_small_3.h5')

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()
  • Optimization 3: training results and analysis
  • Training results

  • Analysis
    The curves show that data augmentation effectively removes the overfitting, and the accuracy stays above 90%. Since my GPU is a GTX 750 Ti, just 30 epochs took almost 18 hours; a high-performance GPU improves productivity a great deal.

【Optimization 4: Fine-Tuning the Pretrained Network】

  • Fine-tuning

    Fine-tuning is another way to reuse a pretrained model: we unfreeze part of the previously frozen VGG16 base and adjust it slightly, so the model becomes more relevant to our problem. The steps are:
    1) Add a custom network on top of an already trained base network;
    2) Freeze the base network;
    3) Train the part we added;
    4) Unfreeze some of the convolutional layers of the base network;
    5) Jointly train those unfrozen layers and the part we added.

  • Optimization 4: model code

    We unfreeze the fifth (topmost) convolutional block of VGG16 and train it together with the fully connected layers, updating its weights. We also add the test set for a final evaluation and smooth the plotted training curves, and again use data augmentation to limit overfitting. Let's train it.

from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras import optimizers
import matplotlib.pyplot as plt

train_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\train'
validation_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\validation'
test_dir = r'D:\kaggle\dogsvscats\cats_and_dogs_small\test'

conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(150, 150, 3))
set_trainable = False

for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False


model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dense(1, activation='sigmoid'))

train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')


test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    directory=train_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    directory=validation_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-5),
              metrics=['acc'])

history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=100,
    validation_data=validation_generator,
    validation_steps=50)

model.save('cats_and_dogs_small_4.h5')


def smooth_curve(points, factor=0.8):
    # Exponential moving average used to smooth the noisy training curves
    smoothed_points = []
    for point in points:
        if smoothed_points:
            previous = smoothed_points[-1]
            smoothed_points.append(previous * factor + point * (1 - factor))
        else:
            smoothed_points.append(point)
    return smoothed_points

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)
plt.plot(epochs, smooth_curve(acc), 'bo', label='Smoothed training acc')
plt.plot(epochs, smooth_curve(val_acc), 'b', label='Smoothed validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, smooth_curve(loss), 'bo', label='Smoothed training loss')
plt.plot(epochs, smooth_curve(val_loss), 'b', label='Smoothed validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary'
)
test_loss, test_acc = model.evaluate_generator(test_generator, steps=50)
print('test acc:', test_acc)
  • Optimization 4: training results and analysis
  • Training results
    Test accuracy stays around 90%~91% but never reaches 95%. My machine broke down late in training, so I cannot share a screenshot; please forgive the 750 Ti~
  • Analysis
    This run took more than 50 hours. With the computer fully occupied by training, I stayed out of its way and finished "The Book of Sand" by an Argentinian author, the film "Jurassic Park", and the whole of "Deep Learning with Python".
    Personally, I feel fine-tuning did not contribute much in this experiment, and without a larger GPU it is hardly trainable on a small local machine; it is probably better suited to other scenarios.

【References】

  • "Deep Learning with Python"
    An excellent book on deep learning that helps you build a correct, systematic understanding of the field; highly recommended.
  • E-book link - Baidu Netdisk

【Code】

【Acknowledgements】

Thanks to everyone who has read this far. If you have any questions, leave a comment below or send them to my email; let's learn from and share with each other. And if you are just passing by, a like would be appreciated.
  • Email: [email protected]

Reposted from blog.csdn.net/apengpengpeng/article/details/80866029