Fine-tuning pre-trained Inception-V3 weights for cat vs. dog classification (with Keras)

We fine-tune pre-trained Inception-V3 weights to classify cats and dogs. The weight file can be downloaded from the resources section of my blog: https://download.csdn.net/download/fanzonghao/10566634
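If you would rather not download the file by hand, Keras can also fetch the same "notop" ImageNet weights automatically. A minimal sketch (assumes internet access on the first run; the file is then cached under ~/.keras/models):

from keras.applications.inception_v3 import InceptionV3

pre_trained_model = InceptionV3(input_shape=(150, 150, 3),
                                include_top=False,   # exclude the fully connected top layers
                                weights='imagenet')  # downloads and caches the notop weights automatically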

Approach 1: keep the pre-trained weights frozen and use the mixed7 layer purely as a feature extractor (you will see mixed7 in the printed layer names below), then flatten its output and attach two fully connected layers.

def define_model():
    InceptionV3_weight_path='./model_weight/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
    pre_trained_model=InceptionV3(input_shape=(150,150,3),
                                  include_top=False,  # exclude the fully connected top layers
                                  weights=None)
    pre_trained_model.load_weights(InceptionV3_weight_path)
    # choose one of the two options below

    # option 1: use the base only for feature extraction, so freeze all of its weights
    for layer in pre_trained_model.layers:
        print(layer.name)
        layer.trainable=False
    # option 2: fine-tune, i.e. unfreeze the layers after mixed6 (commented out here)
    # unfreeze=False
    # for layer in pre_trained_model.layers:
    #     if unfreeze:
    #         layer.trainable=True
    #     if layer.name=='mixed6':
    #         unfreeze=True
    last_layer=pre_trained_model.get_layer('mixed7')
    print(last_layer.output_shape)
    last_output=last_layer.output
    # the layers below are the new head added on top of the pre-trained base
    x=layers.Flatten()(last_output)
    x=layers.Dense(1024,activation='relu')(x)
    x=layers.Dropout(0.2)(x)
    x=layers.Dense(1,activation='sigmoid')(x)
    model=Model(inputs=pre_trained_model.input,outputs=x)
    return model

Complete code for the first approach, which uses the Inception-V3 pre-trained weights as a frozen feature extractor:

import os
import tensorflow as tf
import matplotlib.pyplot as plt

from keras.applications.inception_v3 import InceptionV3
from keras import  layers
from keras.models import Model
from keras.optimizers import RMSprop
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import data_read
"""
#获得所需求的图片--进行了图像增强
"""
def data_deal_overfit():
    # get the dataset directory paths
    train_dir, validation_dir, next_cat_pix, next_dog_pix = data_read.read_data()
    # data augmentation for the training images
    train_datagen=ImageDataGenerator(
        rescale=1./255,
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    test_datagen=ImageDataGenerator(rescale=1./255)
    # pull batches of images from the directories
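    # note: flow_from_directory expects one sub-folder per class under each directory
    # (e.g. cats/ and dogs/); labels are inferred from the folder names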
    train_generator=train_datagen.flow_from_directory(
          train_dir,
          target_size=(150,150),
          batch_size=20,
          class_mode='binary')
    test_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary')
    return train_generator,test_generator
"""
#定义模型并加入了dropout
"""
def define_model():
    InceptionV3_weight_path='./model_weight/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
    pre_trained_model=InceptionV3(input_shape=(150,150,3),
                                  include_top=False,  # exclude the fully connected top layers
                                  weights=None)
    pre_trained_model.load_weights(InceptionV3_weight_path)
    # choose one of the two options below

    # option 1: use the base only for feature extraction, so freeze all of its weights
    for layer in pre_trained_model.layers:
        print(layer.name)
        layer.trainable=False
    # option 2: fine-tune, i.e. unfreeze the layers after mixed6 (commented out here)
    # unfreeze=False
    # for layer in pre_trained_model.layers:
    #     if unfreeze:
    #         layer.trainable=True
    #     if layer.name=='mixed6':
    #         unfreeze=True
    last_layer=pre_trained_model.get_layer('mixed7')
    print(last_layer.output_shape)
    last_output=last_layer.output
    # the layers below are the new head added on top of the pre-trained base
    x=layers.Flatten()(last_output)
    x=layers.Dense(1024,activation='relu')(x)
    x=layers.Dropout(0.2)(x)
    x=layers.Dense(1,activation='sigmoid')(x)
    model=Model(inputs=pre_trained_model.input,outputs=x)
    return model

"""
训练模型
"""
def train_model():
    model=define_model()
    model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])
    train_generator, test_generator = data_deal_overfit()
    # verbose: logging mode, 0 = silent, 1 = progress bar, 2 = one line per epoch
    # train the model; history holds the accuracy and loss for every epoch
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=100,  # 2000 images = batch_size * steps
        epochs=50,
        validation_data=test_generator,
        validation_steps=50,  # 1000=20*50
        verbose=2)
    # accuracy
    acc=history.history['acc']
    val_acc=history.history['val_acc']
    # loss
    loss=history.history['loss']
    val_loss=history.history['val_loss']
    # number of epochs
    epochs=range(len(acc))

    plt.plot(epochs,acc)
    plt.plot(epochs, val_acc)
    plt.title('training and validation accuracy')

    plt.figure()
    plt.plot(epochs, loss)
    plt.plot(epochs, val_loss)
    plt.title('training and validation loss')
    plt.show()

if __name__ == '__main__':
    train_model()
Printed output: the print statements list the name of every layer in the pre-trained network, and mixed7 has output shape (None, 7, 7, 768). The first approach simply takes the features from mixed7 (keeping the weights of that layer and everything before it frozen), flattens them, and classifies with the two dense layers added on top.
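To reproduce that listing without running the whole training script, a minimal sketch like the one below (assuming the same local weight file path as in the code above) prints every layer name together with its output shape:

from keras.applications.inception_v3 import InceptionV3

base = InceptionV3(input_shape=(150, 150, 3), include_top=False, weights=None)
base.load_weights('./model_weight/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
for layer in base.layers:
    print(layer.name, layer.output_shape)
print(base.get_layer('mixed7').output_shape)  # (None, 7, 7, 768)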

Approach 2: fine-tuning. There is no need to retrain all of the weights: the earlier layers learn simple, generic features, and only the deeper layers become task-specific. So we retrain only the convolutional weights from the block after mixed6 onwards (the layers that produce mixed7), using this code:

unfreeze=False
for layer in pre_trained_model.layers:
    if unfreeze:
        layer.trainable=True
    if layer.name=='mixed6':
        unfreeze=True

In other words, in the complete code above, simply swap which of the two blocks is commented out: comment out the loop that freezes every layer and uncomment this fine-tuning loop.
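For reference, here is a minimal sketch of the fine-tuning variant of define_model. It follows the post's structure, but explicitly freezes everything up to and including mixed6 (the commented-out loop above only sets trainable=True after mixed6 and leaves the earlier layers at their default), and the name define_finetune_model is mine:

def define_finetune_model():
    InceptionV3_weight_path = './model_weight/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
    pre_trained_model = InceptionV3(input_shape=(150, 150, 3),
                                    include_top=False,
                                    weights=None)
    pre_trained_model.load_weights(InceptionV3_weight_path)
    # freeze everything up to and including mixed6, unfreeze everything after it
    unfreeze = False
    for layer in pre_trained_model.layers:
        layer.trainable = unfreeze
        if layer.name == 'mixed6':
            unfreeze = True
    last_output = pre_trained_model.get_layer('mixed7').output
    # same classification head as before
    x = layers.Flatten()(last_output)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(1, activation='sigmoid')(x)
    model = Model(inputs=pre_trained_model.input, outputs=x)
    return model

When the unfrozen layers are being retrained it is also common to lower the learning rate in train_model (e.g. RMSprop(lr=1e-4)); that is my suggestion, the original code keeps lr=0.001.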

Reposted from blog.csdn.net/fanzonghao/article/details/81228596