keras调用mobilenetv2进行二分类

1 文件架构

-train.py  # 训练
-predict.py  # 预测
-IMG	# 存放正负样本
--False.0.jpg  # 负样本命名
--True.0.jpg  # 正样本命名
-MODEL  # 存放生成模型(与IMG同级,代码中使用 ./MODEL/)

重命名:可以使用os.rename(srcpath+name, dstpath+name)
dnn调用mobilenetv2时,mobilenetv2支持avg-pooling,或不池化,不支持max-pooling

2 模型的训练

# -*- coding: utf-8 -*-
"""
	train.py
"""


from keras.applications.mobilenet_v2 import MobileNetV2
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.utils.vis_utils import plot_model
from keras.engine.topology import Input
from keras.layers import *
from keras.models import Model
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from tensorflow.python.framework import graph_io
from keras.callbacks import ModelCheckpoint
import cv2
import os
import numpy as np
import tensorflow as tf


def build_model():
    """Build the 2-class classifier: MobileNetV2 feature extractor + softmax head.

    Returns a keras Model mapping a (224, 224, 3) image to a 2-way softmax
    (index 0 = negative/"False", index 1 = positive/"True").
    """
    # Ensure the output directory for the architecture diagrams exists,
    # otherwise plot_model()/save calls fail on a fresh checkout.
    os.makedirs("./MODEL", exist_ok=True)

    # (1) model1: MobileNetV2 backbone used purely as a feature extractor.
    img_shape = (224, 224, 3)
    # (a) MobileNetV2 can classify the 1000 ImageNet classes directly;
    # (b) here we drop the top and reuse its pooled features for our own head.
    model1 = MobileNetV2(input_shape=img_shape,
                         include_top=False,  # drop the ImageNet fully-connected top
                         weights='imagenet',
                         pooling='avg')  # one of None / 'max' / 'avg'
    # MobileNetV2() parameter reference: https://keras.io/zh/applications/#mobilenetv2
    plot_model(model1, to_file="./MODEL/model1.png", show_shapes=True)
    # plot_model() is handy to inspect output shapes for a given input shape.

    # (2) model2: the new classification head (2-way softmax).
    xa = Input(shape=model1.output_shape[1:])
    x = Dense(2, use_bias=True, activation='softmax', name='weighted-average')(xa)
    model2 = Model(xa, x, name='m2')
    plot_model(model2, to_file="./MODEL/model2.png", show_shapes=True)

    # (3) model: backbone and head chained end to end.
    img_a = Input(shape=img_shape)
    xa = model1(img_a)
    x = model2(xa)
    model = Model(img_a, x, name="m")
    plot_model(model, to_file="./MODEL/model.png", show_shapes=True)
    # model.summary()  # print per-layer parameter counts
    return model


def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
# Freeze the session state into a pruned graph: build a new GraphDef in which
# variable nodes are replaced by constants holding their current values.
    # session: the TensorFlow session to freeze.
    # keep_var_names: variable names that must NOT be frozen (None = freeze all).
    # output_names: names of the relevant graph outputs.
    # clear_devices: strip device placements from the graph for portability.
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(
            set(v.op.name for v in tf.compat.v1.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        # NOTE(review): this appends EVERY global variable name to output_names,
        # not just real model outputs — it keeps those nodes from being pruned,
        # but verify it is intentional before reusing this helper elsewhere.
        output_names += [v.op.name for v in tf.compat.v1.global_variables()]
        input_graph_def = graph.as_graph_def()
        # Strip device assignments from the graph for better portability.
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""
        # Replace all variables in the graph with constants of the same value.
        frozen_graph = convert_variables_to_constants(session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph


if __name__ == "__main__":
    """
        (1) Image preprocessing
    """
    img_path = "./IMG"
    X_train_list = []
    Y_train_list = []
    for path, dir_list, file_list in os.walk(img_path):
        for file in file_list:
            filename = os.path.join(path, file)
            img = cv2.imread(filename)
            if img is None:
                # Not a readable image (e.g. model files under a nested MODEL dir) — skip.
                continue
            # Label comes from the file-name prefix: "False.*" -> 0, "True.*" -> 1.
            # Use the basename (portable) rather than splitting on "\\" (Windows-only).
            prefix = file.split(".")[0]
            if prefix == "False":
                label = 0
            elif prefix == "True":
                label = 1
            else:
                print("命名出错!")
                continue  # keep X and Y aligned: store only labelled samples
            X_train_list.append(cv2.resize(img, (224, 224)))
            Y_train_list.append(label)

    X_train = np.array(X_train_list) / 255.  # scale pixels to [0, 1]
    Y_train = np_utils.to_categorical(np.array(Y_train_list), 2)  # one-hot, 2 classes
    # # Optional held-out split (validation_data for fit/fit_generator below):
    # (X_train, X_test, Y_train, Y_test) = train_test_split(X_train_list, Y_train_list, test_size=0.1,
    #                                                       random_state=42)  # random seed

    """
        (2) Model construction
    """

    model = build_model()

    """
        (3) Compile and train
    """
    # A 2-unit softmax with one-hot targets pairs with categorical_crossentropy
    # (the original binary_crossentropy also runs, but reports misleading accuracy).
    model.compile(Adam(lr=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Data augmentation (used by the fit_generator variants below); all ranges
    # are currently 0 / False, i.e. augmentation is effectively disabled.
    aug = ImageDataGenerator(rotation_range=0,
                             zoom_range=0,
                             width_shift_range=0,
                             height_shift_range=0,
                             shear_range=0,
                             horizontal_flip=False,
                             fill_mode="nearest")

    # Save the model after every epoch.
    save_dir = "./MODEL/"
    os.makedirs(save_dir, exist_ok=True)  # checkpointing fails if the dir is missing
    filepath = "model_{epoch:04d}-{accuracy:.4f}.hdf5"
    checkpoint = ModelCheckpoint(os.path.join(save_dir, filepath), monitor='accuracy', verbose=0,
                                 save_best_only=False, mode='max', save_freq='epoch')

    # # (1) fit variant 1: generator with augmentation
    # nb_batch_size = 12
    # H = model.fit_generator(aug.flow(X_train, Y_train, batch_size=nb_batch_size),
    #                         steps_per_epoch=len(X_train) // nb_batch_size,  # floor division
    #                         epochs=30)

    # # (2) fit variant 2: generator plus validation data
    # nb_batch_size = 12
    # H = model.fit_generator(aug.flow(X_train, Y_train, batch_size=nb_batch_size),
    #                         validation_data=(X_test, Y_test),
    #                         steps_per_epoch=len(Y_train) // nb_batch_size,
    #                         epochs=30, callbacks=[checkpoint])

    # (3) fit variant 3: plain fit, no augmentation.
    # Keras expects `callbacks` to be a *list* of callbacks.
    Trainning = model.fit(X_train, Y_train,
                          batch_size=12,
                          epochs=10,
                          verbose=1,  # 1: progress bar per epoch; 2: one line per epoch; 0: silent
                          callbacks=[checkpoint])

    # # (4) Resuming training from a checkpoint:
    # hdf5_model_name = "model_0010-0.9825.hdf5"  # trained model + weights
    # model_loaded = load_model('./MODEL/' + hdf5_model_name)  # load trained model
    # model.set_weights(model_loaded.get_weights())  # copy weights into `model`
    # Trainning = model.fit(X_train, Y_train,
    #                       batch_size=12,
    #                       epochs=10,
    #                       verbose=1,
    #                       callbacks=[checkpoint])

    """
        (4) Saving: hdf5 / h5 / json / pb (model1 and model2 can be saved the same way)
    """

    # 1 hdf5: architecture + weights
    model_hdf5_file = './MODEL/model.hdf5'
    model.save(model_hdf5_file)

    # 2 h5: weights only
    model_h5_file = './MODEL/model_weight.h5'
    model.save_weights(model_h5_file)

    # 3 json: architecture only
    json_string = model.to_json()
    with open('./MODEL/model.json', "w") as f:
        f.write(json_string)  # write the json-serialized model to disk

    # 4 pb: reload the hdf5 model and export a frozen graph.
    # # When exporting several pb files in a row, the default graph may need a reset:
    # from tensorflow.python.framework import ops  # v1: tf.reset_default_graph()
    # ops.reset_default_graph()

    tf.compat.v1.disable_eager_execution()
    # See note 1 on eager execution below.
    # tensorflow.compat.v1 keeps TF 1.x style code running on TF 2.x almost unchanged.
    tf.compat.v1.keras.backend.set_learning_phase(0)  # 0 = inference mode, 1 = training mode
    # (some layers, e.g. dropout/batchnorm, behave differently at train vs. inference time)
    hdf5_model = load_model(model_hdf5_file)  # load the hdf5 model
    # get the current session/graph
    sess = tf.compat.v1.keras.backend.get_session()
    # freeze the graph
    frozen_graph = freeze_session(sess, output_names=[hdf5_model.output.op.name])
    output_fld = './MODEL/'
    graph_io.write_graph(frozen_graph, output_fld, 'model.pb', as_text=False)
    print("*********pb保存完成**********")

    print("end!")


    # 补充1:
    # 参考:https://blog.csdn.net/happyhorizion/article/details/103849408

3 模型的预测

"""
	predict.py
"""
import train
# from train import build_model  # 函数写在文件最前面,其他内容放在if __name__ == "__main__":里面
import pickle
import os
import time
import cv2
import numpy as np
import math
from keras.models import model_from_json
from keras import backend as K
from mtcnn.mtcnn import MTCNN


def init(path):
    """Load the inference model from *path* (the MODEL directory).

    Rebuilds the architecture with train.build_model() and restores the
    trained weights saved by train.py. The previous code saved a freshly
    initialised (untrained) model instead, which both returned random-quality
    predictions AND overwrote the trained ./MODEL/model.hdf5 on disk.
    """
    # --- Loading option 1: architecture from json + weights from h5 ---
    # t1 = time.time()
    # model_json = open(os.path.join(path, 'model.json')).read()
    # model = model_from_json(model_json)
    # t2 = time.time()  # (time.sleep(1) sleeps one second; t2-t1 is in seconds)
    # print(t2-t1)  # ~2.11 s
    #
    # model.load_weights(os.path.join(path, 'model_weight.h5'))
    # t3 = time.time()
    # print(t3-t2)  # ~0.09 s

    # --- Loading option 2: rebuild the architecture, then restore trained weights ---
    model = train.build_model()
    model.load_weights(os.path.join(path, 'model_weight.h5'))

    # --- pickle notes: pickle can persist dicts/arrays ---
    # f1 = open(path + '/model/h2ws.pickle', 'wb')
    # pickle.dump(dict1, f1)
    # f1.close()
    # # reading a pickle file:
    # f2 = open(os.path.join(path, 'fknown.pickle'), 'rb')
    # fknown = pickle.load(f2)
    return model


def filter_(value, max_value, min_value):
    """Clamp *value* to [min_value, max_value] (upper bound checked first)."""
    if value > max_value:
        return max_value
    return min_value if value < min_value else value


def predict(model, img, box_size=224):
    """Classify one BGR image; return the argmax class index (0 or 1).

    box_size: side length the image is resized to before inference — must
    match the training input size (224 for this MobileNetV2 setup). Was an
    implicit module-level global, which raised NameError when this function
    was imported elsewhere; now a backward-compatible keyword parameter.
    """
    img = cv2.resize(img, (box_size, box_size))
    image = img / 255.  # preprocessing must match training exactly
    result = model.predict(np.expand_dims(image, 0))
    label = np.argmax(result[0])
    return label


if __name__ == "__main__":
    # Use distinct names: the original reused `path` both for the model dir
    # and as the os.walk loop variable (confusing shadowing).
    model_dir = "./MODEL/"
    test_path = "./TEST/"
    box_size = 224
    model = init(model_dir)
    for root, dir_list, file_list in os.walk(test_path):
        for file in file_list:
            filename = os.path.join(root, file)
            img = cv2.imread(filename)
            if img is None:
                # Unreadable / non-image file — report and skip instead of crashing.
                print(file + "未检测到")
                continue
            label = predict(model, img)
            if label == 0:
                print(file + ": False")
            elif label == 1:
                print(file + ": True")
            else:
                print(file + "未检测到")



Guess you like

Origin blog.csdn.net/weixin_41874898/article/details/112002876