A face recognition demo based on TensorFlow + OpenCV + Python

I recently needed face recognition for a meeting-room scenario, i.e. face-based sign-in, and wanted to explore porting the feature to an Android A311D board.

Collecting the data

Data is collected with a USB camera: the camera is polled for frames, and whenever a captured frame contains a face, the face region is read out and saved as an image. This gives the data a first-pass filter, since only confirmed face crops are kept. To keep the subsequent training accurate, capture with the camera in different positions and under different lighting. (From my own experiment: if face A is only ever captured at angle a and face B only at angle b, then after training, face A presented at angle b gets recognized as B. The detector used here is off-the-shelf; if you train your own classifier, robustness might be better.)

import logging
import cv2
import sys
import numpy as np

# Step 1: create a logger
logger = logging.getLogger()
# Master switch for the log level; INFO here
logger.setLevel(logging.INFO)
# Step 2: create a handler that writes to a log file
logfile = './log.txt'
fh = logging.FileHandler(logfile, mode='a')  # mode follows open()'s file modes
fh.setLevel(logging.DEBUG)  # level threshold for the file handler
# Step 3: create another handler that writes to the console
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)   # level threshold for the console handler
# Step 4: define the handlers' output format (time, file, line number, level, message)
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# Step 5: attach the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
# Log levels:
#logger.debug('this is logger debug message')
#logger.info('this is logger info message')
#logger.warning('this is logger warning message')
#logger.error('this is logger error message')
#logger.critical('this is logger critical message')
# DEBUG: detailed information, typically of interest only when diagnosing problems
# INFO: confirmation that things are working as expected
# WARNING (default): something unexpected happened, or a problem is likely in the near
#   future (e.g. "disk space low"); the software still works as expected
# ERROR: a more serious problem; the software failed to perform some function
# CRITICAL: a serious error indicating the program itself may be unable to keep running
logger.info('---------------------------------------------')
logger.info('--------------------begin--------------------')

# Capture images for one category
def NewPICFromVideo(camera_idx, pic_category, new_pic_num, path):
    logger.info('--------------------get newpic--------------------')
    window_name = 'show your face before the camera'
    cv2.namedWindow(window_name)

    cap = cv2.VideoCapture(camera_idx, cv2.CAP_DSHOW)
    #cap = cv2.VideoCapture("F:\\VideoEncDec\\img\\190204084208765161.mp4")
    print(cap)
    classfier = cv2.CascadeClassifier("F:\\VideoEncDec\\OpenCV\\opencv\\build\\etc\\haarcascades\\haarcascade_frontalface_alt2.xml")

    # Color of the bounding box drawn around detected faces (OpenCV uses BGR order)
    color = (0, 255, 0)

    # Grab frames from the camera in a loop
    num = 0
    while cap.isOpened():
        print(cap.isOpened())
        # Read one frame of data from the camera
        ok, frame = cap.read()
        if not ok:
            break

        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert the current frame to greyscale
        # Face detection: scaleFactor=1.2 means the search window grows by 20% between
        # successive scans; minNeighbors=3 is the minimum number of neighboring
        # rectangles needed to accept a detection
        faceRects = classfier.detectMultiScale(grey, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
        print(faceRects)

        logger.info('--------------------get faces--------------------')
        if len(faceRects) > 0:
            for faceRect in faceRects:
                x, y, w, h = faceRect
                print(x, y, w, h, 'AAAAA')

                # Save the face region of the current frame as an image
                img_name = path + '/' + pic_category + '/' + str(num) + '.jpg'
                print(img_name)
                # clamp to the frame so the 10-px margin cannot produce negative indices
                image = frame[max(0, y - 10):y + h + 10, max(0, x - 10):x + w + 10]
                if image.size != 0:
                    cv2.imwrite(img_name, image)
                    print('BBBBB')
                num += 1
                if num > new_pic_num:
                    break

                cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, 2)

                # Show how many face images have been captured so far, so whoever is
                # standing in front of the camera knows the progress instead of waiting blind
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, 'num:%d' % num, (x + 30, y + 30), font, 1, (255, 0, 255), 4)

        if num > new_pic_num:
            break

        # Display the frame
        cv2.imshow(window_name, frame)
        c = cv2.waitKey(10)
        if c & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    logger.info('--------------------end--------------------')
    logger.info('---------------------------------------------')

if __name__ == '__main__':
    # with any command-line argument, grab a quick test batch of 10 images;
    # run without arguments to capture the full 200 training images
    if len(sys.argv) != 1:
        NewPICFromVideo(0, 'hzg', 10, './data')
    else:
        NewPICFromVideo(0, 'hzg', 200, './data')
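
Before moving on, it is worth sanity-checking how many usable crops actually landed in each category folder, since class imbalance will skew training. A minimal sketch, assuming the ./data/<category>/ layout produced above:

import os

DATA_DIR = './data'  # the same root directory that NewPICFromVideo wrote to

# count the .jpg crops per category folder
for category in sorted(os.listdir(DATA_DIR)):
    folder = os.path.join(DATA_DIR, category)
    if os.path.isdir(folder):
        count = sum(1 for f in os.listdir(folder) if f.endswith('.jpg'))
        print('%s: %d images' % (category, count))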

Loading the data

Resize the captured data to 64×64 and walk the images inside each category folder, collecting them into an array. Each image is first padded to a square with black borders so its aspect ratio is preserved: a 100×64 crop, for example, gets 18 px of border on the left and 18 px on the right before being resized to 64×64.

import os
import sys
import numpy as np
import cv2

IMAGE_SIZE = 64


# Resize an image to the given dimensions, padding it square first
def resize_image(image, height=IMAGE_SIZE, width=IMAGE_SIZE):
    top, bottom, left, right = (0, 0, 0, 0)

    # Get the image dimensions
    h, w, _ = image.shape

    # For images whose height and width differ, find the longer edge
    longest_edge = max(h, w)

    # Work out how many pixels of padding the short edge needs to match the long edge
    if h < longest_edge:
        dh = longest_edge - h
        top = dh // 2
        bottom = dh - top
    elif w < longest_edge:
        dw = longest_edge - w
        left = dw // 2
        right = dw - left

    # Border color (black)
    BLACK = [0, 0, 0]

    # Pad the image so height and width are equal; cv2.BORDER_CONSTANT fills the
    # border with the color given by value
    constant = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=BLACK)

    # Resize and return (the image is square by now, so the argument order is harmless)
    return cv2.resize(constant, (height, width))


# Read the training data
images = []
labels = []


def read_path(path_name):
    for dir_item in os.listdir(path_name):
        # Join with the starting path to build a usable absolute path; this lets the
        # caller pass a relative path, which keeps the script portable
        full_path = os.path.abspath(os.path.join(path_name, dir_item))
        print(full_path)

        if os.path.isdir(full_path):  # recurse into subdirectories
            read_path(full_path)
        else:  # a regular file
            if dir_item.endswith('.jpg'):
                image = cv2.imread(full_path)  # loads the image as an np.array; image.shape is (height, width, channels)
                print(image.shape)
                image = resize_image(image, IMAGE_SIZE, IMAGE_SIZE)  # pad and resize

                # Uncomment to inspect the actual effect of resize_image()
                # cv2.imwrite('1.jpg', image)

                images.append(image)
                labels.append(path_name)  # the label is the folder path, without the .jpg file name

    return images, labels


# Load training data from the given path
def load_dataset(path_name):
    images, labels = read_path(path_name)

    # Convert all images into one four-dimensional array of shape
    # (image count, IMAGE_SIZE, IMAGE_SIZE, 3); after filtering, my hzg folder
    # held 1973 images, so for me that is 1973 x 64 x 64 x 3
    # (64 x 64 pixels, 3 color values per pixel)
    images = np.array(images)
    print(images.shape)

    # Label the data: everything under the 'hzg' folder is my face and gets 0;
    # images under any other folder are not me and get 1
    labels = np.array([0 if label.endswith('hzg') else 1 for label in labels])

    return images, labels


if __name__ == '__main__':
    if len(sys.argv) != 1:
        print("Usage:%s path_name\r\n" % (sys.argv[0]))
    else:
        # images, labels = load_dataset("C:\\Users\\10257818\\PycharmProjects\\facedetc\\venv\\data\\hzg")
        # images, labels = load_dataset("C:\\Users\\10257818\\PycharmProjects\\facedetc\\venv\\data")
        images, labels = load_dataset("C:\\Users\\10257818\\PycharmProjects\\facedetc\\venv\\sample")


Training the model

=.= This step had plenty of problems and landmines...
PyCharm supports conda: set up the environment in conda, and the packages pip-installed there can be imported directly in PyCharm, which resolved many import errors; see reference 6 for details.
After installing Keras, this line failed:
from Keras.layers import Dense, Dropout, Activation, Flatten
ModuleNotFoundError: No module named 'Keras'
Following https://blog.csdn.net/weixin_40405758/article/details/88094405, the cause is that TensorFlow bundles and supports Keras, so keras should be imported from tf directly.
Many other issues are recorded in my local notes; I may write them up later.
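
For reference, here is a minimal sketch of the import styles that avoid the error above. The key point is that the module name is lowercase keras, and TensorFlow 2.x additionally ships its own copy under tensorflow.keras:

# 'Keras' (capital K) is not a module; the standalone package is lowercase 'keras'
from keras.layers import Dense, Dropout, Activation, Flatten

# alternatively, use the copy bundled with TensorFlow 2.x
from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten

Mixing layers from keras and tensorflow.keras in one model is what triggers the "added layer must be an instance of class Layer" error noted in the imports below, so pick one package and stay with it.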

import random
import logging
import numpy as np
#from sklearn.cross_validation import train_test_split      # old module name, renamed to the line below
from sklearn.model_selection import train_test_split        # train/validation split helper
from keras.preprocessing.image import ImageDataGenerator    # image generator from the preprocessing module
#from Keras.models import Sequential    # fails: ModuleNotFoundError, see
#                                       # https://blog.csdn.net/weixin_40405758/article/details/88094405
from tensorflow import keras
#from tensorflow.keras import Sequential    # TypeError: The added layer must be an instance of class Layer.
#                                           # Found: <keras.layers.convolutional.Conv2D object at 0x0000025C68ECCEF0>
from keras.models import Sequential         # fixes the error above: take Sequential from the same
                                            # keras package as the layers instead of mixing the two
from keras.layers import Dense, Dropout, Activation, Flatten    # fully connected, dropout, activation, flatten
from keras.layers import Convolution2D, MaxPooling2D            # convolution & pooling
from keras.optimizers import SGD            # stochastic gradient descent optimizer
from keras.utils import np_utils            # utilities (one-hot encoding etc.)
from keras.models import load_model         # load saved weights (JSON or HDF5)
from keras import backend as K              # backend abstraction over TensorFlow / Theano

from load_dataset import load_dataset, resize_image, IMAGE_SIZE



# Step 1: create a logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)  # master switch for the log level
logger.info('this is logger info message')
# Step 2: create a handler that writes to a log file
logfile = './log.txt'
fh = logging.FileHandler(logfile, mode='a')
fh.setLevel(logging.DEBUG)  # level threshold for the file handler
# Step 3: create another handler that writes to the console
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)   # level threshold for the console handler
# Step 4: define the handlers' output format (time, file, line number, level, message)
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# Step 5: attach the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)

class Dataset:
    def __init__(self, path_name):
        # training set
        self.train_images = None
        self.train_labels = None

        # validation set
        self.valid_images = None
        self.valid_labels = None

        # test set
        self.test_images = None
        self.test_labels = None

        # dataset path
        self.path_name = path_name

        # dimension order used by the current backend
        self.input_shape = None

    # Load the dataset, split it following cross-validation practice, and preprocess it
    def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,
             img_channels=3, nb_classes=2):
        # Load the dataset into memory
        images, labels = load_dataset(self.path_name)
        print(images.shape)
        print(labels.shape)

        # Split the data. Note the test split is drawn from the full set again, so it
        # overlaps the training data; a stricter evaluation would carve the test set
        # out of the held-out portion instead
        train_images, valid_images, train_labels, valid_labels = train_test_split(images, labels, test_size=0.3,
                                                                                  random_state=random.randint(0, 100))
        _, test_images, _, test_labels = train_test_split(images, labels, test_size=0.5,
                                                          random_state=random.randint(0, 100))

        # If the backend's dimension order is 'channels_first' (the old Theano/'th'
        # convention), image data is laid out as channels, rows, cols; otherwise
        # ('channels_last', the TensorFlow/'tf' convention) it is rows, cols, channels.
        # Reshape the training data to whatever order the Keras backend expects
        if K.image_data_format() == 'channels_first':
            train_images = train_images.reshape(train_images.shape[0], img_channels, img_rows, img_cols)
            valid_images = valid_images.reshape(valid_images.shape[0], img_channels, img_rows, img_cols)
            test_images = test_images.reshape(test_images.shape[0], img_channels, img_rows, img_cols)
            self.input_shape = (img_channels, img_rows, img_cols)
        else:
            train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
            valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
            test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
            self.input_shape = (img_rows, img_cols, img_channels)
        # channels_last example: shape (1973, 64, 64, 3)

        # Report the size of the training, validation, and test sets
        print(train_images.shape[0], 'train samples')
        print(valid_images.shape[0], 'valid samples')
        print(test_images.shape[0], 'test samples')

        # The model uses categorical_crossentropy as its loss function, so the class
        # labels must be one-hot encoded according to nb_classes; with only two
        # classes, the encoded labels become two-dimensional
        train_labels = np_utils.to_categorical(train_labels, nb_classes)
        valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
        test_labels = np_utils.to_categorical(test_labels, nb_classes)

        # Convert the pixel data to float so it can be normalized
        train_images = train_images.astype('float32')
        valid_images = valid_images.astype('float32')
        test_images = test_images.astype('float32')

        # Normalize each pixel value to the 0~1 range
        train_images /= 255
        valid_images /= 255
        test_images /= 255

        self.train_images = train_images
        self.valid_images = valid_images
        self.test_images = test_images
        self.train_labels = train_labels
        self.valid_labels = valid_labels
        self.test_labels = test_labels
        print(test_labels)


# CNN model class
class Model:
    def __init__(self):
        self.model = None

    # Build the model
    def build_model(self, dataset, nb_classes=2):  # input shape comes from dataset.input_shape, e.g. (64, 64, 3); two classes
        # Start with an empty network: a Sequential model, to which the layers are
        # added one after another in a linear stack
        self.model = Sequential()

        # Add the CNN layers one by one; each add() appends one layer
        #self.model.add(Convolution2D(32, 3, 3, border_mode='same',
        #                             input_shape=dataset.input_shape))  # old Keras 1 signature
        self.model.add(Convolution2D(32, 3, padding='same', input_shape=dataset.input_shape))  # 1  2D convolution
        self.model.add(Activation('relu'))  # 2  activation

        self.model.add(Convolution2D(32, 3))  # 3  2D convolution
        self.model.add(Activation('relu'))  # 4  activation

        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 5  pooling
        self.model.add(Dropout(0.25))  # 6  dropout

        #self.model.add(Convolution2D(64, 3, 3, border_mode='same'))  # old Keras 1 signature
        self.model.add(Convolution2D(64, 3, padding='same'))  # 7  2D convolution
        self.model.add(Activation('relu'))  # 8  activation

        self.model.add(Convolution2D(64, 3))  # 9  2D convolution
        self.model.add(Activation('relu'))  # 10 activation

        self.model.add(MaxPooling2D(pool_size=(2, 2)))  # 11 pooling
        self.model.add(Dropout(0.25))  # 12 dropout

        self.model.add(Flatten())  # 13 flatten: collapse the feature maps to one dimension
        self.model.add(Dense(512))  # 14 dense (fully connected) layer
        self.model.add(Activation('relu'))  # 15 activation
        self.model.add(Dropout(0.5))  # 16 dropout
        self.model.add(Dense(nb_classes))  # 17 dense layer
        self.model.add(Activation('softmax'))  # 18 softmax: produces the final class probabilities

        # Print a summary of the model
        self.model.summary()

    # Train the model
    def train(self, dataset, batch_size=20, nb_epoch=10, data_augmentation=True):
        # batch_size: samples per gradient update; nb_epoch: number of epochs
        sgd = SGD(lr=0.01, decay=1e-6,
                  momentum=0.9, nesterov=True)  # SGD + momentum optimizer
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])  # the actual model configuration

        # Without data augmentation. Augmentation creates new training data from the
        # provided samples by rotating, flipping, adding noise and so on, deliberately
        # enlarging the training set
        if not data_augmentation:
            self.model.fit(dataset.train_images,
                           dataset.train_labels,
                           batch_size=batch_size,
                           epochs=nb_epoch,  # 'epochs' replaces the old 'nb_epoch' keyword in Keras 2
                           validation_data=(dataset.valid_images, dataset.valid_labels),
                           shuffle=True)
        # With real-time data augmentation
        else:
            # Define a data generator for augmentation. It returns a generator object,
            # datagen, which produces one batch per call (sequentially), saving memory
            datagen = ImageDataGenerator(
                featurewise_center=False,   # center the input data (zero dataset mean)?
                samplewise_center=False,    # zero the mean of each sample?
                featurewise_std_normalization=False,  # divide inputs by the dataset std?
                samplewise_std_normalization=False,   # divide each sample by its own std?
                zca_whitening=False,        # apply ZCA whitening?
                rotation_range=20,          # random rotation angle during augmentation (0~180)
                width_shift_range=0.2,      # random horizontal shift (fraction of width, 0~1)
                height_shift_range=0.2,     # same, vertically
                horizontal_flip=True,       # random horizontal flips?
                vertical_flip=False)        # random vertical flips?

            # Compute statistics over the whole training set, needed for featurewise
            # normalization, ZCA whitening, and so on
            datagen.fit(dataset.train_images)

            # Train the model from the generator; Keras 2 expects steps_per_epoch
            # (batches per epoch) instead of the old samples_per_epoch
            self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
                                                  batch_size=batch_size),
                                     steps_per_epoch=dataset.train_images.shape[0] // batch_size,
                                     epochs=nb_epoch,
                                     verbose=0,
                                     validation_data=(dataset.valid_images, dataset.valid_labels))

    #MODEL_PATH = './hzg&zh-sample.face.model.h5'
    MODEL_PATH = './hzg&zh.face.model.h5'  # model path

    def save_model(self, file_path=MODEL_PATH):    # save the model
        self.model.save(file_path)

    def load_model(self, file_path=MODEL_PATH):    # load the model
        self.model = load_model(file_path)

    def evaluate(self, dataset):                   # evaluate the model
        score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose=1)
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))

    # Recognize a face
    def face_predict(self, image):
        # Keras API change: the old K.image_dim_ordering() == 'th' check is now
        # K.image_data_format() == 'channels_first'. As before, pick the dimension
        # order according to the backend
        if K.image_data_format() == 'channels_first' and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
            image = resize_image(image)  # the size must match the training set: IMAGE_SIZE x IMAGE_SIZE
            image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))  # unlike training, predict on a single image
        elif K.image_data_format() == 'channels_last' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
            image = resize_image(image)
            image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))

        # Convert to float and normalize
        image = image.astype('float32')
        image /= 255

        # Probability of the input belonging to each class; with a binary classifier
        # this gives the probability of class 0 and of class 1
        result = self.model.predict_proba(image)
        print('result:', result)

        # Class prediction: 0 or 1
        result = self.model.predict_classes(image)

        # Return the predicted class
        return result[0]


if __name__ == '__main__':
    # Train and save the model
    dataset = Dataset('./sample/')
    dataset.load()

    model = Model()
    model.build_model(dataset)
    model.train(dataset)
    model.save_model(file_path='./hzg&zh.face.model.h5')

    # Evaluate the saved model on a separate dataset
    dataset = Dataset('./data/')
    dataset.load()

    model = Model()
    model.load_model(file_path='./hzg&zh.face.model.h5')
    model.evaluate(dataset)

Face recognition

Once the model has been trained on your own data, you can start recognizing. The recognition program is fairly simple: load the trained model, read frames from the USB camera, convert them to greyscale, detect the faces, and then decide who each one is. This is just a binary classifier: A is me, B is one colleague whose photos serve as the only negative examples.

import cv2
import sys
import gc
from face_train import Model

# helper for displaying Chinese text (unused below); OpenCV's GUI cannot render
# characters that do not survive a gbk round-trip
def zh_ch(string):
    return string.encode("gbk").decode(errors="ignore")

if __name__ == '__main__':
    if len(sys.argv) != 1:
        print("Usage:%s camera_id\r\n" % (sys.argv[0]))
        sys.exit(0)

    # Load the model
    model = Model()
    model.load_model(file_path='./hzg&zh.face.model.h5')

    # Color of the rectangle framing the face
    color = (0, 255, 0)

    # Capture a live video stream from the given camera
    cap = cv2.VideoCapture(0)

    # Local path of the face detection cascade
    cascade_path = "F:\\VideoEncDec\\OpenCV\\opencv\\build\\etc\\haarcascades\\haarcascade_frontalface_alt2.xml"

    # Load the cascade classifier once, outside the frame loop
    cascade = cv2.CascadeClassifier(cascade_path)

    # Detect and recognize faces in a loop
    while True:
        ret, frame = cap.read()  # read one frame of video

        if ret is True:
            # Convert to greyscale to reduce the computational cost
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        else:
            continue

        # Use the classifier to find the face regions
        faceRects = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
        if len(faceRects) > 0:
            for faceRect in faceRects:
                x, y, w, h = faceRect

                # Crop the face region (clamped to the frame) and ask the model who it is
                image = frame[max(0, y - 10): y + h + 10, max(0, x - 10): x + w + 10]
                faceID = model.face_predict(image)

                # If it is "me"
                if faceID == 0:
                    cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, thickness=2)

                    # Label who it is
                    cv2.putText(frame, 'hzg',
                                (x + 30, y + 30),  # position
                                cv2.FONT_HERSHEY_SIMPLEX,  # font
                                1,  # font scale
                                (255, 0, 255),  # color
                                2)  # line thickness
                else:
                    cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, thickness=2)

                    # Label who it is
                    cv2.putText(frame, 'zh',
                                (x + 30, y + 30),  # position
                                cv2.FONT_HERSHEY_SIMPLEX,  # font
                                1,  # font scale
                                (255, 255, 0),  # color
                                2)  # line thickness
        cv2.imshow("recognize hzg or zh", frame)

        # 等待10毫秒看是否有按键输入
        k = cv2.waitKey(10)
        # 如果输入q则退出循环
        if k & 0xFF == ord('q'):
            break

    # 释放摄像头并销毁所有窗口
    cap.release()
    cv2.destroyAllWindows()

That's about it. Run the recognition program and you can watch the verification results, which is quite fun. Here is part of the recognition output:

result: [[0.9915513  0.00844877]]
result: [[0.99243504 0.0075649 ]]
result: [[0.9969267  0.00307327]]
result: [[0.99651885 0.00348113]]
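
With only two classes, every detected face, including a stranger's, is forced to be labeled 'hzg' or 'zh'. A hypothetical tweak (not part of the original script) would be to reject low-confidence predictions rather than always taking the most probable class. A minimal sketch, assuming the input has already been resized and normalized exactly as in face_predict():

import numpy as np

THRESHOLD = 0.9  # hypothetical confidence cut-off; tune it on real data

def predict_with_reject(keras_model, image):
    # probs has shape (1, 2): the softmax probabilities for [hzg, zh]
    probs = keras_model.predict(image)
    if np.max(probs) < THRESHOLD:
        return -1  # neither class is confident enough: treat as an unknown face
    return int(np.argmax(probs))

A caller would pass the wrapped Keras model (model.model above) and map a -1 result to, say, drawing no name on the frame.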

To apply the trained model on Android, I tried converting the .h5 file to a .pb file:

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

def h5_to_pb(h5_save_path):
    model = tf.keras.models.load_model(h5_save_path, compile=False)
    model.summary()

    # Wrap the Keras model in a concrete tf.function
    full_model = tf.function(lambda Input: model(Input))
    full_model = full_model.get_concrete_function(tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))

    # Get the frozen ConcreteFunction (variables folded into constants)
    frozen_func = convert_variables_to_constants_v2(full_model)
    frozen_func.graph.as_graph_def()

    layers = [op.name for op in frozen_func.graph.get_operations()]
    print("-" * 50)
    print("Frozen model layers: ")
    for layer in layers:
        print(layer)

    # Serialize the frozen graph to disk
    tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                      logdir="./frozen_models3",
                      name="model.pb",
                      as_text=False)

h5_to_pb("./hzg&zh.face.model.h5")

The conversion output:

C:\ProgramData\Anaconda3\envs\py37\python.exe C:/Users/10257818/PycharmProjects/facedetc/venv/h5topb.py
2022-06-07 15:23:02.888693: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
Model: "sequential_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_9 (Conv2D)            (None, 64, 64, 32)        896       
_________________________________________________________________
activation_13 (Activation)   (None, 64, 64, 32)        0         
_________________________________________________________________
conv2d_10 (Conv2D)           (None, 62, 62, 32)        9248      
_________________________________________________________________
activation_14 (Activation)   (None, 62, 62, 32)        0         
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 31, 31, 32)        0         
_________________________________________________________________
dropout_7 (Dropout)          (None, 31, 31, 32)        0         
_________________________________________________________________
conv2d_11 (Conv2D)           (None, 31, 31, 64)        18496     
_________________________________________________________________
activation_15 (Activation)   (None, 31, 31, 64)        0         
_________________________________________________________________
conv2d_12 (Conv2D)           (None, 29, 29, 64)        36928     
_________________________________________________________________
activation_16 (Activation)   (None, 29, 29, 64)        0         
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 14, 14, 64)        0         
_________________________________________________________________
dropout_8 (Dropout)          (None, 14, 14, 64)        0         
_________________________________________________________________
flatten_3 (Flatten)          (None, 12544)             0         
_________________________________________________________________
dense_5 (Dense)              (None, 512)               6423040   
_________________________________________________________________
activation_17 (Activation)   (None, 512)               0         
_________________________________________________________________
dropout_9 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 2)                 1026      
_________________________________________________________________
activation_18 (Activation)   (None, 2)                 0         
=================================================================
Total params: 6,489,634
Trainable params: 6,489,634
Non-trainable params: 0
_________________________________________________________________
2022-06-07 15:23:03.224513: I tensorflow/core/grappler/devices.cc:60] Number of eligible GPUs (core count >= 8, compute capability >= 0.0): 0 (Note: TensorFlow was not compiled with CUDA support)
2022-06-07 15:23:03.225656: I tensorflow/core/grappler/clusters/single_machine.cc:356] Starting new session
2022-06-07 15:23:03.240499: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:814] Optimization results for grappler item: graph_to_optimize
2022-06-07 15:23:03.240800: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:816]   function_optimizer: function_optimizer did nothing. time = 0.002ms.
2022-06-07 15:23:03.241083: I tensorflow/core/grappler/optimizers/meta_optimizer.cc:816]   function_optimizer: function_optimizer did nothing. time = 0ms.
--------------------------------------------------
Frozen model layers: 
Input
sequential_3/conv2d_9/Conv2D/ReadVariableOp/resource
sequential_3/conv2d_9/Conv2D/ReadVariableOp
sequential_3/conv2d_9/Conv2D
sequential_3/conv2d_9/BiasAdd/ReadVariableOp/resource
sequential_3/conv2d_9/BiasAdd/ReadVariableOp
sequential_3/conv2d_9/BiasAdd
sequential_3/activation_13/Relu
sequential_3/conv2d_10/Conv2D/ReadVariableOp/resource
sequential_3/conv2d_10/Conv2D/ReadVariableOp
sequential_3/conv2d_10/Conv2D
sequential_3/conv2d_10/BiasAdd/ReadVariableOp/resource
sequential_3/conv2d_10/BiasAdd/ReadVariableOp
sequential_3/conv2d_10/BiasAdd
sequential_3/activation_14/Relu
sequential_3/max_pooling2d_5/MaxPool
sequential_3/dropout_7/Identity
sequential_3/conv2d_11/Conv2D/ReadVariableOp/resource
sequential_3/conv2d_11/Conv2D/ReadVariableOp
sequential_3/conv2d_11/Conv2D
sequential_3/conv2d_11/BiasAdd/ReadVariableOp/resource
sequential_3/conv2d_11/BiasAdd/ReadVariableOp
sequential_3/conv2d_11/BiasAdd
sequential_3/activation_15/Relu
sequential_3/conv2d_12/Conv2D/ReadVariableOp/resource
sequential_3/conv2d_12/Conv2D/ReadVariableOp
sequential_3/conv2d_12/Conv2D
sequential_3/conv2d_12/BiasAdd/ReadVariableOp/resource
sequential_3/conv2d_12/BiasAdd/ReadVariableOp
sequential_3/conv2d_12/BiasAdd
sequential_3/activation_16/Relu
sequential_3/max_pooling2d_6/MaxPool
sequential_3/dropout_8/Identity
sequential_3/flatten_3/Const
sequential_3/flatten_3/Reshape
sequential_3/dense_5/MatMul/ReadVariableOp/resource
sequential_3/dense_5/MatMul/ReadVariableOp
sequential_3/dense_5/MatMul
sequential_3/dense_5/BiasAdd/ReadVariableOp/resource
sequential_3/dense_5/BiasAdd/ReadVariableOp
sequential_3/dense_5/BiasAdd
sequential_3/activation_17/Relu
sequential_3/dropout_9/Identity
sequential_3/dense_6/MatMul/ReadVariableOp/resource
sequential_3/dense_6/MatMul/ReadVariableOp
sequential_3/dense_6/MatMul
sequential_3/dense_6/BiasAdd/ReadVariableOp/resource
sequential_3/dense_6/BiasAdd/ReadVariableOp
sequential_3/dense_6/BiasAdd
sequential_3/activation_18/Softmax
Identity

Process finished with exit code 0

Model graphs before and after the conversion (figures omitted).

Source code reference: https://www.cnblogs.com/shihuc/p/6593041.html
References:
1. https://keras.io/zh/getting-started/faq/#_4
2. https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html (conda official docs)
3. https://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy (prebuilt Windows packages for common libraries)
4. https://pypi.tuna.tsinghua.edu.cn/simple/ (Tsinghua PyPI mirror)
5. https://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy (NumPy downloads)
6. https://www.jetbrains.com/help/pycharm/conda-support-creating-conda-virtual-environment.html#Conda_Support__Creating_Conda_Virtual_Environment-4-procedure (PyCharm conda documentation)

Reposted from blog.csdn.net/qq_38750519/article/details/112396057