Face Recognition System with a C/S Architecture ---- Beginner Version

Client connections are handled in separate threads.

The client is responsible for face capture and face registration;

the server is responsible for training on the face dataset and for face recognition.

Messages between client and server are exchanged as raw bytes (see the sketch after this introduction).

[A database and dedicated send/receive threads on the client could be added later to support multi-platform operation.]
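All commands are short UTF-8 strings sent as raw bytes, and the replies come back the same way. Below is a minimal sketch of one round trip from the client side, assuming fs_server.py from this post is already listening on 127.0.0.1:3333; it only illustrates the protocol and is not part of the two programs.

import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 3333))
print(client.recv(1024).decode('utf-8'))    # 'Welcome!' greeting from the server
client.send('flag'.encode('utf-8'))         # ask whether this is the first launch
print(client.recv(1024).decode('utf-8'))    # '1' if the directories were just created, else '0'
# (sending 'train' instead would make the server retrain and reply 'train success')
client.send('exit'.encode('utf-8'))         # ask the server to close this connection
client.close()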

fs_server.py

import cv2
import json
import time
import os,sys
import socket
import pyttsx3
import threading
import numpy as np
from PIL import Image

# The server does the actual work: training on the face dataset and running recognition

# Create a TCP socket:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind to the local address and port
s.bind(('127.0.0.1', 3333))

# Create the working directories on first launch
def makeDir(engine):
    flag = 0
    if not os.path.exists("face_trainer"):
        print("Creating the pre-training environment")
        engine.say('First launch detected, no environment found, creating it now')
        engine.say('Creating the pre-training environment')
        os.mkdir("face_trainer")
        engine.say('Created successfully')
        engine.runAndWait()
        flag = 1
    if not os.path.exists("Facedata"):
        print("Creating the training environment")
        engine.say('Creating the training environment')
        os.mkdir("Facedata")
        engine.say('Created successfully')
        engine.runAndWait()
        flag = 1
    return flag
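
# For orientation, after a successful first run the working directory looks roughly
# like this (all names taken from the code in this post):
#   Facedata/           face images captured by the client, named User.<id>.<count>.jpg
#   face_trainer/       trainer.yml, the LBPH model written by trainFace()
#   name.txt            JSON list written by the client, mapping numeric ids to names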
# Text-to-speech helper
def say(engine, text):
    engine.say(text)
    engine.runAndWait()
# Initialization
names = []
if os.path.exists("name.txt"):
    with open("name.txt") as f:
        names = json.loads(f.read())
        # print(names)
engine = pyttsx3.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', rate - 20)
# Was this the first launch? If so, the client is prompted to register a face or exit
flag = makeDir(engine)

# Start listening for connections:
s.listen(5)
print('Waiting for connection...')

def getImagesAndLabels(path, detector):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]  # build the full path of every image in the folder
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale
        img_numpy = np.array(PIL_img, 'uint8')
        id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)
        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y + h, x: x + w])
            ids.append(id)
    return faceSamples, ids
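
# Example of the id parsing above: for a file saved by the client as
# "Facedata/User.2.15.jpg", os.path.split(imagePath)[-1] is "User.2.15.jpg" and
# split(".")[1] is "2", so every face found in that image is labeled with id 2.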

def trainFace():
    # Path to the face image dataset
    path = 'Facedata'
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    # Load OpenCV's bundled frontal-face Haar cascade (avoids a machine-specific absolute path)
    detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    print('Training faces. It will take a few seconds. Wait ...')
    faces, ids = getImagesAndLabels(path, detector)
    recognizer.train(faces, np.array(ids))
    recognizer.write('face_trainer/trainer.yml')
    print("{0} faces trained. Exiting Program".format(len(np.unique(ids))))

def tcplink(sock, addr):
    # Handle one client connection
    print('Accept new connection from %s:%s...' % addr)
    sock.send(b'Welcome!')

    while True:
        recvbuf = sock.recv(1024)
        if not recvbuf or recvbuf.decode('utf-8') == 'exit':
            break

        if recvbuf.decode('utf-8') == 'flag':
            global flag
            # print(flag)
            if flag == 1:
                sendbuf = "1"
                sock.send(sendbuf.encode('utf-8'))
                flag = 0

            else:
                sendbuf = "0"
                sock.send(sendbuf.encode('utf-8'))

        if recvbuf.decode('utf-8') == 'train':
            trainFace()
            sock.send(b'train success')

    sock.close()

    print('Connection from %s:%s closed.' % addr)

while True:
    # Accept a new connection:
    sock, addr = s.accept()
    # Spawn a new thread to handle this TCP connection:
    t = threading.Thread(target=tcplink, args=(sock, addr))
    t.start()

fs_client.py

import os
import cv2
import sys
import json
import time
import socket
import pyttsx3
import threading
import numpy as np
from PIL import Image

# The client has two main responsibilities: face capture/recognition and face registration

# Create a TCP socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the local server:
s.connect(('127.0.0.1', 3333))

# Text-to-speech helper
def say(engine, text):
    engine.say(text)
    engine.runAndWait()
# Initialization
names = []
if os.path.exists("name.txt"):
    with open("name.txt") as f:
        names = json.loads(f.read())
        # print(names)
engine = pyttsx3.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', rate - 20)

# Receive the welcome message:
print(s.recv(1024).decode('utf-8'))
say(engine, "Welcome to the face recognition system")

def getFace(cap, face_id):
    # Load OpenCV's bundled frontal-face Haar cascade (avoids a machine-specific absolute path)
    face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    print('\n Initializing face capture. Look at the camera and wait ...')
    count = 0
    while True:
        # Read a frame from the camera
        success, img = cap.read()
        # Convert it to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect faces
        faces = face_detector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0))
            count += 1
            # Save the cropped face region
            cv2.imwrite("Facedata/User." + str(face_id) + '.' + str(count) + '.jpg', gray[y: y + h, x: x + w])
            cv2.imshow('image', img)
        # Keep the preview window updating
        k = cv2.waitKey(1)
        if k == 27:   # press Esc to stop capturing
            break
        elif count >= 100:  # stop after collecting 100 samples
            break
    cap.release()
    cv2.destroyAllWindows()
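
# Note: getFace() saves into the relative Facedata/ directory that the server's
# makeDir() created, so this beginner version assumes client and server run from
# the same working directory on the same machine (the socket uses 127.0.0.1).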

def checkFace(cam,names,engine):
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read('face_trainer/trainer.yml')
    cascadePath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascadePath)
    font = cv2.FONT_HERSHEY_SIMPLEX
    idnum = 0
    #names = ['zongyong', 'zhangmin', 'shanglanqing']
    #cam = cv2.VideoCapture(0)
    minW = 0.1 * cam.get(3)  # 10% of the frame width (CAP_PROP_FRAME_WIDTH)
    minH = 0.1 * cam.get(4)  # 10% of the frame height (CAP_PROP_FRAME_HEIGHT)
    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH))
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            idnum, confidence = recognizer.predict(gray[y:y + h, x:x + w])
            if confidence < 100:
                idnum = names[idnum]
                confidence = "{0}%".format(round(100 - confidence))
                print("欢迎  "+idnum+"签到成功!\n")
                say(engine, "欢迎    "+idnum+"签到成功!")
                # cv2.imshow('camera', img)
                # time.sleep(2)
                # os.system("pause")
                cam.release()
                return
            else:
                idnum = "unknown"
                confidence = "{0}%".format(round(100 - confidence))
                print("对不起,未识别到!\n");

            cv2.putText(img, str(idnum), (x + 5, y - 5), font, 1, (0, 0, 255), 1)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (0, 0, 0), 1)
        cv2.imshow('camera', img)
        k = cv2.waitKey(10)
        if k == 27:
            break
    cam.release()
    cv2.destroyAllWindows()
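
# Note on the confidence value above: recognizer.predict() returns a distance-like
# score where lower means a closer match, so "100 - confidence" is only a rough
# percentage heuristic and the "< 100" threshold accepts fairly loose matches.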

while True:
    # Ask the server whether this is the first launch (training environment just created)
    sendbuf = "flag"
    s.send(sendbuf.encode('utf-8'))
    time.sleep(1)
    recvbuf = s.recv(1024).decode('utf-8')
    time.sleep(1)
    # print(recvbuf)

    if recvbuf == '1':  # first launch
        say(engine,"新人脸信息录入 或者 退出")
        value = input("1:录入  or other:退出\n")
        if value == '1':
            say(engine, "请输入您的姓名,注意要写成拼音形式")
            name = input("请输入姓名:")
            names.append(name)
            say(engine, "正在打开摄像头")
            cam = cv2.VideoCapture(0)
            say(engine, "注视摄像头,开始采集人脸数据")
            getFace(cam, len(names) - 1)
            say(engine,"采集完毕,成功上传系统训练")
            s.send((b'train'))
            print(s.recv(1024).decode('utf-8'))
        else:
            # Save the registered names to a file
            with open("name.txt", 'w') as f:
                f.write(json.dumps(names))
            say(engine, "Information saved")
            say(engine, "Goodbye")
            s.send(b'exit')
            break
            # sys.exit(0)
    else:
        say(engine,"请选择系统功能")
        value = input("1:录入新的人脸信息 2:人脸识别签到 0:退出\n")
        if value == '1':
            say(engine, "请输入您的姓名,注意要写成拼音形式")
            name = input("请输入姓名:")
            names.append(name)
            say(engine, "正在打开摄像头")
            cam = cv2.VideoCapture(0)
            say(engine, "注视摄像头,开始采集人脸数据")
            getFace(cam, len(names) - 1)
            say(engine,"采集完毕,成功上传系统训练")
            s.send((b'train'))
            print(s.recv(1024).decode('utf-8'))
        elif value == '2':
            say(engine, "开始人脸识别")
            say(engine, "正在打开摄像头")
            cam = cv2.VideoCapture(0)
            checkFace(cam, names, engine)
        else:
            # Save the registered names to a file
            with open("name.txt", 'w') as f:
                f.write(json.dumps(names))
            say(engine, "Information saved")
            say(engine, "Goodbye")
            s.send(b'exit')
            break
            # sys.exit(0)
# Close the network connection
s.close()
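
One way to try the system, assuming the two listings above are saved as fs_server.py and fs_client.py in the same directory (Python 3 with opencv-contrib-python and pyttsx3 installed): start the server first, then run the client from a second terminal.

python fs_server.py
python fs_client.py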

Reposted from blog.csdn.net/lidengdengter/article/details/94962182