Python+Qt face recognition employee entry management system


If you need help installing the runtime environment or with remote debugging, see the personal QQ business card at the bottom of this article; a technical specialist can assist remotely.

Foreword

This post presents the code for a Python+Qt face recognition employee entry management system. The code is tidy, well organized, and easy to read, making it a good starting point for both learning and practical use.


Article directory

1. Required tool software

2. Use steps

    1. Import library

    2. Code implementation

    3. Running results

3. Online assistance

1. Required tool software

1. Python

2. Qt (PyQt5), OpenCV (the opencv-contrib-python build, which provides the cv2.face module used below)
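If you want to confirm the environment before running the project (for example after installing PyQt5 and opencv-contrib-python with pip), a minimal check such as the following sketch can be used; it only verifies that the libraries and the cv2.face module are available.

# Minimal environment check (a sketch; assumes PyQt5 and opencv-contrib-python are installed)
import cv2
import PyQt5.QtCore

print("OpenCV version:", cv2.__version__)
print("Qt version:", PyQt5.QtCore.QT_VERSION_STR)

# cv2.face is only provided by the opencv-contrib-python build;
# it is required for the LBPH recognizer used later in this post
assert hasattr(cv2, "face"), "Install opencv-contrib-python to get cv2.face"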

2. Use steps

1. Import library

# coding: utf-8
import sys
import os
import csv
import cv2                          # OpenCV; cv2.face (used below) requires the opencv-contrib-python build
from untitled import Ui_mainWindow  # UI class generated from the project's Qt Designer .ui file
import record                       # project module
import name                         # project module
from dbase import Record2           # project module

from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
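
Before the implementation, one note: the recognition code below loads a trained LBPH model from face_trainer/trainer.yml (in the project, pushButton_7 is wired to self.train, which is not shown in this post). As a rough, hedged sketch of what such a training step typically looks like with OpenCV, assuming face images are collected under a hypothetical face_data/ folder with the naming convention <employee id>_<index>.jpg:

# Minimal LBPH training sketch (an assumption about the project's training step, not its actual code)
import os
import cv2
import numpy as np

def train_lbph(data_dir="face_data", model_path="face_trainer/trainer.yml"):
    detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    samples, ids = [], []
    for fname in os.listdir(data_dir):
        # Hypothetical naming convention: "<employee id>_<index>.jpg"
        emp_id = int(fname.split("_")[0])
        gray = cv2.imread(os.path.join(data_dir, fname), cv2.IMREAD_GRAYSCALE)
        for (x, y, w, h) in detector.detectMultiScale(gray):
            samples.append(gray[y:y + h, x:x + w])
            ids.append(emp_id)
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(samples, np.array(ids))
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    recognizer.write(model_path)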

2. Code implementation

The code is as follows:

class myWin(QtWidgets.QMainWindow, Ui_mainWindow):

    def __init__(self):
        super(myWin, self).__init__()
        self.setupUi(self)

        self.pushButton_4.clicked.connect(self.onVideo)
        self.open_flag = False
        self.painter = QPainter(self)

        self.pushButton.clicked.connect(self.openFileButton)
        self.pushButton_2.clicked.connect(self.open_name_ui)
        self.pushButton_7.clicked.connect(self.train)
        #self.pushButton_6.clicked.connect(self.faceRecog)
        self.pushButton_3.clicked.connect(self.open_second_ui)

        self.pushButton_5.clicked.connect(self.threadRun)
        self.pushButton_6.clicked.connect(self.switch_video)
        # VideoTimer is a helper thread defined elsewhere in the project; it
        # periodically emits timeSignal.signal[str] to drive frame-by-frame recognition
        self.timer2 = VideoTimer()
        self.timer2.timeSignal.signal[str].connect(self.videoRecog2)

        self.pushButton_8.clicked.connect(self.recogConform)
        self.pushButton.hide()
        self.pushButton_4.hide()

    def threadRun(self):
        # Toggle the model thread; the button label doubles as the state flag
        if self.pushButton_5.text() == "模型初始化thread":    # "Initialize model thread"
            threadSetup()
            self.pushButton_5.setText("停止模型thread")        # "Stop model thread"

        elif self.pushButton_5.text() == "停止模型thread":
            threadStop()
            self.pushButton_5.setText("模型初始化thread")

    def closeEvent2(self, event):
        # Ask whether to pause the camera ("系统提示信息" = system message, "是否暂停摄像头?" = pause the camera?)
        self.box = QMessageBox(QMessageBox.Warning, "系统提示信息", "是否暂停摄像头?")
        qyes = self.box.addButton(self.tr("是"), QMessageBox.YesRole)   # Yes
        qno = self.box.addButton(self.tr("否"), QMessageBox.NoRole)     # No
        self.box.exec_()
        if self.box.clickedButton() == qyes:
            self.label.clear()
            # Wait until 'q' is pressed in the OpenCV window, then release the camera
            while True:
                if cv2.waitKey(1) == ord('q'):
                    break
            self.cap.release()
            cv2.destroyAllWindows()
        else:
            event.ignore()

    def closethreed(self):
        print("test")

    # Handler bound to the window close (X) button: confirm before exiting the system
    def closeEvent(self, event):
        self.box = QMessageBox(QMessageBox.Warning, "系统提示信息", "是否退出系统?")   # "Exit the system?"
        qyes = self.box.addButton(self.tr("是"), QMessageBox.YesRole)   # Yes
        qno = self.box.addButton(self.tr("否"), QMessageBox.NoRole)     # No
        self.box.exec_()
        if self.box.clickedButton() == qyes:
            try:
                threadStop()
            except Exception:
                print("abnormal")
            event.accept()
            QtWidgets.QWidget.closeEvent(self, event)
            sys.exit(0)
        else:
            event.ignore()

    def switch_video(self):
        # Toggle detection; the button label doubles as the state flag
        if self.pushButton_6.text() == "开始检测":     # "Start detection"
            self.timer2.start()
            self.pushButton_6.setText("暂停检测")       # "Pause detection"
        elif self.pushButton_6.text() == "暂停检测":
            self.timer2.stop()
            self.pushButton_6.setText("开始检测")

    def videoRecog2(self):
        # im02 is expected to hold the latest camera frame (BGR), shared by the
        # capture code elsewhere in the project.
        # Note: reloading the model and cascade on every frame is simple but slow;
        # they could be moved to __init__.
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read('face_trainer/trainer.yml')            # model produced by the training step
        cascadePath = "haarcascade_frontalface_default.xml"
        faceCascade = cv2.CascadeClassifier(cascadePath)

        gray = cv2.cvtColor(im02, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
        )

        if len(faces) == 0:
            # No face found: just display the current frame
            frame = cv2.cvtColor(im02, cv2.COLOR_BGR2RGB)
            height, width, bytesPerComponent = frame.shape
            bytesPerLine = bytesPerComponent * width

            self.q_image = QtGui.QImage(frame.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888) \
                .scaled(int(self.label.height() * 1.5), self.label.height())
            self.label.setPixmap(QPixmap.fromImage(self.q_image))
            self.update()

        if len(faces) == 1:
            for (x, y, w, h) in faces:
                # Predict the employee id; lower LBPH confidence means a better match,
                # so it is converted below into a rough similarity percentage
                idnum, confidence = recognizer.predict(gray[y:y + h, x:x + w])
                print("confidence", confidence)
                print("idnum", idnum)

                confidence2 = round(160 - confidence)
                if confidence2 > 80:
                    # Recognized: green box plus similarity percentage
                    cv2.rectangle(im02, (x, y), (x + w, y + h), (0, 255, 0), 3)
                    cv2.putText(im02, str(confidence2) + "%", (x + 5, y + h - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (37, 46, 6), 1)
                else:
                    # Not recognized: blue box labelled "unknown"
                    cv2.rectangle(im02, (x, y), (x + w, y + h), (255, 0, 0), 3)
                    cv2.putText(im02, "unknown", (x + 5, y + h - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (37, 46, 6), 1)

                frame = cv2.cvtColor(im02, cv2.COLOR_BGR2RGB)
                height, width, bytesPerComponent = frame.shape
                bytesPerLine = bytesPerComponent * width

                self.q_image = QtGui.QImage(frame.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888) \
                    .scaled(int(self.label.height() * 1.5), self.label.height())
                self.label.setPixmap(QPixmap.fromImage(self.q_image))
                self.update()
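
The VideoTimer class used above is defined elsewhere in the project and is not shown in this post. Conceptually it is a small worker thread that emits a string signal at a fixed frame rate so that videoRecog2 runs once per frame. A minimal sketch of such a class, assuming that behaviour (not the project's actual code), could look like this:

# Minimal VideoTimer sketch (an assumption about the project's helper class)
import time
from PyQt5 import QtCore

class Communicate(QtCore.QObject):
    signal = QtCore.pyqtSignal(str)

class VideoTimer(QtCore.QThread):
    def __init__(self, fps=20):
        super(VideoTimer, self).__init__()
        self.stopped = True
        self.fps = fps
        self.timeSignal = Communicate()

    def run(self):
        self.stopped = False
        while not self.stopped:
            self.timeSignal.signal.emit("1")   # tell the UI to process the next frame
            time.sleep(1.0 / self.fps)

    def stop(self):
        self.stopped = True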

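To launch the window, the usual Qt entry point is needed. A minimal sketch, assuming it sits at the bottom of the same file that defines myWin and the imports shown earlier:

# Minimal entry point sketch for launching the main window
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = myWin()
    window.show()
    sys.exit(app.exec_())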
3. Running results

 

3. Online assistance:

If you need help installing the runtime environment or with remote debugging, see the personal QQ business card at the bottom of this article; a technical specialist can assist remotely.
1) Remote installation of the runtime environment and code debugging
2) Getting started with Qt, C++, and Python
3) Interface beautification
4) Software development

Blogger recommended article: Python face recognition statistics Qt form - CSDN Blog

Blogger recommended article: Python Yolov5 flame and smoke recognition source code sharing - CSDN Blog

Blogger recommended article: Python OpenCV counting people entering and exiting a pedestrian entrance - CSDN Blog

Personal blog homepage: alicema1111's blog - CSDN Blog (Python, C++, web development)

Click here for all the blogger's articles: alicema1111's blog - CSDN Blog (Python, C++, web development)

Origin blog.csdn.net/alicema1111/article/details/130272551