Fatigue detection
OpenCV Computer Vision in Action
Tang Yudi's course is very well taught, though a little expensive.
Course Catalog
01 Course introduction and environment configuration
02 Basic image operations
03 Thresholding and smoothing
04 Morphological image operations
05 Computing image gradients
06 Edge detection
07 Image pyramids and contour detection
08 Histograms and the Fourier transform
09 Hands-on project: credit card number recognition
10 Hands-on project: document scanning and OCR
11 Image features: Harris
12 Image features: SIFT
13 Hands-on project: panoramic image stitching
14 Hands-on project: parking space recognition
15 Hands-on project: answer sheet grading
16 Background modeling
17 Optical flow estimation
18 OpenCV's DNN module
19 Hands-on project: object tracking
20 Principles of the convolution operation
21 Hands-on project: fatigue detection
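The code below is the fatigue detection project from lesson 21: it counts blinks in a video by tracking the eye aspect ratio (EAR) of dlib's facial landmarks, flagging frames where the eyes stay closed.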
# Import packages
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np
import argparse
import time
import dlib
import cv2
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
    ("mouth", (48, 68)),
    ("right_eyebrow", (17, 22)),
    ("left_eyebrow", (22, 27)),
    ("right_eye", (36, 42)),
    ("left_eye", (42, 48)),
    ("nose", (27, 36)),
    ("jaw", (0, 17))
])
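# Each tuple above is a half-open (start, end) slice into dlib's 68-point
# landmark array, e.g. points 36-41 outline the subject's right eye.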
# http://vision.fe.uni-lj.si/cvww2016/proceedings/papers/05.pdf
def eye_aspect_ratio(eye):
    # Vertical distances between the upper and lower eyelid landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # Horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # EAR value
    ear = (A + B) / (2.0 * C)
    return ear

# Input arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
    help="path to facial landmark predictor")
ap.add_argument("-v", "--video", type=str, default="",
    help="path to input video file")
args = vars(ap.parse_args())

# Detection thresholds
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 3
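# For reference, the eye aspect ratio implemented above (from the paper
# linked in the comment) is
#     EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||)
# where p1..p6 are the six landmarks of one eye (p1 and p4 are the corners,
# the others lie on the eyelids). The EAR stays roughly constant while the
# eye is open and drops toward zero during a blink, which is why a fixed
# threshold such as 0.3 works.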
# Initialize the counters
COUNTER = 0
TOTAL = 0
# Detection and localization tools
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# Grab the landmark index ranges of the two eye regions
(lStart, lEnd) = FACIAL_LANDMARKS_68_IDXS["left_eye"]
(rStart, rEnd) = FACIAL_LANDMARKS_68_IDXS["right_eye"]
# Read the video
print("[INFO] starting video stream thread...")
vs = cv2.VideoCapture(args["video"])
#vs = FileVideoStream(args["video"]).start()
time.sleep(1.0)
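# (The commented-out FileVideoStream line refers to imutils' threaded video
# reader; the plain cv2.VideoCapture above is used instead, so imutils is
# not required here.)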
def shape_to_np(shape, dtype="int"):
    # Create a 68x2 array of (x, y) coordinates
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    # Loop over the landmarks and collect their coordinates
    for i in range(0, shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords
# Loop over the frames
while True:
    # Preprocess: resize to a fixed width and convert to grayscale
    frame = vs.read()[1]
    if frame is None:
        break
    (h, w) = frame.shape[:2]
    width = 1200
    r = width / float(w)
    dim = (width, int(h * r))
    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces
    rects = detector(gray, 0)
    # Loop over each detected face
    for rect in rects:
        # Get the landmark coordinates
        shape = predictor(gray, rect)
        shape = shape_to_np(shape)
        # Compute the EAR of each eye
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # Average the two
        ear = (leftEAR + rightEAR) / 2.0
        # Draw the eye regions
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
        # Check against the threshold
        if ear < EYE_AR_THRESH:
            COUNTER += 1
        else:
            # If the eyes were closed for enough consecutive frames, count a blink
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1
            # Reset the counter
            COUNTER = 0
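        # Note: requiring EYE_AR_CONSEC_FRAMES (3) consecutive low-EAR frames
        # (about 0.1 s at 30 fps) filters out single-frame detector noise.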
        # Display the counters
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.imshow("Frame", frame)key = cv2.waitKey(10) & 0xFF if key == 27:break
vs.release()
cv2.destroyAllWindows()
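A typical invocation looks like this (the script name is an assumption; the .dat file is dlib's standard pre-trained 68-point landmark model, downloaded separately):

python fatigue_detection.py --shape-predictor shape_predictor_68_face_landmarks.dat --video test.mp4

As written the script expects a video file; to read from a webcam you would pass 0 to cv2.VideoCapture instead of the --video path.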