Facial expression recognition with dlib on a Raspberry Pi

https://www.jianshu.com/p/848014d8dea9

https://www.pyimagesearch.com/2017/05/01/install-dlib-raspberry-pi/

Download Library

https://github.com/davisking/dlib

Identification code

https://gitee.com/Andrew_Qian/face/blob/master/from_video.py

Pre-trained model weights (required dependency)

https://github.com/AKSHAYUBHAT/TensorFace/blob/master/openface/models/dlib/shape_predictor_68_face_landmarks.dat

 

 

 

 

 

Third, the dependent libraries

dlib depends on the following libraries:

  1. Boost
  2. Boost.Python
  3. CMake
  4. X11
    installation method:
$ sudo apt-get update
$ sudo apt-get install build-essential cmake libgtk-3-dev libboost-all-dev -y 

Fourth, install the additional Python libraries dlib depends on, using pip3

$ pip3 install numpy
$ pip3 install scipy
$ pip3 install scikit-image

Fifth, the official installation

After unzipping the downloaded dlib archive, enter the dlib directory

$ sudo python3 setup.py install 

This step takes the longest, and be patient.

Sixth, verification

$ python3
Python 3.4.2 (default, Oct 19 2014, 13:31:11)
[GCC 4.9.1] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import dlib
>>>

If the import raises no error, dlib is installed correctly.

Seventh, restore the virtual memory and GPU memory settings to their original values

For how to modify them, see "Second, pre-modification preparatory work"

Done.

 

python code

#!/usr/bin/python
# coding: utf-8

"""
Detect faces in the camera feed and mark the 68 facial landmark
points in real time, then infer a coarse facial expression.
"""

import dlib          # dlib face-detection / landmark library
import numpy as np   # numpy numeric-processing library
import cv2           # OpenCV image-processing library


class face_emotion():
    """Real-time facial expression recognition from the camera feed.

    Uses dlib's frontal face detector and the pre-trained 68-point
    landmark predictor to locate faces, derives simple geometric
    features (mouth opening, eyebrow height/slope, eye opening), and
    classifies each face as Amazing / Happy / Angry / Nature, drawing
    the label onto the frame.

    Keys while running: 's' saves a screenshot, 'q' quits.
    """

    def __init__(self):
        # dlib's HOG-based frontal face detector.
        self.detector = dlib.get_frontal_face_detector()
        # Pre-trained 68-point landmark predictor; the .dat weights file
        # must be present in the working directory.
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

        # Open the default camera (index 0). If an external camera is
        # attached it is typically selected automatically.
        self.cap = cv2.VideoCapture(0)
        # propId 3 = CAP_PROP_FRAME_WIDTH: request a frame width of 480.
        self.cap.set(3, 480)
        # Screenshot counter used to number saved frames.
        self.cnt = 0

    def learning_face(self):
        """Main loop: grab frames, detect faces, classify and display.

        Runs until the camera fails or 'q' is pressed, then releases
        the camera and destroys all OpenCV windows.
        """
        # cap.isOpened() reports whether camera initialization succeeded.
        while self.cap.isOpened():

            # flag: whether the read succeeded; im_rd: the captured frame.
            flag, im_rd = self.cap.read()
            # BUGFIX: stop cleanly if the camera stops delivering frames
            # instead of crashing on a None image below.
            if not flag:
                break

            # Poll the keyboard, delaying 1 ms per frame.
            k = cv2.waitKey(1)

            # Convert to grayscale for detection.
            img_gray = cv2.cvtColor(im_rd, cv2.COLOR_RGB2GRAY)

            # Detect faces; returns a sequence of dlib rectangles.
            faces = self.detector(img_gray, 0)

            # Font used for the on-screen captions.
            font = cv2.FONT_HERSHEY_SIMPLEX

            if len(faces) != 0:
                # Process each detected face once.
                # BUGFIX: the original nested an enumerate(faces) loop
                # inside "for i in range(len(faces))", re-processing every
                # face len(faces) times per frame.
                for d in faces:
                    # Outline the face with a red rectangle (BGR).
                    cv2.rectangle(im_rd, (d.left(), d.top()),
                                  (d.right(), d.bottom()), (0, 0, 255))
                    # Face width, used to normalize all distance features.
                    self.face_width = d.right() - d.left()

                    # Predict the 68 landmark coordinates for this face.
                    shape = self.predictor(im_rd, d)
                    # Draw a small green circle on every landmark point.
                    for i in range(68):
                        cv2.circle(im_rd, (shape.part(i).x, shape.part(i).y),
                                   2, (0, 255, 0), -1, 8)

                    # Mouth geometry, normalized by face width.
                    mouth_width = (shape.part(54).x - shape.part(48).x) / self.face_width  # mouth width
                    mouth_higth = (shape.part(66).y - shape.part(62).y) / self.face_width  # mouth opening

                    # Eyebrow height and frown distance, summed over the
                    # five left-brow points (17-21) paired with the
                    # corresponding right-brow points (22-26).
                    # BUGFIX: the coordinate buffers are rebuilt per face;
                    # the original appended forever, so the line fit below
                    # mixed stale points from every previous frame.
                    brow_sum = 0    # accumulated brow heights
                    frown_sum = 0   # accumulated distance between brows
                    line_brow_x = []
                    line_brow_y = []
                    for j in range(17, 21):
                        brow_sum += (shape.part(j).y - d.top()) + (shape.part(j + 5).y - d.top())
                        frown_sum += shape.part(j + 5).x - shape.part(j).x
                        line_brow_x.append(shape.part(j).x)
                        line_brow_y.append(shape.part(j).y)

                    # Fit a straight line through the brow points; the
                    # fitted slope is opposite in sign to the visual brow
                    # tilt, hence the negation.
                    tempx = np.array(line_brow_x)
                    tempy = np.array(line_brow_y)
                    z1 = np.polyfit(tempx, tempy, 1)
                    self.brow_k = -round(z1[0], 3)

                    brow_hight = (brow_sum / 10) / self.face_width  # brow height ratio
                    brow_width = (frown_sum / 5) / self.face_width  # brow spacing ratio

                    # Eye opening: average vertical gap of both eyes
                    # (landmarks 37-41 left eye, 43-47 right eye).
                    eye_sum = (shape.part(41).y - shape.part(37).y
                               + shape.part(40).y - shape.part(38).y
                               + shape.part(47).y - shape.part(43).y
                               + shape.part(46).y - shape.part(44).y)
                    eye_hight = (eye_sum / 4) / self.face_width

                    # Classification heuristics.
                    # Open mouth: surprised or happy.
                    # BUGFIX: the original wrapped the comparison in
                    # round(), which rounds a boolean — the plain
                    # comparison is what was intended.
                    if mouth_higth >= 0.03:
                        if eye_hight >= 0.056:
                            cv2.putText(im_rd, "Amazing", (d.left(), d.bottom() + 20),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                        (0, 0, 255), 2, 4)
                        else:
                            cv2.putText(im_rd, "Happy", (d.left(), d.bottom() + 20),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                        (0, 0, 255), 2, 4)
                    # Closed mouth: angry (lowered brows) or neutral.
                    else:
                        if self.brow_k <= -0.3:
                            cv2.putText(im_rd, "Angry", (d.left(), d.bottom() + 20),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                        (0, 0, 255), 2, 4)
                        else:
                            cv2.putText(im_rd, "Nature", (d.left(), d.bottom() + 20),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                        (0, 0, 255), 2, 4)

                # Show how many faces were found.
                cv2.putText(im_rd, "faces:" + str(len(faces)), (20, 50),
                            font, 1, (0, 0, 255), 1, cv2.LINE_AA)
            else:
                # No face detected in this frame.
                cv2.putText(im_rd, "No Face", (20, 50),
                            font, 1, (0, 0, 255), 1, cv2.LINE_AA)

            # On-screen key legend.
            im_rd = cv2.putText(im_rd, "S: Screenshot", (20, 400),
                                font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
            im_rd = cv2.putText(im_rd, "Q: quit", (20, 450),
                                font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)

            # 's' saves the current (annotated) frame to disk.
            if k == ord('s'):
                self.cnt += 1
                cv2.imwrite("screenshoot" + str(self.cnt) + ".jpg", im_rd)

            # 'q' exits the loop.
            if k == ord('q'):
                break

            # Display the frame.
            cv2.imshow("camera", im_rd)

        # Release the camera.
        self.cap.release()

        # Destroy all windows created above.
        cv2.destroyAllWindows()


# Script entry point: build the recognizer and start the camera loop.
if __name__ == "__main__":
    my_face = face_emotion()
    my_face.learning_face()

  

 
 

Guess you like

Origin www.cnblogs.com/kekeoutlook/p/11986625.html