Source Code Walkthrough: Face Analysis with face_toolbox_keras demo.py

#!gdown https://drive.google.com/uc?id=1H37LER8mRRI4q_nxpS3uQz3DcGHkTrNU
get_ipython().system('mv lresnet100e_ir_keras.h5 ./models/verifier/insightface/lresnet100e_ir_keras.h5')
#!gdown https://drive.google.com/uc?id=18MyyXQIwhR5I6gzipYMiJ9ywgvFWQMvI
get_ipython().system('mv backbone_ir50_ms1m_keras.h5 ./models/verifier/face_evoLVe_ir50/backbone_ir50_ms1m_keras.h5')
#!gdown https://drive.google.com/uc?id=1P_eQHU8bNJEsB6hHt_fnltOwQVKIfhiX
get_ipython().system('mv backbone_ir50_asia_keras.h5 ./models/verifier/face_evoLVe_ir50/backbone_ir50_asia_keras.h5')




#!pip install keras==2.2.4

import warnings
warnings.filterwarnings("ignore")

import cv2
import numpy as np
from matplotlib import pyplot as plt


def resize_image(im, max_size=768):
    if np.max(im.shape) > max_size:
        ratio = max_size / np.max(im.shape)
        print(f"Resize image to ({str(int(im.shape[1]*ratio))}, {str(int(im.shape[0]*ratio))}).")
        return cv2.resize(im, (0,0), fx=ratio, fy=ratio)
    return im


# Test images are obtained from https://www.pexels.com/
im = cv2.imread("images/test.jpg")[..., ::-1]
im = resize_image(im) # Resize image to prevent GPU OOM.
h, w, _ = im.shape
plt.imshow(im)


# ---
# # 0. Coordinate ordering
# 
# Coordinates in this demo are ordered `(x, y)`, following the numpy array convention: `x` is the vertical axis (rows) and `y` is the horizontal axis (columns).
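# 
# As a quick illustration of this ordering: numpy-style cropping uses the coordinates
# directly, while OpenCV drawing functions expect (column, row) points, so the two values
# are swapped there. A minimal sketch; the box values below are hypothetical, not detector output.

x0, y0, x1, y1 = 50, 80, 200, 230        # hypothetical box: x = vertical (rows), y = horizontal (cols)
crop = im[x0:x1, y0:y1, :]               # numpy indexing: [rows, cols]
drawn = im.copy()
cv2.rectangle(drawn, (y0, x0), (y1, x1), (0, 255, 0), 2)  # OpenCV point order: (col, row)
plt.imshow(drawn)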

# ---
# # 1. Face detection
# 
# Face detection using S3FD and FAN.

from models.detector import face_detector


fd = face_detector.FaceAlignmentDetector(
    lmd_weights_path="./models/detector/FAN/2DFAN-4_keras.h5"  # options: 2DFAN-4_keras.h5, 2DFAN-1_keras.h5
)

# ## Detect faces

bboxes = fd.detect_face(im, with_landmarks=False)
assert len(bboxes) > 0, "No face detected."

# Display detected face
x0, y0, x1, y1, score = bboxes[0] # show the first detected face
x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])

plt.imshow(im[x0:x1, y0:y1, :])
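
# detect_face can return several boxes; the sketch below crops every detected face whose
# score passes a threshold and shows them side by side. The 0.9 threshold is an arbitrary
# illustrative value, not a toolbox default.

confident_faces = []
for x0, y0, x1, y1, score in bboxes:
    if score < 0.9:
        continue
    x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
    confident_faces.append(im[x0:x1, y0:y1, :])

plt.figure(figsize=(12, 4))
for i, face in enumerate(confident_faces):
    plt.subplot(1, len(confident_faces), i + 1)
    plt.imshow(face)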


# ## Detect faces and landmarks
# 
# `FaceDetector` will instantiate `FANLandmarksDetector` the first time `detect_face(with_landmarks=True)` is called.

bboxes, landmarks = fd.detect_face(im, with_landmarks=True)

# Display landmarks
plt.figure(figsize=(15,8))
num_faces = len(bboxes)
for i in range(num_faces):
    try:
        plt.subplot(1, num_faces, i+1)
        plt.imshow(fd.draw_landmarks(im, landmarks[i], color=(0,255,0)))
    except:
        pass


# ---
# # 2. Face parsing
# 
# Face parsing using BiSeNet.


from models.parser import face_parser

prs = face_parser.FaceParser()


# ## Parse without detection



out = prs.parse_face(im)




plt.imshow(out[0])



# Show parsing result with annotations

from utils.visualize import show_parsing_with_annos
show_parsing_with_annos(out[0])
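
# The parsing output out[0] is a per-pixel label map (the same array visualized above by
# show_parsing_with_annos). Below is a hedged sketch for isolating one region: it assumes
# the map has the same height/width as im, and the label index chosen is purely
# illustrative, so check the annotated plot for the actual BiSeNet label-to-class mapping.

parsing_map = out[0]
print("Labels present:", np.unique(parsing_map))

target_label = 1                      # assumed label index; pick one listed above
mask = parsing_map == target_label    # boolean mask over the image
masked = im.copy()
masked[~mask] = 0                     # black out everything outside the chosen region
plt.imshow(masked)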


# ## Parse with detection
# 
# The parsing network performs best on a single face (and better still if the face is aligned).

im = cv2.imread("images/test2.jpg")[..., ::-1]
im = resize_image(im) # Resize image to prevent GPU OOM.
h, w, _ = im.shape
plt.imshow(im)


# Set detector into FaceParser
try:
    fd
except NameError:
    from models.detector import face_detector
    fd = face_detector.FaceAlignmentDetector()
    
prs.set_detector(fd)
# prs.remove_detector()


out = prs.parse_face(im, with_detection=True)


#plt.figure(figsize=(15,8))
num_faces = len(out)
for i in range(num_faces):
    try:
        plt.subplot(1, num_faces, i+1)
        plt.imshow(out[i])
    except:
        pass


# ---
# # 3. Detect irises
# 
# Iris detection using ELG (Eye region Landmarks based Gaze estimation).

from models.detector.iris_detector import IrisDetector


# ## Detect irises, eyelids and pupils

im = cv2.imread("images/test5.jpg")[..., ::-1]
im = resize_image(im) # Resize image to prevent GPU OOM.
h, w, _ = im.shape
plt.imshow(im)


idet = IrisDetector()


idet.set_detector(fd)


eye_lms = idet.detect_iris(im)


# Display detection result
plt.figure(figsize=(15,10))
draw = idet.draw_pupil(im, eye_lms[0][0,...]) # draw left eye
draw = idet.draw_pupil(draw, eye_lms[0][1,...]) # draw right eye
bboxes = fd.detect_face(im, with_landmarks=False)
x0, y0, x1, y1, _ = bboxes[0].astype(np.int32)
plt.subplot(1,2,1)
plt.imshow(draw)
plt.subplot(1,2,2)
plt.imshow(draw[x0:x1, y0:y1])
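
# The indexing above (eye_lms[0][0,...] and eye_lms[0][1,...]) suggests one landmark array
# per eye for the first face. The sketch below averages each eye's landmark points as a rough
# eye-centre estimate; the landmark layout and ordering are assumptions here, so verify
# against the toolbox before relying on it.

left_center = eye_lms[0][0].mean(axis=0)
right_center = eye_lms[0][1].mean(axis=0)
print("Approx. eye centers:", left_center, right_center)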


# ---
# # 4. Face verification
# 
# Face verification using facenet.

from models.verifier.face_verifier import FaceVerifier


im1 = cv2.imread("images/test0.jpg")[..., ::-1]
im1 = resize_image(im1) # Resize image to prevent GPU OOM.
im2 = cv2.imread("images/BO1.jpg")[..., ::-1]
im2 = resize_image(im2) # Resize image to prevent GPU OOM.
im3 = cv2.imread("images/DT.jpg")[..., ::-1]
im3 = resize_image(im3) # Resize image to prevent GPU OOM.


fv = FaceVerifier(classes=512, extractor="facenet") # extractor="insightface"


fv.set_detector(fd)


# ## Verify if two given faces are the same person

# Face verification
result1, distance1 = fv.verify(im1, im2, threshold=0.5, with_detection=True, with_alignment=False, return_distance=True)
result2, distance2 = fv.verify(im1, im3, threshold=0.5, with_detection=True, with_alignment=False, return_distance=True)


plt.figure(figsize=(15,6))
plt.subplot(1,3,1)
plt.title("Source face")
plt.imshow(im1)
plt.subplot(1,3,2)
plt.title(f"Same person: {result1}\nCosine distance: {round(distance1, 2)}")
plt.imshow(im2)
plt.subplot(1,3,3)
plt.title(f"Same person: {result2}\nCosine distance: {round(distance2, 2)}")
plt.imshow(im3)
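
# fv.verify compares one pair of images, so checking a probe face against several references
# is just a loop over the same call. A minimal sketch; the 0.5 threshold simply mirrors the
# setting used above.

gallery = {"BO1.jpg": im2, "DT.jpg": im3}
for name, ref in gallery.items():
    same, dist = fv.verify(im1, ref, threshold=0.5, with_detection=True,
                           with_alignment=False, return_distance=True)
    print(f"{name}: same person = {same}, cosine distance = {dist:.2f}")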


# ---
# # 5. Gender and Age Estimation
# 
# Gender/Age estimation using insightface mobilenet.

from models.estimator.gender_age_estimator import GenderAgeEstimator


im = cv2.imread("images/BO1.jpg")[..., ::-1]
im = resize_image(im) # Resize image to prevent GPU OOM.
h, w, _ = im.shape
plt.imshow(im)


gae = GenderAgeEstimator(model_type="insightface")


gae.set_detector(fd)


gender, age = gae.predict_gender_age(im, with_detection=True)


print("Gender: female") if gender == 0 else print("Gender: male")
print(f"Age: {str(age)}")

  1. def resize_image(im, max_size=768): defines a function named resize_image that takes an image and a maximum size as input, with a default maximum size of 768.

  2. if np.max(im.shape) > max_size: if the image's largest dimension exceeds the given maximum size, the image needs to be scaled down.

  3. ratio = max_size / np.max(im.shape) computes the scaling ratio, i.e. the maximum size divided by the image's current largest dimension (a quick worked example follows this list).

  4. print(f"Resize image to ({str(int(im.shape[1]*ratio))}, {str(int(im.shape[0]*ratio))}).") prints the size the image will be resized to.

  5. return cv2.resize(im, (0,0), fx=ratio, fy=ratio) scales the image with OpenCV's resize function and returns the resized image.

  6. return im if the image's largest dimension is less than or equal to the given maximum size, the original image is returned unchanged.

  7. im = cv2.imread("images/test.jpg")[..., ::-1] reads the image with OpenCV and reverses the channel order with [..., ::-1], because OpenCV loads images as BGR while the subsequent processing expects RGB.

  8. im = resize_image(im) scales the image down with the resize_image function defined above, to prevent GPU out-of-memory errors.

  9. h, w, _ = im.shape gets the image's height, width, and number of channels.

  10. plt.imshow(im) displays the image with matplotlib.
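For example, with the default max_size=768, a 1080×1920 input gives ratio = 768/1920 = 0.4, so the function prints "Resize image to (768, 432)." and returns an array of shape (432, 768, 3). A quick check on a synthetic array, using only the function defined above:

    import numpy as np
    dummy = np.zeros((1080, 1920, 3), dtype=np.uint8)   # synthetic 1080x1920 RGB image
    resized = resize_image(dummy)                        # ratio = 768 / 1920 = 0.4
    print(resized.shape)                                 # expected: (432, 768, 3)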

Next, the code loads the face detection model and runs face detection and landmark detection on the image:

  1. from models.detector import face_detector imports the face_detector module used for face detection.
  2. fd = face_detector.FaceAlignmentDetector(lmd_weights_path="./models/detector/FAN/2DFAN-4_keras.h5") instantiates the face detector with pretrained weights.
  3. bboxes = fd.detect_face(im, with_landmarks=False) calls detect_face on the image and returns the face bounding boxes.
  4. x0, y0, x1, y1, score = bboxes[0] unpacks the bounding box and score of the first detected face.
  5. plt.imshow(im[x0:x1, y0:y1, :]) displays the first detected face.

Then the code runs face parsing on the image with the face parsing model:

  1. from models.parser import face_parser imports the face_parser module used for face parsing.
  2. prs = face_parser.FaceParser() instantiates the face parser.
  3. out = prs.parse_face(im) calls parse_face on the image and returns the parsing results.
  4. plt.imshow(out[0]) displays the first parsing result.

Finally, the code runs face detection together with face parsing and displays the results:

  1. im = cv2.imread("images/test2.jpg")[..., ::-1] reads another image.
  2. im = resize_image(im) scales the image down.
  3. h, w, _ = im.shape gets the image's height, width, and number of channels.
  4. plt.imshow(im) displays the image.
  5. prs.set_detector(fd) attaches the face detector to the parser.
  6. out = prs.parse_face(im, with_detection=True) runs face detection while parsing and returns the per-face results.
  7. for i in range(num_faces): iterates over each detected face.
  8. plt.subplot(1, num_faces, i+1) creates a subplot for each face.
  9. plt.imshow(out[i]) displays the parsing result for each detected face.

Reposted from blog.csdn.net/sinat_37574187/article/details/131609573