3D Face Reconstruction (Part 4)

Code Analysis

3dmm.py demonstrates the forward pass (3DMM parameters --> mesh) and the fitting pass (2D image + 3DMM --> 3D face).

1. Load the model

''' 3d morphable model example
3dmm parameters --> mesh 
fitting: 2d image + 3dmm -> 3d face
'''
import os, sys
import subprocess
import numpy as np
import scipy.io as sio
from skimage import io
from time import time
import matplotlib.pyplot as plt

sys.path.append('..')
import face3d
from face3d import mesh
from face3d.morphable_model import MorphabelModel

# --------------------- Forward: parameters(shape, expression, pose) --> 3D obj --> 2D image  ---------------
# --- 1. load model
bfm = MorphabelModel('Data/BFM/Out/BFM.mat')
print('init bfm model success')
  • Let us look at the MorphabelModel class in detail:
    shapeMU is the mean shape vector, i.e. the shape of an average face; its size is 160470*1 and it is stored in the order S = (X1, Y1, Z1, X2, Y2, Z2, ...)
    shapePC is the shape PCA basis matrix, of size 160470*199; it can be thought of as 200 face shapes losing one dimension after PCA, though the main purpose is to obtain an orthogonal basis
    shapeEV holds the coefficients of the orthogonal shape space, of size 199*1
    texMU is the mean texture vector, of size 160470*1, stored in the order T = (R1, G1, B1, R2, G2, B2, ...)
    texPC is the texture PCA basis matrix, of size 160470*199
    texEV holds the coefficients of the orthogonal texture space, of size 199*1
    tri holds the vertex indices of the triangles, of size 106466*3, i.e. 106466 triangles
    tri_mouth holds the triangle indices of the mouth region, of size 114*3
    kpt_ind holds the keypoint indices, of size 68
    So the 3DMM model is obtained by multiplying the orthogonal-space coefficients with the PCA basis and adding the mean (a minimal sketch of this combination follows, before the class definition)
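
To make the linear combination concrete, here is a minimal numpy sketch that builds one random face shape and reshapes it into an (nver, 3) vertex array. It is illustrative only: the coefficient scale mirrors get_shape_para below, and the .mat indexing is assumed to be the same as in load_BFM further down.

import numpy as np
import scipy.io as sio

# illustrative sketch only; assumes BFM.mat has the layout described above
model = sio.loadmat('Data/BFM/Out/BFM.mat')['model'][0, 0]

n_shape = model['shapePC'].shape[1]          # 199 shape components
alpha = np.random.rand(n_shape, 1) * 1e04    # shape coefficients, same scale as get_shape_para uses

# coefficients x PCA basis + mean shape --> flat vector (X1, Y1, Z1, X2, Y2, Z2, ...)
S = model['shapeMU'] + model['shapePC'].dot(alpha)   # (3*nver, 1)

# column-major reshape turns the flat vector into an (nver, 3) vertex array
vertices = np.reshape(S, [3, len(S) // 3], 'F').T
print(vertices.shape)                        # (53490, 3), since 3 * 53490 = 160470
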
class  MorphabelModel(object):
    """docstring for  MorphabelModel
    model: nver: number of vertices. ntri: number of triangles. *: must have. ~: can generate ones array for place holder.
            'shapeMU': [3*nver, 1]. *
            'shapePC': [3*nver, n_shape_para]. *
            'shapeEV': [n_shape_para, 1]. ~
            'expMU': [3*nver, 1]. ~ 
            'expPC': [3*nver, n_exp_para]. ~
            'expEV': [n_exp_para, 1]. ~
            'texMU': [3*nver, 1]. ~
            'texPC': [3*nver, n_tex_para]. ~
            'texEV': [n_tex_para, 1]. ~
            'tri': [ntri, 3] (start from 1, should sub 1 in python and c++). *
            'tri_mouth': [114, 3] (start from 1, as a supplement to mouth triangles). ~
            'kpt_ind': [68,] (start from 1). ~
    """
    def __init__(self, model_path, model_type = 'BFM'):
        super( MorphabelModel, self).__init__()
        if model_type=='BFM':
            self.model = load.load_BFM(model_path) # load the BFM model
        else:
            print('sorry, not support other 3DMM model now')
            exit()
            
        # fixed attributes
        self.nver = self.model['shapePC'].shape[0]/3 # number of vertices
        self.ntri = self.model['tri'].shape[0]
        self.n_shape_para = self.model['shapePC'].shape[1]  # 199
        self.n_exp_para = self.model['expPC'].shape[1] # 29
        self.n_tex_para = self.model['texMU'].shape[1] # 1
        
        self.kpt_ind = self.model['kpt_ind'] # keypoint indices
        self.triangles = self.model['tri']
        self.full_triangles = np.vstack((self.model['tri'], self.model['tri_mouth']))  # stack row-wise, e.g. two (1, 3) arrays become (2, 3); the mouth triangles are stored separately, so they are appended here

    # ------------------------------------- shape: represented with mesh(vertices & triangles(fixed))
    # get the shape coefficients
    def get_shape_para(self, type = 'random'):
        if type == 'zero':
            sp = np.zeros((self.n_shape_para, 1))
        elif type == 'random':
            sp = np.random.rand(self.n_shape_para, 1)*1e04
        return sp

    # get the expression coefficients (I am not yet familiar with how these are defined)
    def get_exp_para(self, type = 'random'):
        if type == 'zero':
            ep = np.zeros((self.n_exp_para, 1))
        elif type == 'random':
            ep = -1.5 + 3*np.random.random([self.n_exp_para, 1])
            ep[6:, 0] = 0

        return ep 

    # takes the shape and expression coefficients, returns the vertices, of size (nver, 3)
    def generate_vertices(self, shape_para, exp_para):
        '''
        Args:
            shape_para: (n_shape_para, 1)
            exp_para: (n_exp_para, 1) 
        Returns:
            vertices: (nver, 3)
        '''
        vertices = self.model['shapeMU'] + self.model['shapePC'].dot(shape_para) + self.model['expPC'].dot(exp_para)
        vertices = np.reshape(vertices, [int(3), int(len(vertices)/3)], 'F').T

        return vertices

    # -------------------------------------- texture: here represented with rgb value(colors) in vertices.
    # the texture is per-vertex RGB values; this returns the texture coefficients
    def get_tex_para(self, type = 'random'):
        if type == 'zero':
            tp = np.zeros((self.n_tex_para, 1))
        elif type == 'random':
            tp = np.random.rand(self.n_tex_para, 1)
        return tp
    # takes the texture coefficients, returns the per-vertex colors
    def generate_colors(self, tex_para):
        '''
        Args:
            tex_para: (n_tex_para, 1)
        Returns:
            colors: (nver, 3)
        '''
        colors = self.model['texMU'] + self.model['texPC'].dot(tex_para*self.model['texEV'])
        colors = np.reshape(colors, [int(3), int(len(colors)/3)], 'F').T/255.  
        
        return colors


    # ------------------------------------------- transformation
    # -------------  transform
    def rotate(self, vertices, angles):
        ''' rotate face
        Args:
            vertices: [nver, 3]
            angles: [3] x, y, z rotation angle(degree)
            x: pitch. positive for looking down 
            y: yaw. positive for looking left
            z: roll. positive for tilting head right
        Returns:
            vertices: rotated vertices
        '''
        return mesh.transform.rotate(vertices, angles)

    def transform(self, vertices, s, angles, t3d):
        R = mesh.transform.angle2matrix(angles)
        return mesh.transform.similarity_transform(vertices, s, R, t3d)

    def transform_3ddfa(self, vertices, s, angles, t3d): # only used for processing 300W_LP data
        R = mesh.transform.angle2matrix_3ddfa(angles)
        return mesh.transform.similarity_transform(vertices, s, R, t3d)

    # --------------------------------------------------- fitting
    def fit(self, x, X_ind, max_iter = 4, isShow = False):
        ''' fit 3dmm & pose parameters
        Args:
            x: (n, 2) image points
            X_ind: (n,) corresponding Model vertex indices
            max_iter: iteration
            isShow: whether to reserve middle results for show
        Returns:
            fitted_sp: (n_sp, 1). shape parameters
            fitted_ep: (n_ep, 1). exp parameters
            s, angles, t
        '''
        if isShow:
            fitted_sp, fitted_ep, s, R, t = fit.fit_points_for_show(x, X_ind, self.model, n_sp = self.n_shape_para, n_ep = self.n_exp_para, max_iter = max_iter)
            angles = np.zeros((R.shape[0], 3))
            for i in range(R.shape[0]):
                angles[i] = mesh.transform.matrix2angle(R[i])
        else:
            fitted_sp, fitted_ep, s, R, t = fit.fit_points(x, X_ind, self.model, n_sp = self.n_shape_para, n_ep = self.n_exp_para, max_iter = max_iter)
            angles = mesh.transform.matrix2angle(R)
        return fitted_sp, fitted_ep, s, angles, t
  • At the beginning of the class there is a loading function. The BFM model used here carries expression parameters, because the expression information from 3DDFA has already been merged in:
def load_BFM(model_path): # the example model has this particular number of points, unlike the 01—— data
 
    C = sio.loadmat(model_path)
    model = C['model']
    model = model[0,0]

    # change dtype from double(np.float64) to np.float32, 
    # since big matrix processing (especially matrix dot) is too slow in python.
    model['shapeMU'] = (model['shapeMU'] + model['expMU']).astype(np.float32) # the expression mean is folded into the shape mean
    model['shapePC'] = model['shapePC'].astype(np.float32)
    model['shapeEV'] = model['shapeEV'].astype(np.float32)
    model['expEV'] = model['expEV'].astype(np.float32)
    model['expPC'] = model['expPC'].astype(np.float32)

    # matlab start with 1. change to 0 in python.
    model['tri'] = model['tri'].T.copy(order = 'C').astype(np.int32) - 1  # triangle vertex indices
    model['tri_mouth'] = model['tri_mouth'].T.copy(order = 'C').astype(np.int32) - 1  # mouth-region triangle indices

    # kpt ind
    model['kpt_ind'] = (np.squeeze(model['kpt_ind']) - 1).astype(np.int32) # keypoint indices, shape (68,)

    return model
  • Build the rotation matrix and apply it to rotate the vertices:
def rotate(vertices, angles):
    ''' rotate vertices. 
    X_new = R.dot(X). X: 3 x 1   
    Args:
        vertices: [nver, 3]. 
        rx, ry, rz: degree angles
        rx: pitch. positive for looking down 
        ry: yaw. positive for looking left
        rz: roll. positive for tilting head right
    Returns:
        rotated vertices: [nver, 3]
    '''
    R = angle2matrix(angles)
    rotated_vertices = vertices.dot(R.T)

    return rotated_vertices
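
For reference, converting Euler angles in degrees to a rotation matrix is typically done as in the sketch below, with R = Rz·Ry·Rx. This is one common convention and may differ in sign or order from the library's mesh.transform.angle2matrix.

import numpy as np

def angle2matrix_sketch(angles):
    # build a rotation matrix from (pitch, yaw, roll) in degrees; a common convention,
    # not necessarily identical to mesh.transform.angle2matrix
    x, y, z = np.deg2rad(angles)
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(x), -np.sin(x)],
                   [0, np.sin(x),  np.cos(x)]])
    Ry = np.array([[ np.cos(y), 0, np.sin(y)],
                   [0, 1, 0],
                   [-np.sin(y), 0, np.cos(y)]])
    Rz = np.array([[np.cos(z), -np.sin(z), 0],
                   [np.sin(z),  np.cos(z), 0],
                   [0, 0, 1]])
    return Rz.dot(Ry).dot(Rx)

# rotated = vertices.dot(R.T) then matches X_new = R.dot(X) for column vectors
R = angle2matrix_sketch([10, 30, 20])
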

2. Generate the face mesh, i.e. vertex coordinates and colors

The expression parameters here are random. Multiplying the expression parameters by the PCA eigenvectors produces the deformation, and since the parameters are now fixed, a unique face is generated.

sp = bfm.get_shape_para('random')
ep = bfm.get_exp_para('random')
vertices = bfm.generate_vertices(sp, ep)

tp = bfm.get_tex_para('random')
colors = bfm.generate_colors(tp)
colors = np.minimum(np.maximum(colors, 0), 1)

3. Transform the vertices to a suitable position

s = 8e-04
angles = [10, 30, 20]
t = [0, 0, 0]
transformed_vertices = bfm.transform(vertices, s, angles, t)
projected_vertices = transformed_vertices.copy() # using standard camera & orthographic projection
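
bfm.transform applies a similarity transform: scale, then rotate, then translate. A minimal sketch of that operation on an (nver, 3) vertex array is shown below; it is illustrative only, and the library's mesh.transform.similarity_transform is assumed, not guaranteed, to behave equivalently.

import numpy as np

def similarity_transform_sketch(vertices, s, R, t3d):
    # X_new = s * R.dot(X) + t, applied row-wise to an (nver, 3) array
    return s * vertices.dot(R.T) + np.array(t3d, dtype=np.float32)

# e.g. transformed = similarity_transform_sketch(vertices, 8e-04, angle2matrix_sketch([10, 30, 20]), [0, 0, 0])
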

4. Render: from 3D to 2D

This step renders the randomly generated 3DMM model into an image.

# set prop of rendering
h = w = 256; c = 3
image_vertices = mesh.transform.to_image(projected_vertices, h, w) # convert to image coordinates

image = mesh.render.render_colors(image_vertices, bfm.triangles, colors, h, w)
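
mesh.transform.to_image maps the object-space vertices (origin at the center, y pointing up) into image coordinates (origin at the top-left corner, y pointing down). A rough sketch of that idea, which may differ in detail from the library's implementation:

import numpy as np

def to_image_sketch(vertices, h, w):
    # shift the origin from the image center to the corner, then flip y for the image convention
    image_vertices = vertices.copy()
    image_vertices[:, 0] = image_vertices[:, 0] + w / 2
    image_vertices[:, 1] = image_vertices[:, 1] + h / 2
    image_vertices[:, 1] = h - image_vertices[:, 1] - 1
    return image_vertices
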

5. 2D image landmarks and the corresponding 3D vertices --> parameters (pose, shape, expression)

Above we generated an image, so we can read off its landmarks.

x = projected_vertices[bfm.kpt_ind, :2] # 2d keypoint, which can be detected from image
X_ind = bfm.kpt_ind # index of keypoints in 3DMM. fixed.

# fit
fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = bfm.fit(x, X_ind, max_iter = 3)

# verify fitted parameters
fitted_vertices = bfm.generate_vertices(fitted_sp, fitted_ep)
transformed_vertices = bfm.transform(fitted_vertices, fitted_s, fitted_angles, fitted_t)

image_vertices = mesh.transform.to_image(transformed_vertices, h, w)
fitted_image = mesh.render.render_colors(image_vertices, bfm.triangles, colors, h, w)


# ------------- print & show 
print('pose, groundtruth: \n', s, angles[0], angles[1], angles[2], t[0], t[1])
print('pose, fitted: \n', fitted_s, fitted_angles[0], fitted_angles[1], fitted_angles[2], fitted_t[0], fitted_t[1])

save_folder = 'results/3dmm'
if not os.path.exists(save_folder):
    os.mkdir(save_folder)

io.imsave('{}/generated.jpg'.format(save_folder), image)
io.imsave('{}/fitted.jpg'.format(save_folder), fitted_image)
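
As an extra sanity check (not part of the original script), the fitted keypoints can be reprojected and compared against the input landmarks x; everything used here already appears in the code above.

# mean 2D distance between the fitted model's keypoints and the input landmarks
fitted_kpt = transformed_vertices[bfm.kpt_ind, :2]
err = np.mean(np.linalg.norm(fitted_kpt - x, axis=1))
print('mean 2D landmark error: {:.4f}'.format(err))
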

  • Here, fit (the class method shown above) solves for the corresponding 3D parameters from the 2D points; internally it calls fit_points.
# given the 2D landmark coordinates and the indices of the 3DMM keypoints, solve for the 3D model parameters
fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = bfm.fit(x, X_ind, max_iter = 3)

  • The most important function is fit_points: it first estimates the transformation matrix P, then decomposes it into s, R, t, where R corresponds to the three angles.
def fit_points(x, X_ind, model, n_sp, n_ep, max_iter = 4):
    '''
    Args:
        x: (n, 2) image points
        X_ind: (n,) corresponding Model vertex indices
        model: 3DMM
        max_iter: iteration
    Returns:
        sp: (n_sp, 1). shape parameters
        ep: (n_ep, 1). exp parameters
        s, R, t
    '''
    x = x.copy().T

    #-- init: initialize the shape and expression parameters to zero
    sp = np.zeros((n_sp, 1), dtype = np.float32)
    ep = np.zeros((n_ep, 1), dtype = np.float32)

    #-------------------- estimate
    X_ind_all = np.tile(X_ind[np.newaxis, :], [3, 1])*3 # (3, 68)

    X_ind_all[1, :] += 1 # +1 selects the y components
    X_ind_all[2, :] += 2 # +2 selects the z components
    valid_ind = X_ind_all.flatten('F')

    shapeMU = model['shapeMU'][valid_ind, :]

    shapePC = model['shapePC'][valid_ind, :n_sp] # (204, 199)
    expPC = model['expPC'][valid_ind, :n_ep]

    for i in range(max_iter):
        X = shapeMU + shapePC.dot(sp) + expPC.dot(ep)  # current estimate of the 3D face

        X = np.reshape(X, [int(len(X)/3), 3]).T
        #----- estimate pose
        # the Gold Standard Algorithm estimates the affine matrix P, i.e. x2d = P * X3d
        P = mesh.transform.estimate_affine_matrix_3d22d(X.T, x.T)

        s, R, t = mesh.transform.P2sRt(P)  # decompose P into the scale s, rotation matrix R and translation t

        rx, ry, rz = mesh.transform.matrix2angle(R)
        # print('Iter:{}; estimated pose: s {}, rx {}, ry {}, rz {}, t1 {}, t2 {}'.format(i, s, rx, ry, rz, t[0], t[1]))

        #----- estimate shape & expression
        # expression
        shape = shapePC.dot(sp)

        shape = np.reshape(shape, [int(len(shape)/3), 3]).T

        ep = estimate_expression(x, shapeMU, expPC, model['expEV'][:n_ep,:], shape, s, R, t[:2], lamb = 20)

        # shape
        expression = expPC.dot(ep)
        expression = np.reshape(expression, [int(len(expression)/3), 3]).T
        sp = estimate_shape(x, shapeMU, shapePC, model['shapeEV'][:n_sp,:], expression, s, R, t[:2], lamb = 40)

    return sp, ep, s, R, t
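
For reference, decomposing the affine matrix P (here assumed to be 3x4) into scale s, rotation R and translation t can be done roughly as below; this is a sketch of the standard approach and is assumed, not guaranteed, to match mesh.transform.P2sRt.

import numpy as np

def P2sRt_sketch(P):
    # decompose a 3x4 affine camera matrix into scale, rotation and translation
    t = P[:, 3]                                            # translation
    R1 = P[0:1, :3]
    R2 = P[1:2, :3]
    s = (np.linalg.norm(R1) + np.linalg.norm(R2)) / 2.0    # average row norm gives the scale
    r1 = R1 / np.linalg.norm(R1)
    r2 = R2 / np.linalg.norm(R2)
    r3 = np.cross(r1, r2)                                  # complete a right-handed orthonormal basis
    R = np.concatenate((r1, r2, r3), axis=0)
    return s, R, t

The estimate_expression and estimate_shape calls in the loop above alternately solve for the expression and shape coefficients; judging from the lamb argument and the EV terms passed in, they appear to be regularized linear least-squares solves, with each component scaled by its EV entry.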


Reposted from blog.csdn.net/landing_guy_/article/details/120892800