[3D Vision] Code analysis of mesh sampling to SDF: sample_SDF_points

Complete code for sample_SDF_points

gpu_id = '5'
source_dir = 'demo_data/input'
target_dir = 'demo_data/output'
all_classes = [
	"02691156",
    "04090263"
]
num_samples_and_method = [(100000, 'uniformly'), (100000, 'near')]
################################################

import os
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
from datetime import datetime
import numpy as np
import torch
from utils import *

for c in all_classes:
    input_dir = os.path.join(source_dir, c)
    output_dir = os.path.join(target_dir, c)
    os.makedirs(output_dir, exist_ok=True)
    all_shapes = os.listdir(input_dir)
    all_shapes = [f.split('.')[0] for f in all_shapes]
    for i, shape_id in enumerate(all_shapes):
        print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), c, 'processing: %d/%d'%(i,len(all_shapes)))
        in_path = os.path.join(input_dir, shape_id+'.obj')
        out_path = os.path.join(output_dir, shape_id+'.npy')

        vertices, faces = load_obj(in_path)
        mesh = obj2nvc(vertices, faces).cuda()
        mesh_normals = face_normals(mesh)
        distrib = area_weighted_distribution(mesh, mesh_normals)

        xyz = sample_points(mesh, num_samples_and_method, mesh_normals, distrib)
        sd = points_mesh_signed_distance(xyz, mesh)
        xyz_sd = torch.cat([xyz, sd.unsqueeze(1)], dim=1)
        rand_idx = torch.randperm(xyz_sd.shape[0])
        xyz_sd = xyz_sd[rand_idx].cpu().numpy()
        np.save(out_path, xyz_sd)

Load the .obj file to read vertices and faces: vertices, faces = load_obj(in_path)

def load_obj(filename):
    """
    Args:
        filename: str, path to .obj file
    Returns:
        vertices: tensor(float), shape (num_vertices, 3)
        faces: tensor(long), shape (num_faces, 3)
    """
    assert os.path.exists(filename), 'File \''+filename+'\' does not exist.'
    reader = tinyobjloader.ObjReader()  # construct tinyobjloader's ObjReader
    config = tinyobjloader.ObjReaderConfig()
    config.triangulate = True
    reader.ParseFromFile(filename, config)  # read the obj file with the given config
    attrib = reader.GetAttrib()  # per-vertex attributes
    vertices = torch.FloatTensor(attrib.vertices).reshape(-1, 3)  # n x 3 tensor of vertex coordinates
    shapes = reader.GetShapes()
    faces = []
    for shape in shapes:
        faces += [idx.vertex_index for idx in shape.mesh.indices]
    faces = torch.LongTensor(faces).reshape(-1, 3)
    return vertices, faces

attrib = reader.GetAttrib()

The returned object has type attrib_t. Its attrib.vertices field stores the three coordinates of every vertex consecutively in a flat list, so for n vertices its length is 3n.

class attrib_t(__pybind11_builtins.pybind11_object):
    # no doc
    def numpy_vertices(self): # real signature unknown; restored from __doc__
        """ numpy_vertices(self: tinyobjloader.attrib_t) -> numpy.ndarray[float64] """
        pass

    def __init__(self): # real signature unknown; restored from __doc__
        """ __init__(self: tinyobjloader.attrib_t) -> None """
        pass

    colors = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    normals = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    texcoords = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    vertices = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

vertices = torch.FloatTensor(attrib.vertices).reshape(-1, 3)

torch.FloatTensor()

Type conversion: turns a Python list or NumPy array into a tensor whose elements are 32-bit floats.

reshape(-1,3)

Reshapes the tensor to 3 columns; the -1 means the number of rows is inferred automatically.
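
A minimal sketch (with a hypothetical two-vertex list) of how the flat coordinate list becomes an (n, 3) tensor:

import torch

flat = [0.0, 0.0, 0.0,   # vertex 0
        1.0, 2.0, 3.0]   # vertex 1
v = torch.FloatTensor(flat).reshape(-1, 3)
print(v.shape)  # torch.Size([2, 3])
print(v[1])     # tensor([1., 2., 3.])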

shapes = reader.GetShapes()

The data structures of shapes and shape.mesh.indices are shown by the shape_t stub below; each element of shape.mesh.indices exposes a vertex_index, which the loop below collects.

class shape_t(__pybind11_builtins.pybind11_object):
    # no doc
    def __init__(self): # real signature unknown; restored from __doc__
        """ __init__(self: tinyobjloader.shape_t) -> None """
        pass

    lines = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    mesh = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    name = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    points = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

faces += [idx.vertex_index for idx in shape.mesh.indices]

The resulting faces list has length 301,656: the three vertex indices of each face are appended to the list consecutively.

faces = torch.LongTensor(faces).reshape(-1, 3)

The resulting faces tensor has shape 100,552 x 3, i.e. there are 100,552 faces; each row holds the indices of the three vertices that form that face.

Convert the data to the (num_faces, 3 vertices, xyz) layout: mesh = obj2nvc(vertices, faces).cuda()

def obj2nvc(vertices, faces):
    """
    Args:
        vertices: tensor(float), shape (num_vertices, 3)
        faces: tensor(long), shape (num_faces, 3)
    Returns:
        mesh: tensor(float), shape (num_faces, 3, 3), (num_faces, 3 vertices, xyz coordinates)
    """
    mesh = vertices[faces.flatten()].reshape(faces.size()[0], 3, 3)
    return mesh.contiguous()

mesh = vertices[faces.flatten()].reshape(faces.size()[0], 3, 3)

The resulting mesh tensor has shape 100,552 x 3 x 3: 3 vertices per face and 3 coordinates per vertex.
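
As a sketch, the same indexing on a hypothetical toy mesh (a unit square split into two triangles):

import torch

vertices = torch.tensor([[0., 0., 0.],
                         [1., 0., 0.],
                         [1., 1., 0.],
                         [0., 1., 0.]])
faces = torch.tensor([[0, 1, 2],
                      [0, 2, 3]])
mesh = vertices[faces.flatten()].reshape(faces.size()[0], 3, 3)
print(mesh.shape)  # torch.Size([2, 3, 3])
print(mesh[1])     # the 3 xyz rows of face 1: vertices 0, 2 and 3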

Find the normal of each face: mesh_normals = face_normals(mesh)

def face_normals(mesh):
    """
    Args:
        mesh: tensor(float), shape (num_faces, 3, 3)
    Returns:
        normals: tensor(float), shape (num_faces, 3)
    """
    vec_a = mesh[:, 0] - mesh[:, 1]  # vertex 0 minus vertex 1 of each triangle -> edge vector
    vec_b = mesh[:, 1] - mesh[:, 2]  # vertex 1 minus vertex 2 of each triangle -> edge vector
    normals = torch.cross(vec_a, vec_b)  # cross product of the two edges gives the face normal (length = 2 x face area)
    return normals
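
A quick sanity check of face_normals (assuming the function above is defined) on a hypothetical right triangle with legs of length 1, so area 0.5. Note the returned normals are not unit length; their norm is twice the face area, which is exactly what area_weighted_distribution below relies on:

import torch

tri = torch.tensor([[[0., 0., 0.],
                     [1., 0., 0.],
                     [0., 1., 0.]]])        # shape (1 face, 3 vertices, xyz)
n = face_normals(tri)
print(n)                                    # tensor([[0., 0., 1.]])
print(torch.norm(n, p=2, dim=1) * 0.5)      # tensor([0.5000]) -> the triangle's area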

Build the area-weighted face distribution: distrib = area_weighted_distribution(mesh, mesh_normals)

def area_weighted_distribution(mesh, normals=None):
    """
    Args:
        mesh: tensor(float), shape (num_faces, 3, 3)
        normals: tensor(float), shape (num_faces, 3)
    Returns:
        distrib: distribution
    """
    if normals is None:
        normals = face_normals(mesh)
    areas = torch.norm(normals, p=2, dim=1) * 0.5  # cross-product magnitude = 2 x triangle area
    areas /= torch.sum(areas) + 1e-10              # normalize to a probability distribution
    distrib = torch.distributions.Categorical(areas.view(-1))
    return distrib

torch.norm()

Computes the p-norm of the input tensor along the given dimension dim. Here the L2 norm of each (unnormalized) face normal equals twice the triangle's area, so multiplying by 0.5 yields the face areas that weight the sampling distribution.

def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None)

References: usage of the torch.norm() function; understanding norms.
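
A minimal sketch of the area-weighting idea with two hypothetical faces, the second three times larger than the first, so it should be chosen about three times as often:

import torch

areas = torch.tensor([1.0, 3.0])
areas /= areas.sum()                      # [0.25, 0.75]
distrib = torch.distributions.Categorical(areas)
idx = distrib.sample([10000])
print((idx == 1).float().mean())          # roughly 0.75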

Highlight 1: xyz = sample_points(mesh, num_samples_and_method, mesh_normals, distrib)

num_samples_and_method = [(100000, 'uniformly'), (100000, 'near')]

Sample points from the mesh model.

def sample_points(mesh, num_samples_and_method, normals=None, distrib=None):
    """
    Args:
        mesh: tensor(float), shape (num_faces, 3, 3)
        num_samples_and_method: [tuple(int, str)]
        normals: tensor(float), shape (num_faces, 3)
        distrib: distribution
    Returns:
        samples: tensor(float), shape (num_samples, 3)
    """
    if normals is None:
        normals = face_normals(mesh)
    if distrib is None:
        distrib = area_weighted_distribution(mesh, normals)
    samples = []
    for num_samples, method in num_samples_and_method:
        if method == 'uniformly':
            samples.append(sample_uniformly(mesh, num_samples))
        elif method == 'surface':
            samples.append(sample_on_surface(mesh, num_samples, normals, distrib)[0])
        elif method == 'near':
            samples.append(sample_near_surface(mesh, num_samples, normals, distrib))
    samples = torch.cat(samples, dim=0)
    return samples
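
A hypothetical usage sketch, reusing the toy two-triangle mesh from the obj2nvc example above (and assuming the utility functions are defined); the per-method sample counts are simply concatenated:

xyz = sample_points(mesh, [(1000, 'uniformly'), (1000, 'near')])
print(xyz.shape)  # torch.Size([2000, 3])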

Uniform sampling: sample_uniformly(mesh, num_samples)

def sample_uniformly(mesh, num_samples):
    """
    sample uniformly in [-1,1] bounding volume.
    Args:
        mesh: tensor(float), shape (num_faces, 3, 3)
        num_samples: int
    Returns:
        samples: tensor(float), shape (num_samples, 3)
    """
    samples = (torch.rand(num_samples, 3) - 0.5) * 1.1
    samples = samples.to(mesh.device)
    return samples
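
A quick sketch of the range these samples actually cover: (rand - 0.5) * 1.1 yields points in roughly the [-0.55, 0.55]^3 cube, which assumes the mesh has been normalized to fit well inside it:

import torch

samples = (torch.rand(100000, 3) - 0.5) * 1.1
print(samples.min().item(), samples.max().item())  # approximately -0.55 and 0.55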

Sampling on the surface: sample_on_surface(mesh, num_samples, normals, distrib)

squeeze removes dimensions of size 1; unsqueeze inserts a new dimension of size 1, e.g. unsqueeze(-1) appends one as the last dimension.

def sample_on_surface(mesh, num_samples, normals=None, distrib=None):
    """
    Args:
        mesh: tensor(float), shape (num_faces, 3, 3)
        num_samples: int
        normals: tensor(float), shape (num_faces, 3)
        distrib: distribution
    Returns:
        samples: tensor(float), shape (num_samples, 3)
        normals: tensor(float), shape (num_samples, 3)
    """
    if normals is None:
        normals = face_normals(mesh)
    if distrib is None:
        distrib = area_weighted_distribution(mesh, normals)
    idx = distrib.sample([num_samples])
    selected_faces = mesh[idx]
    selected_normals = normals[idx]
    u = torch.sqrt(torch.rand(num_samples)).to(mesh.device).unsqueeze(-1)  # the sqrt makes the samples uniform over each triangle's area
    v = torch.rand(num_samples).to(mesh.device).unsqueeze(-1)
    samples = (1 - u) * selected_faces[:,0,:] + (u * (1 - v)) * selected_faces[:,1,:] + u * v * selected_faces[:,2,:]  # one point sampled on each selected face
    return samples, selected_normals
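
A small sketch checking that the three barycentric weights used above always sum to 1, so every sample is a convex combination of its triangle's vertices and therefore lies inside the triangle:

import torch

num = 5
u = torch.sqrt(torch.rand(num)).unsqueeze(-1)
v = torch.rand(num).unsqueeze(-1)
w = torch.cat([1 - u, u * (1 - v), u * v], dim=1)
print(w.sum(dim=1))  # all ones (up to floating point)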

Sampling near the surface: sample_near_surface(mesh, num_samples, normals, distrib)

def sample_near_surface(mesh, num_samples, normals=None, distrib=None):
    """
    Args:
        mesh: tensor(float), shape (num_faces, 3, 3)
        num_samples: int
        normals: tensor(float), shape (num_faces, 3)
        distrib: distribution
    Returns:
        samples: tensor(float), shape (num_samples, 3)
    """
    samples = sample_on_surface(mesh, num_samples, normals, distrib)[0]  # sample on the surface
    samples += torch.randn_like(samples) * 0.01  # then perturb the coordinates with small Gaussian noise
    return samples

torch.randn draws random numbers from the standard normal distribution (mean 0, variance 1).
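
A tiny sketch of the perturbation scale, with hypothetical on-surface points at the origin:

import torch

surface = torch.zeros(100000, 3)
near = surface + torch.randn_like(surface) * 0.01
print(near.std().item())  # about 0.01: the offsets have standard deviation 0.01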

Highlight 2: sd = points_mesh_signed_distance(xyz, mesh)

Compute the signed distance from each sample point to the mesh surface.

def points_mesh_signed_distance(points, mesh):
    """
    Args:
        points: tensor(float), shape (num_points, 3)
        mesh: tensor(float), shape (num_faces, 3, 3)
    Returns:
        sd: tensor(float), shape (num_points,)
    """
    sd = mesh2sdf.mesh2sdf_gpu(points, mesh)[0]  # NVIDIA's C++/CUDA GPU implementation of mesh2sdf
    return sd

Concatenate the coordinates xyz with the signed distance sd: xyz_sd = torch.cat([xyz, sd.unsqueeze(1)], dim=1)

sd.unsqueeze(1) adds a trailing dimension: if sd.shape is torch.Size([num_samples]), then sd.unsqueeze(1).shape is torch.Size([num_samples, 1]).
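
A minimal sketch of this concatenation with hypothetical tensors; each row of the saved array is (x, y, z, signed distance):

import torch

xyz = torch.rand(200000, 3)   # sampled coordinates
sd = torch.rand(200000)       # signed distances, shape (num_samples,)
xyz_sd = torch.cat([xyz, sd.unsqueeze(1)], dim=1)
print(xyz_sd.shape)           # torch.Size([200000, 4])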


Original post: blog.csdn.net/weixin_43693967/article/details/127319278