mirror of https://github.com/facebookresearch/pytorch3d.git
synced 2026-02-06 22:12:16 +08:00

Address black + isort fbsource linter warnings

Summary: Address black + isort fbsource linter warnings from D20558374 (previous diff)
Reviewed By: nikhilaravi
Differential Revision: D20558373
fbshipit-source-id: d3607de4a01fb24c0d5269634563a7914bddf1c8

committed by Facebook GitHub Bot
parent eb512ffde3
commit d57daa6f85
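
The hunks below are formatting-only; no behavior changes. Each change either collapses a call that fits within black's 88-character line limit onto a single line, or regroups imports into isort's order (stdlib first, then third-party, then local). In the rendering below, each hunk shows the pre-existing multi-line form directly above its reformatted one-line replacement. As a minimal sketch of the tooling, assuming the black and isort packages are installed (the exact fbsource lint configuration is not part of this commit), the same transformations can be reproduced from Python:

    # Hypothetical reproduction of the two formatters applied in this diff.
    import black
    import isort

    # A multi-line call from the obj_io hunk; black collapses it because the
    # one-line form fits within the default 88-character line length.
    src = (
        '_Aux = namedtuple(\n'
        '    "Properties", "normals verts_uvs material_colors texture_images"\n'
        ')\n'
    )
    print(black.format_str(src, mode=black.FileMode()))

    # isort separates stdlib imports from third-party ones, as in the
    # obj_io/ply_io import hunks (the isort >= 5 `isort.code` API is assumed).
    print(isort.code("import numpy as np\nimport os\nimport torch\n"))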
@@ -4,4 +4,5 @@
from .obj_io import load_obj, load_objs_as_meshes, save_obj
from .ply_io import load_ply, save_ply


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -2,16 +2,16 @@


"""This module implements utility functions for loading and saving meshes."""
import numpy as np
import os
import pathlib
import warnings
from collections import namedtuple
from typing import List, Optional

import numpy as np
import torch
from fvcore.common.file_io import PathManager
from PIL import Image

from pytorch3d.structures import Meshes, Textures, join_meshes


@@ -51,9 +51,7 @@ def _read_image(file_name: str, format=None):

# Faces & Aux type returned from load_obj function.
_Faces = namedtuple("Faces", "verts_idx normals_idx textures_idx materials_idx")
_Aux = namedtuple(
"Properties", "normals verts_uvs material_colors texture_images"
)
_Aux = namedtuple("Properties", "normals verts_uvs material_colors texture_images")


def _format_faces_indices(faces_indices, max_index):

@@ -247,9 +245,7 @@ def load_objs_as_meshes(files: list, device=None, load_textures: bool = True):
image = list(tex_maps.values())[0].to(device)[None]
tex = Textures(verts_uvs=verts_uvs, faces_uvs=faces_uvs, maps=image)

mesh = Meshes(
verts=[verts], faces=[faces.verts_idx.to(device)], textures=tex
)
mesh = Meshes(verts=[verts], faces=[faces.verts_idx.to(device)], textures=tex)
mesh_list.append(mesh)
if len(mesh_list) == 1:
return mesh_list[0]

@@ -308,9 +304,7 @@ def _parse_face(
# Subdivide faces with more than 3 vertices. See comments of the
# load_obj function for more details.
for i in range(len(face_verts) - 2):
faces_verts_idx.append(
(face_verts[0], face_verts[i + 1], face_verts[i + 2])
)
faces_verts_idx.append((face_verts[0], face_verts[i + 1], face_verts[i + 2]))
if len(face_normals) > 0:
faces_normals_idx.append(
(face_normals[0], face_normals[i + 1], face_normals[i + 2])

@@ -367,8 +361,7 @@ def _load(f_obj, data_dir, load_textures=True):
tx = [float(x) for x in line.split()[1:3]]
if len(tx) != 2:
raise ValueError(
"Texture %s does not have 2 values. Line: %s"
% (str(tx), str(line))
"Texture %s does not have 2 values. Line: %s" % (str(tx), str(line))
)
verts_uvs.append(tx)
elif line.startswith("vn "):

@@ -397,17 +390,13 @@ def _load(f_obj, data_dir, load_textures=True):

# Repeat for normals and textures if present.
if len(faces_normals_idx) > 0:
faces_normals_idx = _format_faces_indices(
faces_normals_idx, normals.shape[0]
)
faces_normals_idx = _format_faces_indices(faces_normals_idx, normals.shape[0])
if len(faces_textures_idx) > 0:
faces_textures_idx = _format_faces_indices(
faces_textures_idx, verts_uvs.shape[0]
)
if len(faces_materials_idx) > 0:
faces_materials_idx = torch.tensor(
faces_materials_idx, dtype=torch.int64
)
faces_materials_idx = torch.tensor(faces_materials_idx, dtype=torch.int64)

# Load materials
material_colors, texture_images = None, None

@@ -4,15 +4,17 @@


"""This module implements utility functions for loading and saving meshes."""
import numpy as np
import pathlib
import struct
import sys
import warnings
from collections import namedtuple
from typing import Optional, Tuple

import numpy as np
import torch


_PlyTypeData = namedtuple("_PlyTypeData", "size struct_char np_type")

_PLY_TYPES = {

@@ -257,11 +259,7 @@ def _try_read_ply_constant_list_ascii(f, definition: _PlyElementType):
"ignore", message=".* Empty input file.*", category=UserWarning
)
data = np.loadtxt(
f,
dtype=np_type,
comments=None,
ndmin=2,
max_rows=definition.count,
f, dtype=np_type, comments=None, ndmin=2, max_rows=definition.count
)
except ValueError:
f.seek(start_point)

@@ -301,9 +299,7 @@ def _parse_heterogenous_property_ascii(datum, line_iter, property: _Property):
length = int(value)
except ValueError:
raise ValueError("A list length was not a number.")
list_value = np.zeros(
length, dtype=_PLY_TYPES[property.data_type].np_type
)
list_value = np.zeros(length, dtype=_PLY_TYPES[property.data_type].np_type)
for i in range(length):
inner_value = next(line_iter, None)
if inner_value is None:

@@ -404,8 +400,7 @@ def _read_ply_element_struct(f, definition: _PlyElementType, endian_str: str):
values. There is one column for each property.
"""
format = "".join(
_PLY_TYPES[property.data_type].struct_char
for property in definition.properties
_PLY_TYPES[property.data_type].struct_char for property in definition.properties
)
format = endian_str + format
pattern = struct.Struct(format)

@@ -414,10 +409,7 @@ def _read_ply_element_struct(f, definition: _PlyElementType, endian_str: str):
bytes_data = f.read(needed_bytes)
if len(bytes_data) != needed_bytes:
raise ValueError("Not enough data for %s." % definition.name)
data = [
pattern.unpack_from(bytes_data, i * size)
for i in range(definition.count)
]
data = [pattern.unpack_from(bytes_data, i * size) for i in range(definition.count)]
return data


@@ -475,9 +467,7 @@ def _try_read_ply_constant_list_binary(
return output


def _read_ply_element_binary(
f, definition: _PlyElementType, big_endian: bool
) -> list:
def _read_ply_element_binary(f, definition: _PlyElementType, big_endian: bool) -> list:
"""
Decode all instances of a single element from a binary .ply file.

@@ -515,9 +505,7 @@ def _read_ply_element_binary(
data = []
for _i in range(definition.count):
datum = []
for property, property_struct in zip(
definition.properties, property_structs
):
for property, property_struct in zip(definition.properties, property_structs):
size = property_struct.size
initial_data = f.read(size)
if len(initial_data) != size:

@@ -656,28 +644,19 @@ def load_ply(f):
if face is None:
raise ValueError("The ply file has no face element.")

if (
not isinstance(vertex, np.ndarray)
or vertex.ndim != 2
or vertex.shape[1] != 3
):
if not isinstance(vertex, np.ndarray) or vertex.ndim != 2 or vertex.shape[1] != 3:
raise ValueError("Invalid vertices in file.")
verts = torch.tensor(vertex, dtype=torch.float32)

face_head = next(head for head in header.elements if head.name == "face")
if (
len(face_head.properties) != 1
or face_head.properties[0].list_size_type is None
):
if len(face_head.properties) != 1 or face_head.properties[0].list_size_type is None:
raise ValueError("Unexpected form of faces data.")
# face_head.properties[0].name is usually "vertex_index" or "vertex_indices"
# but we don't need to enforce this.
if isinstance(face, np.ndarray) and face.ndim == 2:
if face.shape[1] < 3:
raise ValueError("Faces must have at least 3 vertices.")
face_arrays = [
face[:, [0, i + 1, i + 2]] for i in range(face.shape[1] - 2)
]
face_arrays = [face[:, [0, i + 1, i + 2]] for i in range(face.shape[1] - 2)]
faces = torch.tensor(np.vstack(face_arrays), dtype=torch.int64)
else:
face_list = []

@@ -687,9 +666,7 @@ def load_ply(f):
if face_item.shape[0] < 3:
raise ValueError("Faces must have at least 3 vertices.")
for i in range(face_item.shape[0] - 2):
face_list.append(
[face_item[0], face_item[i + 1], face_item[i + 2]]
)
face_list.append([face_item[0], face_item[i + 1], face_item[i + 2]])
faces = torch.tensor(face_list, dtype=torch.int64)

return verts, faces

@@ -6,4 +6,5 @@ from .mesh_edge_loss import mesh_edge_loss
from .mesh_laplacian_smoothing import mesh_laplacian_smoothing
from .mesh_normal_consistency import mesh_normal_consistency


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -2,13 +2,10 @@

import torch
import torch.nn.functional as F

from pytorch3d.ops.nearest_neighbor_points import nn_points_idx


def _validate_chamfer_reduction_inputs(
batch_reduction: str, point_reduction: str
):
def _validate_chamfer_reduction_inputs(batch_reduction: str, point_reduction: str):
"""Check the requested reductions are valid.

Args:

@@ -18,17 +15,11 @@ def _validate_chamfer_reduction_inputs(
points, can be one of ["none", "mean", "sum"].
"""
if batch_reduction not in ["none", "mean", "sum"]:
raise ValueError(
'batch_reduction must be one of ["none", "mean", "sum"]'
)
raise ValueError('batch_reduction must be one of ["none", "mean", "sum"]')
if point_reduction not in ["none", "mean", "sum"]:
raise ValueError(
'point_reduction must be one of ["none", "mean", "sum"]'
)
raise ValueError('point_reduction must be one of ["none", "mean", "sum"]')
if batch_reduction == "none" and point_reduction == "none":
raise ValueError(
'batch_reduction and point_reduction cannot both be "none".'
)
raise ValueError('batch_reduction and point_reduction cannot both be "none".')


def chamfer_distance(

@@ -87,10 +78,7 @@ def chamfer_distance(
(x.sum((1, 2)) * weights).sum() * 0.0,
(x.sum((1, 2)) * weights).sum() * 0.0,
)
return (
(x.sum((1, 2)) * weights) * 0.0,
(x.sum((1, 2)) * weights) * 0.0,
)
return ((x.sum((1, 2)) * weights) * 0.0, (x.sum((1, 2)) * weights) * 0.0)

return_normals = x_normals is not None and y_normals is not None
cham_norm_x = x.new_zeros(())

@@ -2,6 +2,7 @@


from itertools import islice

import torch


@@ -76,10 +77,7 @@ def mesh_normal_consistency(meshes):
with torch.no_grad():
edge_idx = face_to_edge.reshape(F * 3) # (3 * F,) indexes into edges
vert_idx = (
faces_packed.view(1, F, 3)
.expand(3, F, 3)
.transpose(0, 1)
.reshape(3 * F, 3)
faces_packed.view(1, F, 3).expand(3, F, 3).transpose(0, 1).reshape(3 * F, 3)
)
edge_idx, edge_sort_idx = edge_idx.sort()
vert_idx = vert_idx[edge_sort_idx]

@@ -132,9 +130,7 @@ def mesh_normal_consistency(meshes):
loss = 1 - torch.cosine_similarity(n0, n1, dim=1)

verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_idx[:, 0]]
verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[
vert_edge_pair_idx[:, 0]
]
verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_edge_pair_idx[:, 0]]
num_normals = verts_packed_to_mesh_idx.bincount(minlength=N)
weights = 1.0 / num_normals[verts_packed_to_mesh_idx].float()


@@ -10,4 +10,5 @@ from .sample_points_from_meshes import sample_points_from_meshes
from .subdivide_meshes import SubdivideMeshes
from .vert_align import vert_align


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -3,7 +3,6 @@

import torch
import torch.nn.functional as F

from pytorch3d.structures import Meshes


@@ -200,8 +199,6 @@ def cubify(voxels, thresh, device=None) -> Meshes:
grid_verts.index_select(0, (idleverts[n] == 0).nonzero()[:, 0])
for n in range(N)
]
faces_list = [
nface - idlenum[n][nface] for n, nface in enumerate(faces_list)
]
faces_list = [nface - idlenum[n][nface] for n, nface in enumerate(faces_list)]

return Meshes(verts=verts_list, faces=faces_list)

@@ -3,11 +3,10 @@

import torch
import torch.nn as nn
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable

from pytorch3d import _C


class GraphConv(nn.Module):
"""A single graph convolution layer."""

@@ -60,9 +59,7 @@ class GraphConv(nn.Module):
number of output features per vertex.
"""
if verts.is_cuda != edges.is_cuda:
raise ValueError(
"verts and edges tensors must be on the same device."
)
raise ValueError("verts and edges tensors must be on the same device.")
if verts.shape[0] == 0:
# empty graph.
return verts.new_zeros((0, self.output_dim)) * verts.sum()

@@ -1,7 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import torch

from pytorch3d import _C



@@ -1,11 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import torch
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable

from pytorch3d import _C


class _MeshFaceAreasNormals(Function):
"""

@@ -2,7 +2,6 @@


import torch

from pytorch3d import _C


@@ -31,9 +30,7 @@ def nn_points_idx(p1, p2, p2_normals=None) -> torch.Tensor:
"""
N, P1, D = p1.shape
with torch.no_grad():
p1_nn_idx = _C.nn_points_idx(
p1.contiguous(), p2.contiguous()
) # (N, P1)
p1_nn_idx = _C.nn_points_idx(p1.contiguous(), p2.contiguous()) # (N, P1)
p1_nn_idx_expanded = p1_nn_idx.view(N, P1, 1).expand(N, P1, D)
p1_nn_points = p2.gather(1, p1_nn_idx_expanded)
if p2_normals is None:

@@ -1,11 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import torch
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable

from pytorch3d import _C


class _PackedToPadded(Function):
"""

@@ -7,8 +7,8 @@ batches of meshes.
"""
import sys
from typing import Tuple, Union
import torch

import torch
from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals
from pytorch3d.ops.packed_to_padded import packed_to_padded

@@ -53,9 +53,7 @@ def sample_points_from_meshes(

# Only compute samples for non empty meshes
with torch.no_grad():
areas, _ = mesh_face_areas_normals(
verts, faces
) # Face areas can be zero.
areas, _ = mesh_face_areas_normals(verts, faces) # Face areas can be zero.
max_faces = meshes.num_faces_per_mesh().max().item()
areas_padded = packed_to_padded(
areas, mesh_to_face[meshes.valid], max_faces

@@ -80,21 +78,17 @@ def sample_points_from_meshes(
a = v0[sample_face_idxs] # (N, num_samples, 3)
b = v1[sample_face_idxs]
c = v2[sample_face_idxs]
samples[meshes.valid] = (
w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c
)
samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c

if return_normals:
# Intialize normals tensor with fill value 0 for empty meshes.
# Normals for the sampled points are face normals computed from
# the vertices of the face in which the sampled point lies.
normals = torch.zeros(
(num_meshes, num_samples, 3), device=meshes.device
)
normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)
vert_normals = (v1 - v0).cross(v2 - v1, dim=1)
vert_normals = vert_normals / vert_normals.norm(
dim=1, p=2, keepdim=True
).clamp(min=sys.float_info.epsilon)
vert_normals = vert_normals / vert_normals.norm(dim=1, p=2, keepdim=True).clamp(
min=sys.float_info.epsilon
)
vert_normals = vert_normals[sample_face_idxs]
normals[meshes.valid] = vert_normals

@@ -3,7 +3,6 @@

import torch
import torch.nn as nn

from pytorch3d.structures import Meshes


@@ -193,16 +192,12 @@ class SubdivideMeshes(nn.Module):
edges = meshes[0].edges_packed()

# The set of faces is the same across the different meshes.
new_faces = self._subdivided_faces.view(1, -1, 3).expand(
self._N, -1, -1
)
new_faces = self._subdivided_faces.view(1, -1, 3).expand(self._N, -1, -1)

# Add one new vertex at the midpoint of each edge by taking the average
# of the vertices that form each edge.
new_verts = verts[:, edges].mean(dim=2)
new_verts = torch.cat(
[verts, new_verts], dim=1
) # (sum(V_n)+sum(E_n), 3)
new_verts = torch.cat([verts, new_verts], dim=1) # (sum(V_n)+sum(E_n), 3)
new_feats = None

# Calculate features for new vertices.

@@ -212,15 +207,11 @@ class SubdivideMeshes(nn.Module):
# padded, i.e. (N*V, D) to (N, V, D).
feats = feats.view(verts.size(0), verts.size(1), feats.size(1))
if feats.dim() != 3:
raise ValueError(
"features need to be of shape (N, V, D) or (N*V, D)"
)
raise ValueError("features need to be of shape (N, V, D) or (N*V, D)")

# Take average of the features at the vertices that form each edge.
new_feats = feats[:, edges].mean(dim=2)
new_feats = torch.cat(
[feats, new_feats], dim=1
) # (sum(V_n)+sum(E_n), 3)
new_feats = torch.cat([feats, new_feats], dim=1) # (sum(V_n)+sum(E_n), 3)

new_meshes = Meshes(verts=new_verts, faces=new_faces)

@@ -270,9 +261,7 @@ class SubdivideMeshes(nn.Module):
) # (sum(V_n)+sum(E_n),)

verts_ordered_idx_init = torch.zeros(
new_verts_per_mesh.sum(),
dtype=torch.int64,
device=meshes.device,
new_verts_per_mesh.sum(), dtype=torch.int64, device=meshes.device
) # (sum(V_n)+sum(E_n),)

# Reassign vertex indices so that existing and new vertices for each

@@ -288,9 +277,7 @@ class SubdivideMeshes(nn.Module):

# Calculate the indices needed to group the existing and new faces
# for each mesh.
face_sort_idx = create_faces_index(
num_faces_per_mesh, device=meshes.device
)
face_sort_idx = create_faces_index(num_faces_per_mesh, device=meshes.device)

# Reorder the faces to sequentially group existing and new faces
# for each mesh.

@@ -361,9 +348,7 @@ def create_verts_index(verts_per_mesh, edges_per_mesh, device=None):
E = edges_per_mesh.sum() # e.g. 21

verts_per_mesh_cumsum = verts_per_mesh.cumsum(dim=0) # (N,) e.g. (4, 9, 15)
edges_per_mesh_cumsum = edges_per_mesh.cumsum(
dim=0
) # (N,) e.g. (5, 12, 21)
edges_per_mesh_cumsum = edges_per_mesh.cumsum(dim=0) # (N,) e.g. (5, 12, 21)

v_to_e_idx = verts_per_mesh_cumsum.clone()

@@ -373,9 +358,7 @@ def create_verts_index(verts_per_mesh, edges_per_mesh, device=None):
] # e.g. (4, 9, 15) + (0, 5, 12) = (4, 14, 27)

# vertex to edge offset.
v_to_e_offset = (
V - verts_per_mesh_cumsum
) # e.g. 15 - (4, 9, 15) = (11, 6, 0)
v_to_e_offset = V - verts_per_mesh_cumsum # e.g. 15 - (4, 9, 15) = (11, 6, 0)
v_to_e_offset[1:] += edges_per_mesh_cumsum[
:-1
] # e.g. (11, 6, 0) + (0, 5, 12) = (11, 11, 12)

@@ -59,9 +59,7 @@ def vert_align(
elif hasattr(verts, "verts_padded"):
grid = verts.verts_padded()
else:
raise ValueError(
"verts must be a tensor or have a `verts_padded` attribute"
)
raise ValueError("verts must be a tensor or have a `verts_padded` attribute")

grid = grid[:, None, :, :2] # (N, 1, V, 2)


@@ -44,4 +44,5 @@ from .points import (
)
from .utils import TensorProperties, convert_to_tensors_and_broadcast


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -1,10 +1,12 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.


import numpy as np
from typing import NamedTuple, Sequence

import numpy as np
import torch


# Example functions for blending the top K colors per pixel using the outputs
# from rasterization.
# NOTE: All blending function should return an RGBA image per batch element

@@ -63,9 +65,7 @@ def sigmoid_alpha_blend(colors, fragments, blend_params) -> torch.Tensor:
3D Reasoning', ICCV 2019
"""
N, H, W, K = fragments.pix_to_face.shape
pixel_colors = torch.ones(
(N, H, W, 4), dtype=colors.dtype, device=colors.device
)
pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device)
mask = fragments.pix_to_face >= 0

# The distance is negative if a pixel is inside a face and positive outside

@@ -124,14 +124,10 @@ def softmax_rgb_blend(

N, H, W, K = fragments.pix_to_face.shape
device = fragments.pix_to_face.device
pixel_colors = torch.ones(
(N, H, W, 4), dtype=colors.dtype, device=colors.device
)
pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device)
background = blend_params.background_color
if not torch.is_tensor(background):
background = torch.tensor(
background, dtype=torch.float32, device=device
)
background = torch.tensor(background, dtype=torch.float32, device=device)

# Background color
delta = np.exp(1e-10 / blend_params.gamma) * 1e-10

@@ -1,15 +1,16 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import math
import numpy as np
from typing import Optional, Sequence, Tuple

import numpy as np
import torch
import torch.nn.functional as F

from pytorch3d.transforms import Rotate, Transform3d, Translate

from .utils import TensorProperties, convert_to_tensors_and_broadcast


# Default values for rotation and translation matrices.
r = np.expand_dims(np.eye(3), axis=0) # (1, 3, 3)
t = np.expand_dims(np.zeros(3), axis=0) # (1, 3)

@@ -106,9 +107,7 @@ class OpenGLPerspectiveCameras(TensorProperties):
aspect_ratio = kwargs.get("aspect_ratio", self.aspect_ratio)
degrees = kwargs.get("degrees", self.degrees)

P = torch.zeros(
(self._N, 4, 4), device=self.device, dtype=torch.float32
)
P = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
if degrees:
fov = (np.pi / 180) * fov

@@ -204,9 +203,7 @@ class OpenGLPerspectiveCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T)
return world_to_view_transform

def get_full_projection_transform(self, **kwargs) -> Transform3d:

@@ -229,9 +226,7 @@ class OpenGLPerspectiveCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)

@@ -337,9 +332,7 @@ class OpenGLOrthographicCameras(TensorProperties):
bottom = kwargs.get("bottom", self.bottom) # pyre-ignore[16]
scale_xyz = kwargs.get("scale_xyz", self.scale_xyz) # pyre-ignore[16]

P = torch.zeros(
(self._N, 4, 4), dtype=torch.float32, device=self.device
)
P = torch.zeros((self._N, 4, 4), dtype=torch.float32, device=self.device)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
# NOTE: OpenGL flips handedness of coordinate system between camera
# space and NDC space so z sign is -ve. In PyTorch3D we maintain a

@@ -417,9 +410,7 @@ class OpenGLOrthographicCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T)
return world_to_view_transform

def get_full_projection_transform(self, **kwargs) -> Transform3d:

@@ -442,9 +433,7 @@ class OpenGLOrthographicCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)

@@ -470,12 +459,7 @@ class SfMPerspectiveCameras(TensorProperties):
"""

def __init__(
self,
focal_length=1.0,
principal_point=((0.0, 0.0),),
R=r,
T=t,
device="cpu",
self, focal_length=1.0, principal_point=((0.0, 0.0),), R=r, T=t, device="cpu"
):
"""
__init__(self, focal_length, principal_point, R, T, device) -> None

@@ -589,9 +573,7 @@ class SfMPerspectiveCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T)
return world_to_view_transform

def get_full_projection_transform(self, **kwargs) -> Transform3d:

@@ -610,9 +592,7 @@ class SfMPerspectiveCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)

@@ -638,12 +618,7 @@ class SfMOrthographicCameras(TensorProperties):
"""

def __init__(
self,
focal_length=1.0,
principal_point=((0.0, 0.0),),
R=r,
T=t,
device="cpu",
self, focal_length=1.0, principal_point=((0.0, 0.0),), R=r, T=t, device="cpu"
):
"""
__init__(self, focal_length, principal_point, R, T, device) -> None

@@ -757,9 +732,7 @@ class SfMOrthographicCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T)
return world_to_view_transform

def get_full_projection_transform(self, **kwargs) -> Transform3d:

@@ -778,9 +751,7 @@ class SfMOrthographicCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)

@@ -990,9 +961,7 @@ def look_at_rotation(
z_axis = F.normalize(at - camera_position, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
R = torch.cat(
(x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1
)
R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
return R.transpose(1, 2)


@@ -1038,9 +1007,7 @@ def look_at_view_transform(
"""

if eye is not None:
broadcasted_args = convert_to_tensors_and_broadcast(
eye, at, up, device=device
)
broadcasted_args = convert_to_tensors_and_broadcast(eye, at, up, device=device)
eye, at, up = broadcasted_args
C = eye
else:

@@ -3,10 +3,11 @@


from typing import NamedTuple
import torch

import torch
from pytorch3d import _C


# Example functions for blending the top K features per pixel using the outputs
# from rasterization.
# NOTE: All blending function should return a (N, H, W, C) tensor per batch element.

@@ -49,9 +50,7 @@ class _CompositeAlphaPoints(torch.autograd.Function):
def forward(ctx, features, alphas, points_idx):
pt_cld = _C.accum_alphacomposite(features, alphas, points_idx)

ctx.save_for_backward(
features.clone(), alphas.clone(), points_idx.clone()
)
ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone())
return pt_cld

@staticmethod

@@ -68,9 +67,7 @@ class _CompositeAlphaPoints(torch.autograd.Function):
return grad_features, grad_alphas, grad_points_idx, None


def alpha_composite(
pointsidx, alphas, pt_clds, blend_params=None
) -> torch.Tensor:
def alpha_composite(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor:
"""
Composite features within a z-buffer using alpha compositing. Given a zbuffer
with corresponding features and weights, these values are accumulated according

@@ -131,9 +128,7 @@ class _CompositeNormWeightedSumPoints(torch.autograd.Function):
def forward(ctx, features, alphas, points_idx):
pt_cld = _C.accum_weightedsumnorm(features, alphas, points_idx)

ctx.save_for_backward(
features.clone(), alphas.clone(), points_idx.clone()
)
ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone())
return pt_cld

@staticmethod

@@ -150,9 +145,7 @@ class _CompositeNormWeightedSumPoints(torch.autograd.Function):
return grad_features, grad_alphas, grad_points_idx, None


def norm_weighted_sum(
pointsidx, alphas, pt_clds, blend_params=None
) -> torch.Tensor:
def norm_weighted_sum(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor:
"""
Composite features within a z-buffer using normalized weighted sum. Given a zbuffer
with corresponding features and weights, these values are accumulated

@@ -213,9 +206,7 @@ class _CompositeWeightedSumPoints(torch.autograd.Function):
def forward(ctx, features, alphas, points_idx):
pt_cld = _C.accum_weightedsum(features, alphas, points_idx)

ctx.save_for_backward(
features.clone(), alphas.clone(), points_idx.clone()
)
ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone())
return pt_cld

@staticmethod

@@ -114,12 +114,7 @@ def specular(

# Ensure all inputs have same batch dimension as points
matched_tensors = convert_to_tensors_and_broadcast(
points,
color,
direction,
camera_position,
shininess,
device=points.device,
points, color, direction, camera_position, shininess, device=points.device
)
_, color, direction, camera_position, shininess = matched_tensors

@@ -201,9 +196,7 @@ class DirectionalLights(TensorProperties):
normals=normals, color=self.diffuse_color, direction=self.direction
)

def specular(
self, normals, points, camera_position, shininess
) -> torch.Tensor:
def specular(self, normals, points, camera_position, shininess) -> torch.Tensor:
return specular(
points=points,
normals=normals,

@@ -256,13 +249,9 @@ class PointLights(TensorProperties):

def diffuse(self, normals, points) -> torch.Tensor:
direction = self.location - points
return diffuse(
normals=normals, color=self.diffuse_color, direction=direction
)
return diffuse(normals=normals, color=self.diffuse_color, direction=direction)

def specular(
self, normals, points, camera_position, shininess
) -> torch.Tensor:
def specular(self, normals, points, camera_position, shininess) -> torch.Tensor:
direction = self.location - points
return specular(
points=points,

@@ -1,10 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.


from .texturing import ( # isort:skip
interpolate_texture_map,
interpolate_vertex_colors,
)
from .texturing import interpolate_texture_map, interpolate_vertex_colors # isort:skip
from .rasterize_meshes import rasterize_meshes
from .rasterizer import MeshRasterizer, RasterizationSettings
from .renderer import MeshRenderer

@@ -20,4 +17,5 @@ from .shader import (
from .shading import gouraud_shading, phong_shading
from .utils import interpolate_face_attributes


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -1,12 +1,13 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.


import numpy as np
from typing import Optional
import torch

import numpy as np
import torch
from pytorch3d import _C


# TODO make the epsilon user configurable
kEpsilon = 1e-30

@@ -172,9 +173,7 @@ class _RasterizeFaceVerts(torch.autograd.Function):
return pix_to_face, zbuf, barycentric_coords, dists

@staticmethod
def backward(
ctx, grad_pix_to_face, grad_zbuf, grad_barycentric_coords, grad_dists
):
def backward(ctx, grad_pix_to_face, grad_zbuf, grad_barycentric_coords, grad_dists):
grad_face_verts = None
grad_mesh_to_face_first_idx = None
grad_num_faces_per_mesh = None

@@ -243,9 +242,7 @@ def rasterize_meshes_python(
face_idxs = torch.full(
(N, H, W, K), fill_value=-1, dtype=torch.int64, device=device
)
zbuf = torch.full(
(N, H, W, K), fill_value=-1, dtype=torch.float32, device=device
)
zbuf = torch.full((N, H, W, K), fill_value=-1, dtype=torch.float32, device=device)
bary_coords = torch.full(
(N, H, W, K, 3), fill_value=-1, dtype=torch.float32, device=device
)

@@ -308,9 +305,7 @@ def rasterize_meshes_python(
continue

# Compute barycentric coordinates and pixel z distance.
pxy = torch.tensor(
[xf, yf], dtype=torch.float32, device=device
)
pxy = torch.tensor([xf, yf], dtype=torch.float32, device=device)

bary = barycentric_coordinates(pxy, v0[:2], v1[:2], v2[:2])
if perspective_correct:

@@ -1,6 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

from typing import NamedTuple, Optional

import torch
import torch.nn as nn

@@ -123,8 +124,5 @@ class MeshRasterizer(nn.Module):
perspective_correct=raster_settings.perspective_correct,
)
return Fragments(
pix_to_face=pix_to_face,
zbuf=zbuf,
bary_coords=bary_coords,
dists=dists,
pix_to_face=pix_to_face, zbuf=zbuf, bary_coords=bary_coords, dists=dists
)

@@ -7,6 +7,7 @@ import torch.nn as nn
from .rasterizer import Fragments
from .utils import _clip_barycentric_coordinates, _interpolate_zbuf


# A renderer class should be initialized with a
# function for rasterization and a function for shading.
# The rasterizer should:

@@ -48,16 +49,12 @@ class MeshRenderer(nn.Module):
the range for the corresponding face.
"""
fragments = self.rasterizer(meshes_world, **kwargs)
raster_settings = kwargs.get(
"raster_settings", self.rasterizer.raster_settings
)
raster_settings = kwargs.get("raster_settings", self.rasterizer.raster_settings)
if raster_settings.blur_radius > 0.0:
# TODO: potentially move barycentric clipping to the rasterizer
# if no downstream functions requires unclipped values.
# This will avoid unnecssary re-interpolation of the z buffer.
clipped_bary_coords = _clip_barycentric_coordinates(
fragments.bary_coords
)
clipped_bary_coords = _clip_barycentric_coordinates(fragments.bary_coords)
clipped_zbuf = _interpolate_zbuf(
fragments.pix_to_face, clipped_bary_coords, meshes_world
)

@@ -16,6 +16,7 @@ from ..materials import Materials
from .shading import flat_shading, gouraud_shading, phong_shading
from .texturing import interpolate_texture_map, interpolate_vertex_colors


# A Shader should take as input fragments from the output of rasterization
# along with scene params and output images. A shader could perform operations
# such as:

@@ -41,16 +42,12 @@ class HardPhongShader(nn.Module):

def __init__(self, device="cpu", cameras=None, lights=None, materials=None):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)

def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:

@@ -85,28 +82,17 @@ class SoftPhongShader(nn.Module):
"""

def __init__(
self,
device="cpu",
cameras=None,
lights=None,
materials=None,
blend_params=None,
self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None
):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = (
blend_params if blend_params is not None else BlendParams()
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = blend_params if blend_params is not None else BlendParams()

def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
texels = interpolate_vertex_colors(fragments, meshes)

@@ -142,16 +128,12 @@ class HardGouraudShader(nn.Module):

def __init__(self, device="cpu", cameras=None, lights=None, materials=None):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)

def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:

@@ -185,28 +167,17 @@ class SoftGouraudShader(nn.Module):
"""

def __init__(
self,
device="cpu",
cameras=None,
lights=None,
materials=None,
blend_params=None,
self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None
):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = (
blend_params if blend_params is not None else BlendParams()
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = blend_params if blend_params is not None else BlendParams()

def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
cameras = kwargs.get("cameras", self.cameras)

@@ -241,28 +212,17 @@ class TexturedSoftPhongShader(nn.Module):
"""

def __init__(
self,
device="cpu",
cameras=None,
lights=None,
materials=None,
blend_params=None,
self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None
):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = (
blend_params if blend_params is not None else BlendParams()
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = blend_params if blend_params is not None else BlendParams()

def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
texels = interpolate_texture_map(fragments, meshes)

@@ -298,16 +258,12 @@ class HardFlatShader(nn.Module):

def __init__(self, device="cpu", cameras=None, lights=None, materials=None):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)

def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:

@@ -346,9 +302,7 @@ class SoftSilhouetteShader(nn.Module):

def __init__(self, blend_params=None):
super().__init__()
self.blend_params = (
blend_params if blend_params is not None else BlendParams()
)
self.blend_params = blend_params if blend_params is not None else BlendParams()

def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
""""

@@ -2,6 +2,7 @@


from typing import Tuple

import torch

from .texturing import interpolate_face_attributes

@@ -82,9 +83,7 @@ def phong_shading(
return colors


def gouraud_shading(
meshes, fragments, lights, cameras, materials
) -> torch.Tensor:
def gouraud_shading(meshes, fragments, lights, cameras, materials) -> torch.Tensor:
"""
Apply per vertex shading. First compute the vertex illumination by applying
ambient, diffuse and specular lighting. If vertex color is available,

@@ -131,9 +130,7 @@ def gouraud_shading(
return colors


def flat_shading(
meshes, fragments, lights, cameras, materials, texels
) -> torch.Tensor:
def flat_shading(meshes, fragments, lights, cameras, materials, texels) -> torch.Tensor:
"""
Apply per face shading. Use the average face position and the face normals
to compute the ambient, diffuse and specular lighting. Apply the ambient

@@ -3,7 +3,6 @@

import torch
import torch.nn.functional as F

from pytorch3d.structures.textures import Textures

from .utils import interpolate_face_attributes

@@ -75,9 +74,7 @@ def interpolate_texture_map(fragments, meshes) -> torch.Tensor:
# right-bottom pixel of input.

pixel_uvs = pixel_uvs * 2.0 - 1.0
texture_maps = torch.flip(
texture_maps, [2]
) # flip y axis of the texture map
texture_maps = torch.flip(texture_maps, [2]) # flip y axis of the texture map
if texture_maps.device != pixel_uvs.device:
texture_maps = texture_maps.to(pixel_uvs.device)
texels = F.grid_sample(texture_maps, pixel_uvs, align_corners=False)

@@ -107,9 +104,7 @@ def interpolate_vertex_colors(fragments, meshes) -> torch.Tensor:
There will be one C dimensional value for each element in
fragments.pix_to_face.
"""
vertex_textures = meshes.textures.verts_rgb_padded().reshape(
-1, 3
) # (V, C)
vertex_textures = meshes.textures.verts_rgb_padded().reshape(-1, 3) # (V, C)
vertex_textures = vertex_textures[meshes.verts_padded_to_packed_idx(), :]
faces_packed = meshes.faces_packed()
faces_textures = vertex_textures[faces_packed] # (F, 3, C)

@@ -92,8 +92,6 @@ def _interpolate_zbuf(
verts = meshes.verts_packed()
faces = meshes.faces_packed()
faces_verts_z = verts[faces][..., 2][..., None] # (F, 3, 1)
return interpolate_face_attributes(
pix_to_face, barycentric_coords, faces_verts_z
)[
return interpolate_face_attributes(pix_to_face, barycentric_coords, faces_verts_z)[
..., 0
] # (1, H, W, K)

@@ -5,4 +5,5 @@ from .rasterize_points import rasterize_points
from .rasterizer import PointsRasterizationSettings, PointsRasterizer
from .renderer import PointsRenderer


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -5,6 +5,7 @@ import torch.nn as nn

from ..compositing import CompositeParams, alpha_composite, norm_weighted_sum


# A compositor should take as input 3D points and some corresponding information.
# Given this information, the compositor can:
# - blend colors across the top K vertices at a pixel

@@ -19,15 +20,11 @@ class AlphaCompositor(nn.Module):
super().__init__()

self.composite_params = (
composite_params
if composite_params is not None
else CompositeParams()
composite_params if composite_params is not None else CompositeParams()
)

def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor:
images = alpha_composite(
fragments, alphas, ptclds, self.composite_params
)
images = alpha_composite(fragments, alphas, ptclds, self.composite_params)
return images


@@ -39,13 +36,9 @@ class NormWeightedCompositor(nn.Module):
def __init__(self, composite_params=None):
super().__init__()
self.composite_params = (
composite_params
if composite_params is not None
else CompositeParams()
composite_params if composite_params is not None else CompositeParams()
)

def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor:
images = norm_weighted_sum(
fragments, alphas, ptclds, self.composite_params
)
images = norm_weighted_sum(fragments, alphas, ptclds, self.composite_params)
return images

@@ -1,8 +1,8 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

from typing import Optional
import torch

import torch
from pytorch3d import _C
from pytorch3d.renderer.mesh.rasterize_meshes import pix_to_ndc

@@ -155,10 +155,7 @@ class _RasterizePoints(torch.autograd.Function):


def rasterize_points_python(
pointclouds,
image_size: int = 256,
radius: float = 0.01,
points_per_pixel: int = 8,
pointclouds, image_size: int = 256, radius: float = 0.01, points_per_pixel: int = 8
):
"""
Naive pure PyTorch implementation of pointcloud rasterization.

@@ -177,9 +174,7 @@ def rasterize_points_python(
point_idxs = torch.full(
(N, S, S, K), fill_value=-1, dtype=torch.int32, device=device
)
zbuf = torch.full(
(N, S, S, K), fill_value=-1, dtype=torch.float32, device=device
)
zbuf = torch.full((N, S, S, K), fill_value=-1, dtype=torch.float32, device=device)
pix_dists = torch.full(
(N, S, S, K), fill_value=-1, dtype=torch.float32, device=device
)

@@ -3,6 +3,7 @@


from typing import NamedTuple, Optional

import torch
import torch.nn as nn


@@ -5,6 +5,7 @@
import torch
import torch.nn as nn


# A renderer class should be initialized with a
# function for rasterization and a function for compositing.
# The rasterizer should:

@@ -1,9 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.


import numpy as np
import warnings
from typing import Any, Union

import numpy as np
import torch


@@ -45,10 +46,7 @@ class TensorAccessor(object):
# Convert the attribute to a tensor if it is not a tensor.
if not torch.is_tensor(value):
value = torch.tensor(
value,
device=v.device,
dtype=v.dtype,
requires_grad=v.requires_grad,
value, device=v.device, dtype=v.dtype, requires_grad=v.requires_grad
)

# Check the shapes match the existing shape and the shape of the index.

@@ -253,9 +251,7 @@ class TensorProperties(object):
return self


def format_tensor(
input, dtype=torch.float32, device: str = "cpu"
) -> torch.Tensor:
def format_tensor(input, dtype=torch.float32, device: str = "cpu") -> torch.Tensor:
"""
Helper function for converting a scalar value to a tensor.

@@ -276,9 +272,7 @@ def format_tensor(
return input


def convert_to_tensors_and_broadcast(
*args, dtype=torch.float32, device: str = "cpu"
):
def convert_to_tensors_and_broadcast(*args, dtype=torch.float32, device: str = "cpu"):
"""
Helper function to handle parsing an arbitrary number of inputs (*args)
which all need to have the same batch dimension.

@@ -3,11 +3,7 @@
from .meshes import Meshes, join_meshes
from .pointclouds import Pointclouds
from .textures import Textures
from .utils import (
list_to_packed,
list_to_padded,
packed_to_list,
padded_to_list,
)
from .utils import list_to_packed, list_to_padded, packed_to_list, padded_to_list


__all__ = [k for k in globals().keys() if not k.startswith("_")]
@@ -1,6 +1,7 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
from typing import List
|
||||
|
||||
import torch
|
||||
|
||||
from . import utils as struct_utils
|
||||
@@ -314,14 +315,11 @@ class Meshes(object):
|
||||
if isinstance(verts, list) and isinstance(faces, list):
|
||||
self._verts_list = verts
|
||||
self._faces_list = [
|
||||
f[f.gt(-1).all(1)].to(torch.int64) if len(f) > 0 else f
|
||||
for f in faces
|
||||
f[f.gt(-1).all(1)].to(torch.int64) if len(f) > 0 else f for f in faces
|
||||
]
|
||||
self._N = len(self._verts_list)
|
||||
self.device = torch.device("cpu")
|
||||
self.valid = torch.zeros(
|
||||
(self._N,), dtype=torch.bool, device=self.device
|
||||
)
|
||||
self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)
|
||||
if self._N > 0:
|
||||
self.device = self._verts_list[0].device
|
||||
self._num_verts_per_mesh = torch.tensor(
|
||||
@@ -348,18 +346,14 @@ class Meshes(object):

elif torch.is_tensor(verts) and torch.is_tensor(faces):
if verts.size(2) != 3 and faces.size(2) != 3:
raise ValueError(
"Verts and Faces tensors have incorrect dimensions."
)
raise ValueError("Verts and Faces tensors have incorrect dimensions.")
self._verts_padded = verts
self._faces_padded = faces.to(torch.int64)
self._N = self._verts_padded.shape[0]
self._V = self._verts_padded.shape[1]

self.device = self._verts_padded.device
self.valid = torch.zeros(
(self._N,), dtype=torch.bool, device=self.device
)
self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)
if self._N > 0:
# Check that padded faces - which have value -1 - are at the
# end of the tensors
@@ -400,12 +394,8 @@ class Meshes(object):

# Set the num verts/faces on the textures if present.
if self.textures is not None:
self.textures._num_faces_per_mesh = (
self._num_faces_per_mesh.tolist()
)
self.textures._num_verts_per_mesh = (
self._num_verts_per_mesh.tolist()
)
self.textures._num_faces_per_mesh = self._num_faces_per_mesh.tolist()
self.textures._num_verts_per_mesh = self._num_verts_per_mesh.tolist()

def __len__(self):
return self._N
@@ -665,8 +655,7 @@ class Meshes(object):

self._verts_padded_to_packed_idx = torch.cat(
[
torch.arange(v, dtype=torch.int64, device=self.device)
+ i * self._V
torch.arange(v, dtype=torch.int64, device=self.device) + i * self._V
for (i, v) in enumerate(self._num_verts_per_mesh)
],
dim=0,
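The torch.cat expression collapsed above builds the index mapping each real vertex in the padded (N, V, 3) layout to its row in the packed layout, skipping per-mesh padding. A standalone illustration (variable names are mine):

import torch

V = 5                           # padded size per mesh
num_verts_per_mesh = [3, 5, 2]  # actual vertex counts
idx = torch.cat(
    [
        torch.arange(v, dtype=torch.int64) + i * V
        for i, v in enumerate(num_verts_per_mesh)
    ],
    dim=0,
)
# Rows 3..4 and 12..14 of the flattened (N*V, 3) tensor are padding, so
# they are absent from the index.
assert idx.tolist() == [0, 1, 2, 5, 6, 7, 8, 9, 10, 11]
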
@@ -706,15 +695,10 @@ class Meshes(object):
tensor of normals of shape (N, max(V_n), 3).
"""
if self.isempty():
return torch.zeros(
(self._N, 0, 3), dtype=torch.float32, device=self.device
)
return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device)
verts_normals_list = self.verts_normals_list()
return struct_utils.list_to_padded(
verts_normals_list,
(self._V, 3),
pad_value=0.0,
equisized=self.equisized,
verts_normals_list, (self._V, 3), pad_value=0.0, equisized=self.equisized
)

def faces_normals_packed(self):
@@ -750,15 +734,10 @@ class Meshes(object):
tensor of normals of shape (N, max(F_n), 3).
"""
if self.isempty():
return torch.zeros(
(self._N, 0, 3), dtype=torch.float32, device=self.device
)
return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device)
faces_normals_list = self.faces_normals_list()
return struct_utils.list_to_padded(
faces_normals_list,
(self._F, 3),
pad_value=0.0,
equisized=self.equisized,
faces_normals_list, (self._F, 3), pad_value=0.0, equisized=self.equisized
)

def faces_areas_packed(self):
@@ -797,9 +776,7 @@ class Meshes(object):
return
faces_packed = self.faces_packed()
verts_packed = self.verts_packed()
face_areas, face_normals = mesh_face_areas_normals(
verts_packed, faces_packed
)
face_areas, face_normals = mesh_face_areas_normals(verts_packed, faces_packed)
self._faces_areas_packed = face_areas
self._faces_normals_packed = face_normals

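mesh_face_areas_normals, called in the hunk above, yields per-face areas and unit normals. The standard cross-product formulation, sketched in plain torch (the library call itself may be a fused kernel):

import torch

def face_areas_normals_sketch(verts, faces):
    # verts: (V, 3) float tensor; faces: (F, 3) int64 tensor.
    v0, v1, v2 = verts[faces[:, 0]], verts[faces[:, 1]], verts[faces[:, 2]]
    cross = torch.cross(v1 - v0, v2 - v0, dim=1)
    norm = cross.norm(dim=1, keepdim=True)
    areas = 0.5 * norm.squeeze(1)           # triangle area = |cross| / 2
    normals = cross / norm.clamp(min=1e-6)  # unit normals, guarded against degenerate faces
    return areas, normals

verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = torch.tensor([[0, 1, 2]])
areas, normals = face_areas_normals_sketch(verts, faces)
assert torch.allclose(areas, torch.tensor([0.5]))
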
@@ -813,9 +790,7 @@ class Meshes(object):
refresh: Set to True to force recomputation of vertex normals.
Default: False.
"""
if not (
refresh or any(v is None for v in [self._verts_normals_packed])
):
if not (refresh or any(v is None for v in [self._verts_normals_packed])):
return

if self.isempty():
@@ -867,8 +842,7 @@ class Meshes(object):
Computes the padded version of meshes from verts_list and faces_list.
"""
if not (
refresh
or any(v is None for v in [self._verts_padded, self._faces_padded])
refresh or any(v is None for v in [self._verts_padded, self._faces_padded])
):
return

@@ -887,16 +861,10 @@ class Meshes(object):
)
else:
self._faces_padded = struct_utils.list_to_padded(
faces_list,
(self._F, 3),
pad_value=-1.0,
equisized=self.equisized,
faces_list, (self._F, 3), pad_value=-1.0, equisized=self.equisized
)
self._verts_padded = struct_utils.list_to_padded(
verts_list,
(self._V, 3),
pad_value=0.0,
equisized=self.equisized,
verts_list, (self._V, 3), pad_value=0.0, equisized=self.equisized
)

# TODO(nikhilar) Improve performance of _compute_packed.
@@ -1055,9 +1023,7 @@ class Meshes(object):
face_to_edge = inverse_idxs[face_to_edge]
self._faces_packed_to_edges_packed = face_to_edge

num_edges_per_mesh = torch.zeros(
self._N, dtype=torch.int32, device=self.device
)
num_edges_per_mesh = torch.zeros(self._N, dtype=torch.int32, device=self.device)
ones = torch.ones(1, dtype=torch.int32, device=self.device).expand(
self._edges_packed_to_mesh_idx.shape
)

@@ -176,17 +176,13 @@ class Pointclouds(object):
self._points_list = points
self._N = len(self._points_list)
self.device = torch.device("cpu")
self.valid = torch.zeros(
(self._N,), dtype=torch.bool, device=self.device
)
self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)
self._num_points_per_cloud = []

if self._N > 0:
for p in self._points_list:
if len(p) > 0 and (p.dim() != 2 or p.shape[1] != 3):
raise ValueError(
"Clouds in list must be of shape Px3 or empty"
)
raise ValueError("Clouds in list must be of shape Px3 or empty")

self.device = self._points_list[0].device
num_points_per_cloud = torch.tensor(
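As with Meshes, the Pointclouds constructor above accepts a list of (P_i, 3) tensors, validating each entry's shape. A brief usage sketch under the API shown in this diff:

import torch
from pytorch3d.structures import Pointclouds

points = [torch.rand(10, 3), torch.rand(4, 3)]    # clouds of different sizes
features = [torch.rand(10, 5), torch.rand(4, 5)]  # matching per-point features
clouds = Pointclouds(points=points, features=features)
# A cloud whose tensor is not Px3 raises the ValueError shown above.
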
@@ -210,9 +206,7 @@ class Pointclouds(object):
self._N = self._points_padded.shape[0]
self._P = self._points_padded.shape[1]
self.device = self._points_padded.device
self.valid = torch.ones(
(self._N,), dtype=torch.bool, device=self.device
)
self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)
self._num_points_per_cloud = torch.tensor(
[self._P] * self._N, device=self.device
)
@@ -260,9 +254,7 @@ class Pointclouds(object):

if isinstance(aux_input, list):
if len(aux_input) != self._N:
raise ValueError(
"Points and auxiliary input must be the same length."
)
raise ValueError("Points and auxiliary input must be the same length.")
for p, d in zip(self._num_points_per_cloud, aux_input):
if p != d.shape[0]:
raise ValueError(
@@ -282,9 +274,7 @@ class Pointclouds(object):
return aux_input, None, aux_input_C
elif torch.is_tensor(aux_input):
if aux_input.dim() != 3:
raise ValueError(
"Auxiliary input tensor has incorrect dimensions."
)
raise ValueError("Auxiliary input tensor has incorrect dimensions.")
if self._N != aux_input.shape[0]:
raise ValueError("Points and inputs must be the same length.")
if self._P != aux_input.shape[1]:
@@ -531,8 +521,7 @@ class Pointclouds(object):
else:
self._padded_to_packed_idx = torch.cat(
[
torch.arange(v, dtype=torch.int64, device=self.device)
+ i * self._P
torch.arange(v, dtype=torch.int64, device=self.device) + i * self._P
for (i, v) in enumerate(self._num_points_per_cloud)
],
dim=0,
@@ -551,9 +540,7 @@ class Pointclouds(object):

self._normals_padded, self._features_padded = None, None
if self.isempty():
self._points_padded = torch.zeros(
(self._N, 0, 3), device=self.device
)
self._points_padded = torch.zeros((self._N, 0, 3), device=self.device)
else:
self._points_padded = struct_utils.list_to_padded(
self.points_list(),
@@ -621,9 +608,7 @@ class Pointclouds(object):

points_list_to_packed = struct_utils.list_to_packed(points_list)
self._points_packed = points_list_to_packed[0]
if not torch.allclose(
self._num_points_per_cloud, points_list_to_packed[1]
):
if not torch.allclose(self._num_points_per_cloud, points_list_to_packed[1]):
raise ValueError("Inconsistent list to packed conversion")
self._cloud_to_packed_first_idx = points_list_to_packed[2]
self._packed_to_cloud_idx = points_list_to_packed[3]
@@ -696,13 +681,9 @@ class Pointclouds(object):
if other._N > 0:
other._points_list = [v.to(device) for v in other.points_list()]
if other._normals_list is not None:
other._normals_list = [
n.to(device) for n in other.normals_list()
]
other._normals_list = [n.to(device) for n in other.normals_list()]
if other._features_list is not None:
other._features_list = [
f.to(device) for f in other.features_list()
]
other._features_list = [f.to(device) for f in other.features_list()]
for k in self._INTERNAL_TENSORS:
v = getattr(self, k)
if torch.is_tensor(v):
@@ -892,16 +873,11 @@ class Pointclouds(object):
for features in self.features_list():
new_features_list.extend(features.clone() for _ in range(N))
return Pointclouds(
points=new_points_list,
normals=new_normals_list,
features=new_features_list,
points=new_points_list, normals=new_normals_list, features=new_features_list
)

def update_padded(
self,
new_points_padded,
new_normals_padded=None,
new_features_padded=None,
self, new_points_padded, new_normals_padded=None, new_features_padded=None
):
"""
Returns a Pointcloud structure with updated padded tensors and copies of
@@ -920,13 +896,9 @@ class Pointclouds(object):

def check_shapes(x, size):
if x.shape[0] != size[0]:
raise ValueError(
"new values must have the same batch dimension."
)
raise ValueError("new values must have the same batch dimension.")
if x.shape[1] != size[1]:
raise ValueError(
"new values must have the same number of points."
)
raise ValueError("new values must have the same number of points.")
if size[2] is not None:
if x.shape[2] != size[2]:
raise ValueError(

@@ -1,6 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

from typing import List, Optional, Union

import torch
import torchvision.transforms as T

@@ -233,11 +234,7 @@ class Textures(object):

if all(
v is not None
for v in [
self._faces_uvs_padded,
self._verts_uvs_padded,
self._maps_padded,
]
for v in [self._faces_uvs_padded, self._verts_uvs_padded, self._maps_padded]
):
new_verts_uvs = _extend_tensor(self._verts_uvs_padded, N)
new_faces_uvs = _extend_tensor(self._faces_uvs_padded, N)

@@ -1,6 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

from typing import List, Union

import torch


@@ -38,9 +39,7 @@ def list_to_padded(
pad_dim1 = max(y.shape[1] for y in x if len(y) > 0)
else:
if len(pad_size) != 2:
raise ValueError(
"Pad size must contain target size for 1st and 2nd dim"
)
raise ValueError("Pad size must contain target size for 1st and 2nd dim")
pad_dim0, pad_dim1 = pad_size

N = len(x)
@@ -55,9 +54,7 @@ def list_to_padded(
return x_padded


def padded_to_list(
x: torch.Tensor, split_size: Union[list, tuple, None] = None
):
def padded_to_list(x: torch.Tensor, split_size: Union[list, tuple, None] = None):
r"""
Transforms a padded tensor of shape (N, M, K) into a list of N tensors
of shape (Mi, Ki) where (Mi, Ki) is specified in split_size(i), or of shape
@@ -81,9 +78,7 @@ def padded_to_list(

N = len(split_size)
if x.shape[0] != N:
raise ValueError(
"Split size must be of same length as inputs first dimension"
)
raise ValueError("Split size must be of same length as inputs first dimension")

for i in range(N):
if isinstance(split_size[i], int):
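list_to_padded and padded_to_list, edited above, are inverses when given matching split sizes. A round-trip sketch, assuming both are importable from pytorch3d.structures.utils (the module this hunk appears to edit):

import torch
from pytorch3d.structures.utils import list_to_padded, padded_to_list

x = [torch.rand(2, 3), torch.rand(4, 3)]
padded = list_to_padded(x, pad_size=(4, 3), pad_value=0.0)
assert padded.shape == (2, 4, 3)  # shorter entries are zero-padded

recovered = padded_to_list(padded, split_size=[2, 4])
assert all(torch.equal(a, b) for a, b in zip(x, recovered))
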
@@ -119,9 +114,7 @@ def list_to_packed(x: List[torch.Tensor]):
"""
N = len(x)
num_items = torch.zeros(N, dtype=torch.int64, device=x[0].device)
item_packed_first_idx = torch.zeros(
N, dtype=torch.int64, device=x[0].device
)
item_packed_first_idx = torch.zeros(N, dtype=torch.int64, device=x[0].device)
item_packed_to_list_idx = []
cur = 0
for i, y in enumerate(x):
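list_to_packed, above, returns the packed tensor plus three bookkeeping outputs. A sketch of the 4-tuple, under the same import assumption as the previous example:

import torch
from pytorch3d.structures.utils import list_to_packed

x = [torch.rand(2, 3), torch.rand(4, 3)]
packed, num_items, first_idx, to_list_idx = list_to_packed(x)
assert packed.shape == (6, 3)        # 2 + 4 rows concatenated
assert num_items.tolist() == [2, 4]  # size of each list element
assert first_idx.tolist() == [0, 2]  # start row of each element
assert to_list_idx.tolist() == [0, 0, 1, 1, 1, 1]  # packed row -> list index
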
@@ -187,9 +180,7 @@ def padded_to_packed(
N, M, D = x.shape

if split_size is not None and pad_value is not None:
raise ValueError(
"Only one of split_size or pad_value should be provided."
)
raise ValueError("Only one of split_size or pad_value should be provided.")

x_packed = x.reshape(-1, D) # flatten padded

@@ -205,9 +196,7 @@ def padded_to_packed(
# Convert to packed using split sizes
N = len(split_size)
if x.shape[0] != N:
raise ValueError(
"Split size must be of same length as inputs first dimension"
)
raise ValueError("Split size must be of same length as inputs first dimension")

if not all(isinstance(i, int) for i in split_size):
raise ValueError(

@@ -22,4 +22,5 @@ from .so3 import (
)
from .transform3d import Rotate, RotateAxisAngle, Scale, Transform3d, Translate


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -2,6 +2,7 @@

import functools
from typing import Optional

import torch


@@ -155,9 +156,7 @@ def euler_angles_to_matrix(euler_angles, convention: str):
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
matrices = map(
_axis_angle_rotation, convention, torch.unbind(euler_angles, -1)
)
matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1))
return functools.reduce(torch.matmul, matrices)

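euler_angles_to_matrix, reformatted above, builds one rotation matrix per axis letter and composes them with functools.reduce(torch.matmul, ...). A usage sketch, assuming the function is exposed via pytorch3d.transforms:

import torch
from pytorch3d.transforms import euler_angles_to_matrix

angles = torch.tensor([[0.1, 0.2, 0.3]])
R = euler_angles_to_matrix(angles, "XYZ")  # rotations about X, Y, Z in turn
assert R.shape == (1, 3, 3)
# The result is a proper rotation: orthonormal with unit determinant.
assert torch.allclose(R[0] @ R[0].T, torch.eye(3), atol=1e-6)
assert torch.allclose(torch.det(R[0]), torch.tensor(1.0), atol=1e-6)
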
@@ -246,10 +245,7 @@ def matrix_to_euler_angles(matrix, convention: str):


def random_quaternions(
n: int,
dtype: Optional[torch.dtype] = None,
device=None,
requires_grad=False,
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random quaternions representing rotations,
@@ -266,19 +262,14 @@ def random_quaternions(
Returns:
Quaternions as tensor of shape (N, 4).
"""
o = torch.randn(
(n, 4), dtype=dtype, device=device, requires_grad=requires_grad
)
o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad)
s = (o * o).sum(1)
o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None]
return o


def random_rotations(
n: int,
dtype: Optional[torch.dtype] = None,
device=None,
requires_grad=False,
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random rotations as 3x3 rotation matrices.

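random_quaternions, above, samples Gaussian 4-vectors and normalizes them, using _copysign to flip any quaternion whose first component is negative; unit quaternions drawn this way are uniform over rotations. The same normalization in plain torch (torch.copysign is assumed available, standing in for the private _copysign helper):

import torch

n = 8
o = torch.randn((n, 4))
s = (o * o).sum(1)
# Dividing by a signed norm both normalizes and fixes the sign of o[:, 0].
o = o / torch.copysign(torch.sqrt(s), o[:, 0])[:, None]
assert torch.allclose(o.norm(dim=1), torch.ones(n), atol=1e-6)
assert (o[:, 0] >= 0).all()
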
@@ -3,6 +3,7 @@

import torch


HAT_INV_SKEW_SYMMETRIC_TOL = 1e-5


@@ -65,9 +66,7 @@ def so3_rotation_angle(R, eps: float = 1e-4, cos_angle: bool = False):
rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2]

if ((rot_trace < -1.0 - eps) + (rot_trace > 3.0 + eps)).any():
raise ValueError(
"A matrix has trace outside valid range [-1-eps,3+eps]."
)
raise ValueError("A matrix has trace outside valid range [-1-eps,3+eps].")

# clamp to valid range
rot_trace = torch.clamp(rot_trace, -1.0, 3.0)

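so3_rotation_angle, above, validates the trace before applying the identity theta = arccos((trace(R) - 1) / 2); a rotation's trace always lies in [-1, 3], hence the [-1-eps, 3+eps] range check. A small numeric check in plain torch:

import math
import torch

def rotation_angle_sketch(R):
    rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2]
    rot_trace = torch.clamp(rot_trace, -1.0, 3.0)  # guard tiny numeric overshoot
    return torch.acos(0.5 * (rot_trace - 1.0))

# 90-degree rotation about Z: trace = 1, so the angle is acos(0) = pi / 2.
c, s = math.cos(math.pi / 2), math.sin(math.pi / 2)
R = torch.tensor([[[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]]])
assert torch.allclose(rotation_angle_sketch(R), torch.tensor([math.pi / 2]))
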
@@ -3,6 +3,7 @@
import math
import warnings
from typing import Optional

import torch

from .rotation_conversions import _axis_angle_rotation
@@ -230,9 +231,7 @@ class Transform3d:
# the transformations with get_matrix(), this correctly
# right-multiplies by the inverse of self._matrix
# at the end of the composition.
tinv._transforms = [
t.inverse() for t in reversed(self._transforms)
]
tinv._transforms = [t.inverse() for t in reversed(self._transforms)]
last = Transform3d(device=self.device)
last._matrix = i_matrix
tinv._transforms.append(last)
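The inverse() code above relies on (T1 compose T2)^-1 = T2^-1 compose T1^-1, which is why the stored transforms are inverted in reversed order before the inverse of self._matrix is appended. A quick sanity check with the public Transform3d API:

import torch
from pytorch3d.transforms import Transform3d

t = Transform3d().translate(1.0, 2.0, 3.0).scale(2.0)
points = torch.rand(1, 5, 3)
# A composed transform followed by its inverse is a numerical no-op.
roundtrip = t.inverse().transform_points(t.transform_points(points))
assert torch.allclose(roundtrip, points, atol=1e-5)
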
@@ -334,9 +333,7 @@ class Transform3d:
return self.compose(Scale(device=self.device, *args, **kwargs))

def rotate_axis_angle(self, *args, **kwargs):
return self.compose(
RotateAxisAngle(device=self.device, *args, **kwargs)
)
return self.compose(RotateAxisAngle(device=self.device, *args, **kwargs))

def clone(self):
"""
@@ -388,9 +385,7 @@ class Transform3d:


class Translate(Transform3d):
def __init__(
self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"
):
def __init__(self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"):
"""
Create a new Transform3d representing 3D translations.

@@ -424,9 +419,7 @@ class Translate(Transform3d):


class Scale(Transform3d):
def __init__(
self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"
):
def __init__(self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"):
"""
A Transform3d representing a scaling operation, with different scale
factors along each coordinate axis.
@@ -444,9 +437,7 @@ class Scale(Transform3d):
- 1D torch tensor
"""
super().__init__(device=device)
xyz = _handle_input(
x, y, z, dtype, device, "scale", allow_singleton=True
)
xyz = _handle_input(x, y, z, dtype, device, "scale", allow_singleton=True)
N = xyz.shape[0]

# TODO: Can we do this all in one go somehow?
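Translate and Scale, whose constructors are collapsed above, both route x/y/z through _handle_input; Scale additionally allows a single value for all three axes (allow_singleton=True). A short composition sketch:

import torch
from pytorch3d.transforms import Scale, Translate

t = Translate(1.0, 0.0, 0.0).compose(Scale(2.0))  # translate first, then scale
p = torch.tensor([[[0.0, 0.0, 0.0]]])
out = t.transform_points(p)
assert torch.allclose(out, torch.tensor([[[2.0, 0.0, 0.0]]]))
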
@@ -469,11 +460,7 @@ class Scale(Transform3d):

class Rotate(Transform3d):
def __init__(
self,
R,
dtype=torch.float32,
device: str = "cpu",
orthogonal_tol: float = 1e-5,
self, R, dtype=torch.float32, device: str = "cpu", orthogonal_tol: float = 1e-5
):
"""
Create a new Transform3d representing 3D rotation using a rotation
@@ -562,9 +549,7 @@ def _handle_coord(c, dtype, device):
return c


def _handle_input(
x, y, z, dtype, device, name: str, allow_singleton: bool = False
):
def _handle_input(x, y, z, dtype, device, name: str, allow_singleton: bool = False):
"""
Helper function to handle parsing logic for building transforms. The output
is always a tensor of shape (N, 3), but there are several types of allowed

@@ -3,4 +3,5 @@
from .ico_sphere import ico_sphere
from .torus import torus


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -2,10 +2,10 @@


import torch

from pytorch3d.ops.subdivide_meshes import SubdivideMeshes
from pytorch3d.structures.meshes import Meshes


# Vertex coordinates for a level 0 ico-sphere.
_ico_verts0 = [
[-0.5257, 0.8507, 0.0000],

@@ -3,8 +3,8 @@
from itertools import tee
from math import cos, pi, sin
from typing import Iterator, Optional, Tuple
import torch

import torch
from pytorch3d.structures.meshes import Meshes


@@ -16,11 +16,7 @@ def _make_pair_range(N: int) -> Iterator[Tuple[int, int]]:


def torus(
r: float,
R: float,
sides: int,
rings: int,
device: Optional[torch.device] = None,
r: float, R: float, sides: int, rings: int, device: Optional[torch.device] = None
) -> Meshes:
"""
Create vertices and faces for a torus.

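torus, defined above, returns a Meshes object for a torus with minor radius r, major radius R, and a grid of rings x sides vertices. A usage sketch based on the signature in this hunk, assuming the import path set up in the utils __init__ hunk earlier:

from pytorch3d.utils import torus

mesh = torus(r=0.5, R=2.0, sides=8, rings=16)
print(mesh.verts_packed().shape)  # expected: (rings * sides, 3) for this construction
print(mesh.faces_packed().shape)  # two triangles per grid cell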