Address black + isort fbsource linter warnings

Summary: Address black + isort fbsource linter warnings from D20558374 (previous diff)

Reviewed By: nikhilaravi

Differential Revision: D20558373

fbshipit-source-id: d3607de4a01fb24c0d5269634563a7914bddf1c8
Author: Patrick Labatut, 2020-03-29 14:46:33 -07:00 (committed by Facebook GitHub Bot)
Parent: eb512ffde3
Commit: d57daa6f85
110 changed files with 705 additions and 1850 deletions
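Every hunk below is a mechanical before/after pair: isort regroups imports (standard library first, then third-party) and black rewraps code to its 88-column line length, collapsing calls and literals that no longer need to span multiple lines. As a rough illustration only, here is a minimal sketch of the same kind of cleanup using the public APIs of the two tools; it assumes black and isort >= 5 are installed, whereas the commit itself was produced by fbsource's internal linter wrappers, whose exact configuration is not shown here:

# Illustrative sketch, not the commit's actual tooling: run isort, then black,
# over a snippet that mirrors the changes in this diff.
import black
import isort

SOURCE = '''\
import numpy as np
import os
import torch

pixel_colors = torch.ones(
    (N, H, W, 4), dtype=colors.dtype, device=colors.device
)
'''

# isort moves the stdlib import ("os") into its own group above third-party ones.
sorted_source = isort.code(SOURCE)

# black collapses the torch.ones(...) call onto one line, since it fits in 88 columns.
formatted = black.format_str(sorted_source, mode=black.Mode(line_length=88))
print(formatted)

Both tools aim to be idempotent, so rerunning them over the reformatted files should be a no-op, which is what makes a repo-wide diff like the one below purely mechanical.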


@@ -8,6 +8,7 @@ TODO: python 3.8 when pytorch 1.4.
"""
import os.path
import jinja2
import yaml
@@ -45,9 +46,7 @@ def workflow_pair(
):
w = []
base_workflow_name = (
f"{prefix}binary_linux_{btype}_py{python_version}_{cu_version}"
)
base_workflow_name = f"{prefix}binary_linux_{btype}_py{python_version}_{cu_version}"
w.append(
generate_base_workflow(
@@ -94,9 +93,7 @@ def generate_base_workflow(
return {f"binary_linux_{btype}": d}
def generate_upload_workflow(
*, base_workflow_name, btype, cu_version, filter_branch
):
def generate_upload_workflow(*, base_workflow_name, btype, cu_version, filter_branch):
d = {
"name": f"{base_workflow_name}_upload",
"context": "org-member",


@@ -22,6 +22,7 @@ from recommonmark.states import DummyStateMachine
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.ext.autodoc import between
# Monkey patch to fix recommonmark 0.4 doc reference issues.
orig_run_role = DummyStateMachine.run_role
@@ -154,9 +155,7 @@ html_theme_options = {"collapse_navigation": True}
def url_resolver(url):
if ".html" not in url:
url = url.replace("../", "")
return (
"https://github.com/facebookresearch/pytorch3d/blob/master/" + url
)
return "https://github.com/facebookresearch/pytorch3d/blob/master/" + url
else:
if DEPLOY:
return "http://pytorch3d.readthedocs.io/" + url
@@ -188,9 +187,7 @@ def setup(app):
# Register a sphinx.ext.autodoc.between listener to ignore everything
# between lines that contain the word IGNORE
app.connect(
"autodoc-process-docstring", between("^.*IGNORE.*$", exclude=True)
)
app.connect("autodoc-process-docstring", between("^.*IGNORE.*$", exclude=True))
app.add_transform(AutoStructify)
return app


@@ -15,7 +15,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Render a coloured point cloud\n",
"# Render a colored point cloud\n",
"\n",
"This tutorial shows how to:\n",
"- set up a renderer \n",
@@ -84,7 +84,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load a point cloud and corresponding colours\n",
"### Load a point cloud and corresponding colors\n",
"\n",
"Load a `.ply` file and create a **Point Cloud** object. \n",
"\n",


@@ -1,8 +1,4 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .camera_visualization import (
get_camera_wireframe,
plot_camera_scene,
plot_cameras,
)
from .camera_visualization import get_camera_wireframe, plot_camera_scene, plot_cameras
from .plot_image_grid import image_grid


@@ -34,13 +34,9 @@ def image_grid(
cols = 1
gridspec_kw = {"wspace": 0.0, "hspace": 0.0} if fill else {}
fig, axarr = plt.subplots(
rows, cols, gridspec_kw=gridspec_kw, figsize=(15, 9)
)
fig, axarr = plt.subplots(rows, cols, gridspec_kw=gridspec_kw, figsize=(15, 9))
bleed = 0
fig.subplots_adjust(
left=bleed, bottom=bleed, right=(1 - bleed), top=(1 - bleed)
)
fig.subplots_adjust(left=bleed, bottom=bleed, right=(1 - bleed), top=(1 - bleed))
for ax, im in zip(axarr.ravel(), images):
if rgb:


@@ -4,4 +4,5 @@
from .obj_io import load_obj, load_objs_as_meshes, save_obj
from .ply_io import load_ply, save_ply
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@@ -2,16 +2,16 @@
"""This module implements utility functions for loading and saving meshes."""
import numpy as np
import os
import pathlib
import warnings
from collections import namedtuple
from typing import List, Optional
import numpy as np
import torch
from fvcore.common.file_io import PathManager
from PIL import Image
from pytorch3d.structures import Meshes, Textures, join_meshes
@@ -51,9 +51,7 @@ def _read_image(file_name: str, format=None):
# Faces & Aux type returned from load_obj function.
_Faces = namedtuple("Faces", "verts_idx normals_idx textures_idx materials_idx")
_Aux = namedtuple(
"Properties", "normals verts_uvs material_colors texture_images"
)
_Aux = namedtuple("Properties", "normals verts_uvs material_colors texture_images")
def _format_faces_indices(faces_indices, max_index):
@@ -247,9 +245,7 @@ def load_objs_as_meshes(files: list, device=None, load_textures: bool = True):
image = list(tex_maps.values())[0].to(device)[None]
tex = Textures(verts_uvs=verts_uvs, faces_uvs=faces_uvs, maps=image)
mesh = Meshes(
verts=[verts], faces=[faces.verts_idx.to(device)], textures=tex
)
mesh = Meshes(verts=[verts], faces=[faces.verts_idx.to(device)], textures=tex)
mesh_list.append(mesh)
if len(mesh_list) == 1:
return mesh_list[0]
@@ -308,9 +304,7 @@ def _parse_face(
# Subdivide faces with more than 3 vertices. See comments of the
# load_obj function for more details.
for i in range(len(face_verts) - 2):
faces_verts_idx.append(
(face_verts[0], face_verts[i + 1], face_verts[i + 2])
)
faces_verts_idx.append((face_verts[0], face_verts[i + 1], face_verts[i + 2]))
if len(face_normals) > 0:
faces_normals_idx.append(
(face_normals[0], face_normals[i + 1], face_normals[i + 2])
@@ -367,8 +361,7 @@ def _load(f_obj, data_dir, load_textures=True):
tx = [float(x) for x in line.split()[1:3]]
if len(tx) != 2:
raise ValueError(
"Texture %s does not have 2 values. Line: %s"
% (str(tx), str(line))
"Texture %s does not have 2 values. Line: %s" % (str(tx), str(line))
)
verts_uvs.append(tx)
elif line.startswith("vn "):
@@ -397,17 +390,13 @@
# Repeat for normals and textures if present.
if len(faces_normals_idx) > 0:
faces_normals_idx = _format_faces_indices(
faces_normals_idx, normals.shape[0]
)
faces_normals_idx = _format_faces_indices(faces_normals_idx, normals.shape[0])
if len(faces_textures_idx) > 0:
faces_textures_idx = _format_faces_indices(
faces_textures_idx, verts_uvs.shape[0]
)
if len(faces_materials_idx) > 0:
faces_materials_idx = torch.tensor(
faces_materials_idx, dtype=torch.int64
)
faces_materials_idx = torch.tensor(faces_materials_idx, dtype=torch.int64)
# Load materials
material_colors, texture_images = None, None


@@ -4,15 +4,17 @@
"""This module implements utility functions for loading and saving meshes."""
import numpy as np
import pathlib
import struct
import sys
import warnings
from collections import namedtuple
from typing import Optional, Tuple
import numpy as np
import torch
_PlyTypeData = namedtuple("_PlyTypeData", "size struct_char np_type")
_PLY_TYPES = {
@@ -257,11 +259,7 @@ def _try_read_ply_constant_list_ascii(f, definition: _PlyElementType):
"ignore", message=".* Empty input file.*", category=UserWarning
)
data = np.loadtxt(
f,
dtype=np_type,
comments=None,
ndmin=2,
max_rows=definition.count,
f, dtype=np_type, comments=None, ndmin=2, max_rows=definition.count
)
except ValueError:
f.seek(start_point)
@@ -301,9 +299,7 @@ def _parse_heterogenous_property_ascii(datum, line_iter, property: _Property):
length = int(value)
except ValueError:
raise ValueError("A list length was not a number.")
list_value = np.zeros(
length, dtype=_PLY_TYPES[property.data_type].np_type
)
list_value = np.zeros(length, dtype=_PLY_TYPES[property.data_type].np_type)
for i in range(length):
inner_value = next(line_iter, None)
if inner_value is None:
@@ -404,8 +400,7 @@ def _read_ply_element_struct(f, definition: _PlyElementType, endian_str: str):
values. There is one column for each property.
"""
format = "".join(
_PLY_TYPES[property.data_type].struct_char
for property in definition.properties
_PLY_TYPES[property.data_type].struct_char for property in definition.properties
)
format = endian_str + format
pattern = struct.Struct(format)
@@ -414,10 +409,7 @@ def _read_ply_element_struct(f, definition: _PlyElementType, endian_str: str):
bytes_data = f.read(needed_bytes)
if len(bytes_data) != needed_bytes:
raise ValueError("Not enough data for %s." % definition.name)
data = [
pattern.unpack_from(bytes_data, i * size)
for i in range(definition.count)
]
data = [pattern.unpack_from(bytes_data, i * size) for i in range(definition.count)]
return data
@@ -475,9 +467,7 @@ def _try_read_ply_constant_list_binary(
return output
def _read_ply_element_binary(
f, definition: _PlyElementType, big_endian: bool
) -> list:
def _read_ply_element_binary(f, definition: _PlyElementType, big_endian: bool) -> list:
"""
Decode all instances of a single element from a binary .ply file.
@@ -515,9 +505,7 @@ def _read_ply_element_binary(
data = []
for _i in range(definition.count):
datum = []
for property, property_struct in zip(
definition.properties, property_structs
):
for property, property_struct in zip(definition.properties, property_structs):
size = property_struct.size
initial_data = f.read(size)
if len(initial_data) != size:
@@ -656,28 +644,19 @@ def load_ply(f):
if face is None:
raise ValueError("The ply file has no face element.")
if (
not isinstance(vertex, np.ndarray)
or vertex.ndim != 2
or vertex.shape[1] != 3
):
if not isinstance(vertex, np.ndarray) or vertex.ndim != 2 or vertex.shape[1] != 3:
raise ValueError("Invalid vertices in file.")
verts = torch.tensor(vertex, dtype=torch.float32)
face_head = next(head for head in header.elements if head.name == "face")
if (
len(face_head.properties) != 1
or face_head.properties[0].list_size_type is None
):
if len(face_head.properties) != 1 or face_head.properties[0].list_size_type is None:
raise ValueError("Unexpected form of faces data.")
# face_head.properties[0].name is usually "vertex_index" or "vertex_indices"
# but we don't need to enforce this.
if isinstance(face, np.ndarray) and face.ndim == 2:
if face.shape[1] < 3:
raise ValueError("Faces must have at least 3 vertices.")
face_arrays = [
face[:, [0, i + 1, i + 2]] for i in range(face.shape[1] - 2)
]
face_arrays = [face[:, [0, i + 1, i + 2]] for i in range(face.shape[1] - 2)]
faces = torch.tensor(np.vstack(face_arrays), dtype=torch.int64)
else:
face_list = []
@@ -687,9 +666,7 @@ def load_ply(f):
if face_item.shape[0] < 3:
raise ValueError("Faces must have at least 3 vertices.")
for i in range(face_item.shape[0] - 2):
face_list.append(
[face_item[0], face_item[i + 1], face_item[i + 2]]
)
face_list.append([face_item[0], face_item[i + 1], face_item[i + 2]])
faces = torch.tensor(face_list, dtype=torch.int64)
return verts, faces


@@ -6,4 +6,5 @@ from .mesh_edge_loss import mesh_edge_loss
from .mesh_laplacian_smoothing import mesh_laplacian_smoothing
from .mesh_normal_consistency import mesh_normal_consistency
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@@ -2,13 +2,10 @@
import torch
import torch.nn.functional as F
from pytorch3d.ops.nearest_neighbor_points import nn_points_idx
def _validate_chamfer_reduction_inputs(
batch_reduction: str, point_reduction: str
):
def _validate_chamfer_reduction_inputs(batch_reduction: str, point_reduction: str):
"""Check the requested reductions are valid.
Args:
@@ -18,17 +15,11 @@ def _validate_chamfer_reduction_inputs(
points, can be one of ["none", "mean", "sum"].
"""
if batch_reduction not in ["none", "mean", "sum"]:
raise ValueError(
'batch_reduction must be one of ["none", "mean", "sum"]'
)
raise ValueError('batch_reduction must be one of ["none", "mean", "sum"]')
if point_reduction not in ["none", "mean", "sum"]:
raise ValueError(
'point_reduction must be one of ["none", "mean", "sum"]'
)
raise ValueError('point_reduction must be one of ["none", "mean", "sum"]')
if batch_reduction == "none" and point_reduction == "none":
raise ValueError(
'batch_reduction and point_reduction cannot both be "none".'
)
raise ValueError('batch_reduction and point_reduction cannot both be "none".')
def chamfer_distance(
@@ -87,10 +78,7 @@ def chamfer_distance(
(x.sum((1, 2)) * weights).sum() * 0.0,
(x.sum((1, 2)) * weights).sum() * 0.0,
)
return (
(x.sum((1, 2)) * weights) * 0.0,
(x.sum((1, 2)) * weights) * 0.0,
)
return ((x.sum((1, 2)) * weights) * 0.0, (x.sum((1, 2)) * weights) * 0.0)
return_normals = x_normals is not None and y_normals is not None
cham_norm_x = x.new_zeros(())


@@ -2,6 +2,7 @@
from itertools import islice
import torch
@@ -76,10 +77,7 @@ def mesh_normal_consistency(meshes):
with torch.no_grad():
edge_idx = face_to_edge.reshape(F * 3) # (3 * F,) indexes into edges
vert_idx = (
faces_packed.view(1, F, 3)
.expand(3, F, 3)
.transpose(0, 1)
.reshape(3 * F, 3)
faces_packed.view(1, F, 3).expand(3, F, 3).transpose(0, 1).reshape(3 * F, 3)
)
edge_idx, edge_sort_idx = edge_idx.sort()
vert_idx = vert_idx[edge_sort_idx]
@@ -132,9 +130,7 @@ def mesh_normal_consistency(meshes):
loss = 1 - torch.cosine_similarity(n0, n1, dim=1)
verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_idx[:, 0]]
verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[
vert_edge_pair_idx[:, 0]
]
verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_edge_pair_idx[:, 0]]
num_normals = verts_packed_to_mesh_idx.bincount(minlength=N)
weights = 1.0 / num_normals[verts_packed_to_mesh_idx].float()


@@ -10,4 +10,5 @@ from .sample_points_from_meshes import sample_points_from_meshes
from .subdivide_meshes import SubdivideMeshes
from .vert_align import vert_align
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@@ -3,7 +3,6 @@
import torch
import torch.nn.functional as F
from pytorch3d.structures import Meshes
@@ -200,8 +199,6 @@ def cubify(voxels, thresh, device=None) -> Meshes:
grid_verts.index_select(0, (idleverts[n] == 0).nonzero()[:, 0])
for n in range(N)
]
faces_list = [
nface - idlenum[n][nface] for n, nface in enumerate(faces_list)
]
faces_list = [nface - idlenum[n][nface] for n, nface in enumerate(faces_list)]
return Meshes(verts=verts_list, faces=faces_list)


@@ -3,11 +3,10 @@
import torch
import torch.nn as nn
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from pytorch3d import _C
class GraphConv(nn.Module):
"""A single graph convolution layer."""
@@ -60,9 +59,7 @@ class GraphConv(nn.Module):
number of output features per vertex.
"""
if verts.is_cuda != edges.is_cuda:
raise ValueError(
"verts and edges tensors must be on the same device."
)
raise ValueError("verts and edges tensors must be on the same device.")
if verts.shape[0] == 0:
# empty graph.
return verts.new_zeros((0, self.output_dim)) * verts.sum()


@@ -1,7 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
from pytorch3d import _C


@@ -1,11 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from pytorch3d import _C
class _MeshFaceAreasNormals(Function):
"""


@@ -2,7 +2,6 @@
import torch
from pytorch3d import _C
@@ -31,9 +30,7 @@ def nn_points_idx(p1, p2, p2_normals=None) -> torch.Tensor:
"""
N, P1, D = p1.shape
with torch.no_grad():
p1_nn_idx = _C.nn_points_idx(
p1.contiguous(), p2.contiguous()
) # (N, P1)
p1_nn_idx = _C.nn_points_idx(p1.contiguous(), p2.contiguous()) # (N, P1)
p1_nn_idx_expanded = p1_nn_idx.view(N, P1, 1).expand(N, P1, D)
p1_nn_points = p2.gather(1, p1_nn_idx_expanded)
if p2_normals is None:


@@ -1,11 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from pytorch3d import _C
class _PackedToPadded(Function):
"""


@@ -7,8 +7,8 @@ batches of meshes.
"""
import sys
from typing import Tuple, Union
import torch
import torch
from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals
from pytorch3d.ops.packed_to_padded import packed_to_padded
@@ -53,9 +53,7 @@ def sample_points_from_meshes(
# Only compute samples for non empty meshes
with torch.no_grad():
areas, _ = mesh_face_areas_normals(
verts, faces
) # Face areas can be zero.
areas, _ = mesh_face_areas_normals(verts, faces) # Face areas can be zero.
max_faces = meshes.num_faces_per_mesh().max().item()
areas_padded = packed_to_padded(
areas, mesh_to_face[meshes.valid], max_faces
@@ -80,21 +78,17 @@
a = v0[sample_face_idxs] # (N, num_samples, 3)
b = v1[sample_face_idxs]
c = v2[sample_face_idxs]
samples[meshes.valid] = (
w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c
)
samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c
if return_normals:
# Intialize normals tensor with fill value 0 for empty meshes.
# Normals for the sampled points are face normals computed from
# the vertices of the face in which the sampled point lies.
normals = torch.zeros(
(num_meshes, num_samples, 3), device=meshes.device
)
normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)
vert_normals = (v1 - v0).cross(v2 - v1, dim=1)
vert_normals = vert_normals / vert_normals.norm(
dim=1, p=2, keepdim=True
).clamp(min=sys.float_info.epsilon)
vert_normals = vert_normals / vert_normals.norm(dim=1, p=2, keepdim=True).clamp(
min=sys.float_info.epsilon
)
vert_normals = vert_normals[sample_face_idxs]
normals[meshes.valid] = vert_normals


@@ -3,7 +3,6 @@
import torch
import torch.nn as nn
from pytorch3d.structures import Meshes
@@ -193,16 +192,12 @@ class SubdivideMeshes(nn.Module):
edges = meshes[0].edges_packed()
# The set of faces is the same across the different meshes.
new_faces = self._subdivided_faces.view(1, -1, 3).expand(
self._N, -1, -1
)
new_faces = self._subdivided_faces.view(1, -1, 3).expand(self._N, -1, -1)
# Add one new vertex at the midpoint of each edge by taking the average
# of the vertices that form each edge.
new_verts = verts[:, edges].mean(dim=2)
new_verts = torch.cat(
[verts, new_verts], dim=1
) # (sum(V_n)+sum(E_n), 3)
new_verts = torch.cat([verts, new_verts], dim=1) # (sum(V_n)+sum(E_n), 3)
new_feats = None
# Calculate features for new vertices.
@@ -212,15 +207,11 @@
# padded, i.e. (N*V, D) to (N, V, D).
feats = feats.view(verts.size(0), verts.size(1), feats.size(1))
if feats.dim() != 3:
raise ValueError(
"features need to be of shape (N, V, D) or (N*V, D)"
)
raise ValueError("features need to be of shape (N, V, D) or (N*V, D)")
# Take average of the features at the vertices that form each edge.
new_feats = feats[:, edges].mean(dim=2)
new_feats = torch.cat(
[feats, new_feats], dim=1
) # (sum(V_n)+sum(E_n), 3)
new_feats = torch.cat([feats, new_feats], dim=1) # (sum(V_n)+sum(E_n), 3)
new_meshes = Meshes(verts=new_verts, faces=new_faces)
@@ -270,9 +261,7 @@
) # (sum(V_n)+sum(E_n),)
verts_ordered_idx_init = torch.zeros(
new_verts_per_mesh.sum(),
dtype=torch.int64,
device=meshes.device,
new_verts_per_mesh.sum(), dtype=torch.int64, device=meshes.device
) # (sum(V_n)+sum(E_n),)
# Reassign vertex indices so that existing and new vertices for each
@@ -288,9 +277,7 @@
# Calculate the indices needed to group the existing and new faces
# for each mesh.
face_sort_idx = create_faces_index(
num_faces_per_mesh, device=meshes.device
)
face_sort_idx = create_faces_index(num_faces_per_mesh, device=meshes.device)
# Reorder the faces to sequentially group existing and new faces
# for each mesh.
@@ -361,9 +348,7 @@ def create_verts_index(verts_per_mesh, edges_per_mesh, device=None):
E = edges_per_mesh.sum() # e.g. 21
verts_per_mesh_cumsum = verts_per_mesh.cumsum(dim=0) # (N,) e.g. (4, 9, 15)
edges_per_mesh_cumsum = edges_per_mesh.cumsum(
dim=0
) # (N,) e.g. (5, 12, 21)
edges_per_mesh_cumsum = edges_per_mesh.cumsum(dim=0) # (N,) e.g. (5, 12, 21)
v_to_e_idx = verts_per_mesh_cumsum.clone()
@@ -373,9 +358,7 @@ def create_verts_index(verts_per_mesh, edges_per_mesh, device=None):
] # e.g. (4, 9, 15) + (0, 5, 12) = (4, 14, 27)
# vertex to edge offset.
v_to_e_offset = (
V - verts_per_mesh_cumsum
) # e.g. 15 - (4, 9, 15) = (11, 6, 0)
v_to_e_offset = V - verts_per_mesh_cumsum # e.g. 15 - (4, 9, 15) = (11, 6, 0)
v_to_e_offset[1:] += edges_per_mesh_cumsum[
:-1
] # e.g. (11, 6, 0) + (0, 5, 12) = (11, 11, 12)


@@ -59,9 +59,7 @@ def vert_align(
elif hasattr(verts, "verts_padded"):
grid = verts.verts_padded()
else:
raise ValueError(
"verts must be a tensor or have a `verts_padded` attribute"
)
raise ValueError("verts must be a tensor or have a `verts_padded` attribute")
grid = grid[:, None, :, :2] # (N, 1, V, 2)


@@ -44,4 +44,5 @@ from .points import (
)
from .utils import TensorProperties, convert_to_tensors_and_broadcast
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@@ -1,10 +1,12 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
from typing import NamedTuple, Sequence
import numpy as np
import torch
# Example functions for blending the top K colors per pixel using the outputs
# from rasterization.
# NOTE: All blending function should return an RGBA image per batch element
@@ -63,9 +65,7 @@ def sigmoid_alpha_blend(colors, fragments, blend_params) -> torch.Tensor:
3D Reasoning', ICCV 2019
"""
N, H, W, K = fragments.pix_to_face.shape
pixel_colors = torch.ones(
(N, H, W, 4), dtype=colors.dtype, device=colors.device
)
pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device)
mask = fragments.pix_to_face >= 0
# The distance is negative if a pixel is inside a face and positive outside
@@ -124,14 +124,10 @@ def softmax_rgb_blend(
N, H, W, K = fragments.pix_to_face.shape
device = fragments.pix_to_face.device
pixel_colors = torch.ones(
(N, H, W, 4), dtype=colors.dtype, device=colors.device
)
pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device)
background = blend_params.background_color
if not torch.is_tensor(background):
background = torch.tensor(
background, dtype=torch.float32, device=device
)
background = torch.tensor(background, dtype=torch.float32, device=device)
# Background color
delta = np.exp(1e-10 / blend_params.gamma) * 1e-10


@@ -1,15 +1,16 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import math
import numpy as np
from typing import Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from pytorch3d.transforms import Rotate, Transform3d, Translate
from .utils import TensorProperties, convert_to_tensors_and_broadcast
# Default values for rotation and translation matrices.
r = np.expand_dims(np.eye(3), axis=0) # (1, 3, 3)
t = np.expand_dims(np.zeros(3), axis=0) # (1, 3)
@@ -106,9 +107,7 @@ class OpenGLPerspectiveCameras(TensorProperties):
aspect_ratio = kwargs.get("aspect_ratio", self.aspect_ratio)
degrees = kwargs.get("degrees", self.degrees)
P = torch.zeros(
(self._N, 4, 4), device=self.device, dtype=torch.float32
)
P = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
if degrees:
fov = (np.pi / 180) * fov
@@ -204,9 +203,7 @@ class OpenGLPerspectiveCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
@@ -229,9 +226,7 @@ class OpenGLPerspectiveCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)
@@ -337,9 +332,7 @@ class OpenGLOrthographicCameras(TensorProperties):
bottom = kwargs.get("bottom", self.bottom) # pyre-ignore[16]
scale_xyz = kwargs.get("scale_xyz", self.scale_xyz) # pyre-ignore[16]
P = torch.zeros(
(self._N, 4, 4), dtype=torch.float32, device=self.device
)
P = torch.zeros((self._N, 4, 4), dtype=torch.float32, device=self.device)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
# NOTE: OpenGL flips handedness of coordinate system between camera
# space and NDC space so z sign is -ve. In PyTorch3D we maintain a
@@ -417,9 +410,7 @@ class OpenGLOrthographicCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
@@ -442,9 +433,7 @@ class OpenGLOrthographicCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)
@@ -470,12 +459,7 @@ class SfMPerspectiveCameras(TensorProperties):
"""
def __init__(
self,
focal_length=1.0,
principal_point=((0.0, 0.0),),
R=r,
T=t,
device="cpu",
self, focal_length=1.0, principal_point=((0.0, 0.0),), R=r, T=t, device="cpu"
):
"""
__init__(self, focal_length, principal_point, R, T, device) -> None
@@ -589,9 +573,7 @@ class SfMPerspectiveCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
@@ -610,9 +592,7 @@ class SfMPerspectiveCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)
@@ -638,12 +618,7 @@ class SfMOrthographicCameras(TensorProperties):
"""
def __init__(
self,
focal_length=1.0,
principal_point=((0.0, 0.0),),
R=r,
T=t,
device="cpu",
self, focal_length=1.0, principal_point=((0.0, 0.0),), R=r, T=t, device="cpu"
):
"""
__init__(self, focal_length, principal_point, R, T, device) -> None
@@ -757,9 +732,7 @@ class SfMOrthographicCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T)
return world_to_view_transform
def get_full_projection_transform(self, **kwargs) -> Transform3d:
@@ -778,9 +751,7 @@ class SfMOrthographicCameras(TensorProperties):
"""
self.R = kwargs.get("R", self.R) # pyre-ignore[16]
self.T = kwargs.get("T", self.T) # pyre-ignore[16]
world_to_view_transform = self.get_world_to_view_transform(
R=self.R, T=self.T
)
world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T)
view_to_screen_transform = self.get_projection_transform(**kwargs)
return world_to_view_transform.compose(view_to_screen_transform)
@@ -990,9 +961,7 @@ def look_at_rotation(
z_axis = F.normalize(at - camera_position, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
R = torch.cat(
(x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1
)
R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
return R.transpose(1, 2)
@@ -1038,9 +1007,7 @@
"""
if eye is not None:
broadcasted_args = convert_to_tensors_and_broadcast(
eye, at, up, device=device
)
broadcasted_args = convert_to_tensors_and_broadcast(eye, at, up, device=device)
eye, at, up = broadcasted_args
C = eye
else:


@@ -3,10 +3,11 @@
from typing import NamedTuple
import torch
import torch
from pytorch3d import _C
# Example functions for blending the top K features per pixel using the outputs
# from rasterization.
# NOTE: All blending function should return a (N, H, W, C) tensor per batch element.
@@ -49,9 +50,7 @@ class _CompositeAlphaPoints(torch.autograd.Function):
def forward(ctx, features, alphas, points_idx):
pt_cld = _C.accum_alphacomposite(features, alphas, points_idx)
ctx.save_for_backward(
features.clone(), alphas.clone(), points_idx.clone()
)
ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone())
return pt_cld
@staticmethod
@@ -68,9 +67,7 @@ class _CompositeAlphaPoints(torch.autograd.Function):
return grad_features, grad_alphas, grad_points_idx, None
def alpha_composite(
pointsidx, alphas, pt_clds, blend_params=None
) -> torch.Tensor:
def alpha_composite(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor:
"""
Composite features within a z-buffer using alpha compositing. Given a zbuffer
with corresponding features and weights, these values are accumulated according
@@ -131,9 +128,7 @@ class _CompositeNormWeightedSumPoints(torch.autograd.Function):
def forward(ctx, features, alphas, points_idx):
pt_cld = _C.accum_weightedsumnorm(features, alphas, points_idx)
ctx.save_for_backward(
features.clone(), alphas.clone(), points_idx.clone()
)
ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone())
return pt_cld
@staticmethod
@@ -150,9 +145,7 @@ class _CompositeNormWeightedSumPoints(torch.autograd.Function):
return grad_features, grad_alphas, grad_points_idx, None
def norm_weighted_sum(
pointsidx, alphas, pt_clds, blend_params=None
) -> torch.Tensor:
def norm_weighted_sum(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor:
"""
Composite features within a z-buffer using normalized weighted sum. Given a zbuffer
with corresponding features and weights, these values are accumulated
@@ -213,9 +206,7 @@ class _CompositeWeightedSumPoints(torch.autograd.Function):
def forward(ctx, features, alphas, points_idx):
pt_cld = _C.accum_weightedsum(features, alphas, points_idx)
ctx.save_for_backward(
features.clone(), alphas.clone(), points_idx.clone()
)
ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone())
return pt_cld
@staticmethod


@@ -114,12 +114,7 @@ def specular(
# Ensure all inputs have same batch dimension as points
matched_tensors = convert_to_tensors_and_broadcast(
points,
color,
direction,
camera_position,
shininess,
device=points.device,
points, color, direction, camera_position, shininess, device=points.device
)
_, color, direction, camera_position, shininess = matched_tensors
@@ -201,9 +196,7 @@ class DirectionalLights(TensorProperties):
normals=normals, color=self.diffuse_color, direction=self.direction
)
def specular(
self, normals, points, camera_position, shininess
) -> torch.Tensor:
def specular(self, normals, points, camera_position, shininess) -> torch.Tensor:
return specular(
points=points,
normals=normals,
@@ -256,13 +249,9 @@ class PointLights(TensorProperties):
def diffuse(self, normals, points) -> torch.Tensor:
direction = self.location - points
return diffuse(
normals=normals, color=self.diffuse_color, direction=direction
)
return diffuse(normals=normals, color=self.diffuse_color, direction=direction)
def specular(
self, normals, points, camera_position, shininess
) -> torch.Tensor:
def specular(self, normals, points, camera_position, shininess) -> torch.Tensor:
direction = self.location - points
return specular(
points=points,


@@ -1,10 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .texturing import ( # isort:skip
interpolate_texture_map,
interpolate_vertex_colors,
)
from .texturing import interpolate_texture_map, interpolate_vertex_colors # isort:skip
from .rasterize_meshes import rasterize_meshes
from .rasterizer import MeshRasterizer, RasterizationSettings
from .renderer import MeshRenderer
@@ -20,4 +17,5 @@ from .shader import (
from .shading import gouraud_shading, phong_shading
from .utils import interpolate_face_attributes
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@@ -1,12 +1,13 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
from typing import Optional
import torch
import numpy as np
import torch
from pytorch3d import _C
# TODO make the epsilon user configurable
kEpsilon = 1e-30
@@ -172,9 +173,7 @@ class _RasterizeFaceVerts(torch.autograd.Function):
return pix_to_face, zbuf, barycentric_coords, dists
@staticmethod
def backward(
ctx, grad_pix_to_face, grad_zbuf, grad_barycentric_coords, grad_dists
):
def backward(ctx, grad_pix_to_face, grad_zbuf, grad_barycentric_coords, grad_dists):
grad_face_verts = None
grad_mesh_to_face_first_idx = None
grad_num_faces_per_mesh = None
@@ -243,9 +242,7 @@ def rasterize_meshes_python(
face_idxs = torch.full(
(N, H, W, K), fill_value=-1, dtype=torch.int64, device=device
)
zbuf = torch.full(
(N, H, W, K), fill_value=-1, dtype=torch.float32, device=device
)
zbuf = torch.full((N, H, W, K), fill_value=-1, dtype=torch.float32, device=device)
bary_coords = torch.full(
(N, H, W, K, 3), fill_value=-1, dtype=torch.float32, device=device
)
@@ -308,9 +305,7 @@ def rasterize_meshes_python(
continue
# Compute barycentric coordinates and pixel z distance.
pxy = torch.tensor(
[xf, yf], dtype=torch.float32, device=device
)
pxy = torch.tensor([xf, yf], dtype=torch.float32, device=device)
bary = barycentric_coordinates(pxy, v0[:2], v1[:2], v2[:2])
if perspective_correct:


@@ -1,6 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import NamedTuple, Optional
import torch
import torch.nn as nn
@@ -123,8 +124,5 @@ class MeshRasterizer(nn.Module):
perspective_correct=raster_settings.perspective_correct,
)
return Fragments(
pix_to_face=pix_to_face,
zbuf=zbuf,
bary_coords=bary_coords,
dists=dists,
pix_to_face=pix_to_face, zbuf=zbuf, bary_coords=bary_coords, dists=dists
)


@@ -7,6 +7,7 @@ import torch.nn as nn
from .rasterizer import Fragments
from .utils import _clip_barycentric_coordinates, _interpolate_zbuf
# A renderer class should be initialized with a
# function for rasterization and a function for shading.
# The rasterizer should:
@@ -48,16 +49,12 @@ class MeshRenderer(nn.Module):
the range for the corresponding face.
"""
fragments = self.rasterizer(meshes_world, **kwargs)
raster_settings = kwargs.get(
"raster_settings", self.rasterizer.raster_settings
)
raster_settings = kwargs.get("raster_settings", self.rasterizer.raster_settings)
if raster_settings.blur_radius > 0.0:
# TODO: potentially move barycentric clipping to the rasterizer
# if no downstream functions requires unclipped values.
# This will avoid unnecssary re-interpolation of the z buffer.
clipped_bary_coords = _clip_barycentric_coordinates(
fragments.bary_coords
)
clipped_bary_coords = _clip_barycentric_coordinates(fragments.bary_coords)
clipped_zbuf = _interpolate_zbuf(
fragments.pix_to_face, clipped_bary_coords, meshes_world
)


@@ -16,6 +16,7 @@ from ..materials import Materials
from .shading import flat_shading, gouraud_shading, phong_shading
from .texturing import interpolate_texture_map, interpolate_vertex_colors
# A Shader should take as input fragments from the output of rasterization
# along with scene params and output images. A shader could perform operations
# such as:
@@ -41,16 +42,12 @@ class HardPhongShader(nn.Module):
def __init__(self, device="cpu", cameras=None, lights=None, materials=None):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
@@ -85,28 +82,17 @@ class SoftPhongShader(nn.Module):
"""
def __init__(
self,
device="cpu",
cameras=None,
lights=None,
materials=None,
blend_params=None,
self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None
):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = (
blend_params if blend_params is not None else BlendParams()
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = blend_params if blend_params is not None else BlendParams()
def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
texels = interpolate_vertex_colors(fragments, meshes)
@@ -142,16 +128,12 @@ class HardGouraudShader(nn.Module):
def __init__(self, device="cpu", cameras=None, lights=None, materials=None):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
@@ -185,28 +167,17 @@ class SoftGouraudShader(nn.Module):
"""
def __init__(
self,
device="cpu",
cameras=None,
lights=None,
materials=None,
blend_params=None,
self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None
):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = (
blend_params if blend_params is not None else BlendParams()
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = blend_params if blend_params is not None else BlendParams()
def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
cameras = kwargs.get("cameras", self.cameras)
@@ -241,28 +212,17 @@ class TexturedSoftPhongShader(nn.Module):
"""
def __init__(
self,
device="cpu",
cameras=None,
lights=None,
materials=None,
blend_params=None,
self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None
):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = (
blend_params if blend_params is not None else BlendParams()
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
self.blend_params = blend_params if blend_params is not None else BlendParams()
def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
texels = interpolate_texture_map(fragments, meshes)
@@ -298,16 +258,12 @@ class HardFlatShader(nn.Module):
def __init__(self, device="cpu", cameras=None, lights=None, materials=None):
super().__init__()
self.lights = (
lights if lights is not None else PointLights(device=device)
)
self.lights = lights if lights is not None else PointLights(device=device)
self.materials = (
materials if materials is not None else Materials(device=device)
)
self.cameras = (
cameras
if cameras is not None
else OpenGLPerspectiveCameras(device=device)
cameras if cameras is not None else OpenGLPerspectiveCameras(device=device)
)
def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
@@ -346,9 +302,7 @@ class SoftSilhouetteShader(nn.Module):
def __init__(self, blend_params=None):
super().__init__()
self.blend_params = (
blend_params if blend_params is not None else BlendParams()
)
self.blend_params = blend_params if blend_params is not None else BlendParams()
def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
""""


@@ -2,6 +2,7 @@
from typing import Tuple
import torch
from .texturing import interpolate_face_attributes
@@ -82,9 +83,7 @@ def phong_shading(
return colors
def gouraud_shading(
meshes, fragments, lights, cameras, materials
) -> torch.Tensor:
def gouraud_shading(meshes, fragments, lights, cameras, materials) -> torch.Tensor:
"""
Apply per vertex shading. First compute the vertex illumination by applying
ambient, diffuse and specular lighting. If vertex color is available,
@@ -131,9 +130,7 @@ def gouraud_shading(
return colors
def flat_shading(
meshes, fragments, lights, cameras, materials, texels
) -> torch.Tensor:
def flat_shading(meshes, fragments, lights, cameras, materials, texels) -> torch.Tensor:
"""
Apply per face shading. Use the average face position and the face normals
to compute the ambient, diffuse and specular lighting. Apply the ambient


@@ -3,7 +3,6 @@
import torch
import torch.nn.functional as F
from pytorch3d.structures.textures import Textures
from .utils import interpolate_face_attributes
@@ -75,9 +74,7 @@ def interpolate_texture_map(fragments, meshes) -> torch.Tensor:
# right-bottom pixel of input.
pixel_uvs = pixel_uvs * 2.0 - 1.0
texture_maps = torch.flip(
texture_maps, [2]
) # flip y axis of the texture map
texture_maps = torch.flip(texture_maps, [2]) # flip y axis of the texture map
if texture_maps.device != pixel_uvs.device:
texture_maps = texture_maps.to(pixel_uvs.device)
texels = F.grid_sample(texture_maps, pixel_uvs, align_corners=False)
@@ -107,9 +104,7 @@ def interpolate_vertex_colors(fragments, meshes) -> torch.Tensor:
There will be one C dimensional value for each element in
fragments.pix_to_face.
"""
vertex_textures = meshes.textures.verts_rgb_padded().reshape(
-1, 3
) # (V, C)
vertex_textures = meshes.textures.verts_rgb_padded().reshape(-1, 3) # (V, C)
vertex_textures = vertex_textures[meshes.verts_padded_to_packed_idx(), :]
faces_packed = meshes.faces_packed()
faces_textures = vertex_textures[faces_packed] # (F, 3, C)


@@ -92,8 +92,6 @@ def _interpolate_zbuf(
verts = meshes.verts_packed()
faces = meshes.faces_packed()
faces_verts_z = verts[faces][..., 2][..., None] # (F, 3, 1)
return interpolate_face_attributes(
pix_to_face, barycentric_coords, faces_verts_z
)[
return interpolate_face_attributes(pix_to_face, barycentric_coords, faces_verts_z)[
..., 0
] # (1, H, W, K)


@@ -5,4 +5,5 @@ from .rasterize_points import rasterize_points
from .rasterizer import PointsRasterizationSettings, PointsRasterizer
from .renderer import PointsRenderer
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@@ -5,6 +5,7 @@ import torch.nn as nn
from ..compositing import CompositeParams, alpha_composite, norm_weighted_sum
# A compositor should take as input 3D points and some corresponding information.
# Given this information, the compositor can:
# - blend colors across the top K vertices at a pixel
@@ -19,15 +20,11 @@ class AlphaCompositor(nn.Module):
super().__init__()
self.composite_params = (
composite_params
if composite_params is not None
else CompositeParams()
composite_params if composite_params is not None else CompositeParams()
)
def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor:
images = alpha_composite(
fragments, alphas, ptclds, self.composite_params
)
images = alpha_composite(fragments, alphas, ptclds, self.composite_params)
return images
@@ -39,13 +36,9 @@ class NormWeightedCompositor(nn.Module):
def __init__(self, composite_params=None):
super().__init__()
self.composite_params = (
composite_params
if composite_params is not None
else CompositeParams()
composite_params if composite_params is not None else CompositeParams()
)
def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor:
images = norm_weighted_sum(
fragments, alphas, ptclds, self.composite_params
)
images = norm_weighted_sum(fragments, alphas, ptclds, self.composite_params)
return images


@@ -1,8 +1,8 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Optional
import torch
import torch
from pytorch3d import _C
from pytorch3d.renderer.mesh.rasterize_meshes import pix_to_ndc
@@ -155,10 +155,7 @@ class _RasterizePoints(torch.autograd.Function):
def rasterize_points_python(
pointclouds,
image_size: int = 256,
radius: float = 0.01,
points_per_pixel: int = 8,
pointclouds, image_size: int = 256, radius: float = 0.01, points_per_pixel: int = 8
):
"""
Naive pure PyTorch implementation of pointcloud rasterization.
@@ -177,9 +174,7 @@ def rasterize_points_python(
point_idxs = torch.full(
(N, S, S, K), fill_value=-1, dtype=torch.int32, device=device
)
zbuf = torch.full(
(N, S, S, K), fill_value=-1, dtype=torch.float32, device=device
)
zbuf = torch.full((N, S, S, K), fill_value=-1, dtype=torch.float32, device=device)
pix_dists = torch.full(
(N, S, S, K), fill_value=-1, dtype=torch.float32, device=device
)


@@ -3,6 +3,7 @@
from typing import NamedTuple, Optional
import torch
import torch.nn as nn


@@ -5,6 +5,7 @@
import torch
import torch.nn as nn
# A renderer class should be initialized with a
# function for rasterization and a function for compositing.
# The rasterizer should:


@@ -1,9 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import warnings
from typing import Any, Union
import numpy as np
import torch
@@ -45,10 +46,7 @@ class TensorAccessor(object):
# Convert the attribute to a tensor if it is not a tensor.
if not torch.is_tensor(value):
value = torch.tensor(
value,
device=v.device,
dtype=v.dtype,
requires_grad=v.requires_grad,
value, device=v.device, dtype=v.dtype, requires_grad=v.requires_grad
)
# Check the shapes match the existing shape and the shape of the index.
@@ -253,9 +251,7 @@ class TensorProperties(object):
return self
def format_tensor(
input, dtype=torch.float32, device: str = "cpu"
) -> torch.Tensor:
def format_tensor(input, dtype=torch.float32, device: str = "cpu") -> torch.Tensor:
"""
Helper function for converting a scalar value to a tensor.
@@ -276,9 +272,7 @@ def format_tensor(
return input
def convert_to_tensors_and_broadcast(
*args, dtype=torch.float32, device: str = "cpu"
):
def convert_to_tensors_and_broadcast(*args, dtype=torch.float32, device: str = "cpu"):
"""
Helper function to handle parsing an arbitrary number of inputs (*args)
which all need to have the same batch dimension.


@@ -3,11 +3,7 @@
from .meshes import Meshes, join_meshes
from .pointclouds import Pointclouds
from .textures import Textures
from .utils import (
list_to_packed,
list_to_padded,
packed_to_list,
padded_to_list,
)
from .utils import list_to_packed, list_to_padded, packed_to_list, padded_to_list
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@@ -1,6 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List
import torch
from . import utils as struct_utils
@@ -314,14 +315,11 @@ class Meshes(object):
if isinstance(verts, list) and isinstance(faces, list):
self._verts_list = verts
self._faces_list = [
f[f.gt(-1).all(1)].to(torch.int64) if len(f) > 0 else f
for f in faces
f[f.gt(-1).all(1)].to(torch.int64) if len(f) > 0 else f for f in faces
]
self._N = len(self._verts_list)
self.device = torch.device("cpu")
self.valid = torch.zeros(
(self._N,), dtype=torch.bool, device=self.device
)
self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)
if self._N > 0:
self.device = self._verts_list[0].device
self._num_verts_per_mesh = torch.tensor(
@@ -348,18 +346,14 @@
elif torch.is_tensor(verts) and torch.is_tensor(faces):
if verts.size(2) != 3 and faces.size(2) != 3:
raise ValueError(
"Verts and Faces tensors have incorrect dimensions."
)
raise ValueError("Verts and Faces tensors have incorrect dimensions.")
self._verts_padded = verts
self._faces_padded = faces.to(torch.int64)
self._N = self._verts_padded.shape[0]
self._V = self._verts_padded.shape[1]
self.device = self._verts_padded.device
self.valid = torch.zeros(
(self._N,), dtype=torch.bool, device=self.device
)
self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)
if self._N > 0:
# Check that padded faces - which have value -1 - are at the
# end of the tensors
@@ -400,12 +394,8 @@
# Set the num verts/faces on the textures if present.
if self.textures is not None:
self.textures._num_faces_per_mesh = (
self._num_faces_per_mesh.tolist()
)
self.textures._num_verts_per_mesh = (
self._num_verts_per_mesh.tolist()
)
self.textures._num_faces_per_mesh = self._num_faces_per_mesh.tolist()
self.textures._num_verts_per_mesh = self._num_verts_per_mesh.tolist()
def __len__(self):
return self._N
@@ -665,8 +655,7 @@
self._verts_padded_to_packed_idx = torch.cat(
[
torch.arange(v, dtype=torch.int64, device=self.device)
+ i * self._V
torch.arange(v, dtype=torch.int64, device=self.device) + i * self._V
for (i, v) in enumerate(self._num_verts_per_mesh)
],
dim=0,
@@ -706,15 +695,10 @@
tensor of normals of shape (N, max(V_n), 3).
"""
if self.isempty():
return torch.zeros(
(self._N, 0, 3), dtype=torch.float32, device=self.device
)
return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device)
verts_normals_list = self.verts_normals_list()
return struct_utils.list_to_padded(
verts_normals_list,
(self._V, 3),
pad_value=0.0,
equisized=self.equisized,
verts_normals_list, (self._V, 3), pad_value=0.0, equisized=self.equisized
)
def faces_normals_packed(self):
@@ -750,15 +734,10 @@
tensor of normals of shape (N, max(F_n), 3).
"""
if self.isempty():
return torch.zeros(
(self._N, 0, 3), dtype=torch.float32, device=self.device
)
return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device)
faces_normals_list = self.faces_normals_list()
return struct_utils.list_to_padded(
faces_normals_list,
(self._F, 3),
pad_value=0.0,
equisized=self.equisized,
faces_normals_list, (self._F, 3), pad_value=0.0, equisized=self.equisized
)
def faces_areas_packed(self):
@@ -797,9 +776,7 @@
return
faces_packed = self.faces_packed()
verts_packed = self.verts_packed()
face_areas, face_normals = mesh_face_areas_normals(
verts_packed, faces_packed
)
face_areas, face_normals = mesh_face_areas_normals(verts_packed, faces_packed)
self._faces_areas_packed = face_areas
self._faces_normals_packed = face_normals
@@ -813,9 +790,7 @@
refresh: Set to True to force recomputation of vertex normals.
Default: False.
"""
if not (
refresh or any(v is None for v in [self._verts_normals_packed])
):
if not (refresh or any(v is None for v in [self._verts_normals_packed])):
return
if self.isempty():
@@ -867,8 +842,7 @@
Computes the padded version of meshes from verts_list and faces_list.
"""
if not (
refresh
or any(v is None for v in [self._verts_padded, self._faces_padded])
refresh or any(v is None for v in [self._verts_padded, self._faces_padded])
):
return
@@ -887,16 +861,10 @@
)
else:
self._faces_padded = struct_utils.list_to_padded(
faces_list,
(self._F, 3),
pad_value=-1.0,
equisized=self.equisized,
faces_list, (self._F, 3), pad_value=-1.0, equisized=self.equisized
)
self._verts_padded = struct_utils.list_to_padded(
verts_list,
(self._V, 3),
pad_value=0.0,
equisized=self.equisized,
verts_list, (self._V, 3), pad_value=0.0, equisized=self.equisized
)
# TODO(nikhilar) Improve performance of _compute_packed.
@@ -1055,9 +1023,7 @@
face_to_edge = inverse_idxs[face_to_edge]
self._faces_packed_to_edges_packed = face_to_edge
num_edges_per_mesh = torch.zeros(
self._N, dtype=torch.int32, device=self.device
)
num_edges_per_mesh = torch.zeros(self._N, dtype=torch.int32, device=self.device)
ones = torch.ones(1, dtype=torch.int32, device=self.device).expand(
self._edges_packed_to_mesh_idx.shape
)


@@ -176,17 +176,13 @@ class Pointclouds(object):
self._points_list = points
self._N = len(self._points_list)
self.device = torch.device("cpu")
self.valid = torch.zeros(
(self._N,), dtype=torch.bool, device=self.device
)
self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)
self._num_points_per_cloud = []
if self._N > 0:
for p in self._points_list:
if len(p) > 0 and (p.dim() != 2 or p.shape[1] != 3):
raise ValueError(
"Clouds in list must be of shape Px3 or empty"
)
raise ValueError("Clouds in list must be of shape Px3 or empty")
self.device = self._points_list[0].device
num_points_per_cloud = torch.tensor(
@@ -210,9 +206,7 @@
self._N = self._points_padded.shape[0]
self._P = self._points_padded.shape[1]
self.device = self._points_padded.device
self.valid = torch.ones(
(self._N,), dtype=torch.bool, device=self.device
)
self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)
self._num_points_per_cloud = torch.tensor(
[self._P] * self._N, device=self.device
)
@@ -260,9 +254,7 @@
if isinstance(aux_input, list):
if len(aux_input) != self._N:
raise ValueError(
"Points and auxiliary input must be the same length."
)
raise ValueError("Points and auxiliary input must be the same length.")
for p, d in zip(self._num_points_per_cloud, aux_input):
if p != d.shape[0]:
raise ValueError(
@@ -282,9 +274,7 @@
return aux_input, None, aux_input_C
elif torch.is_tensor(aux_input):
if aux_input.dim() != 3:
raise ValueError(
"Auxiliary input tensor has incorrect dimensions."
)
raise ValueError("Auxiliary input tensor has incorrect dimensions.")
if self._N != aux_input.shape[0]:
raise ValueError("Points and inputs must be the same length.")
if self._P != aux_input.shape[1]:
@@ -531,8 +521,7 @@
else:
self._padded_to_packed_idx = torch.cat(
[
torch.arange(v, dtype=torch.int64, device=self.device)
+ i * self._P
torch.arange(v, dtype=torch.int64, device=self.device) + i * self._P
for (i, v) in enumerate(self._num_points_per_cloud)
],
dim=0,
@@ -551,9 +540,7 @@
self._normals_padded, self._features_padded = None, None
if self.isempty():
self._points_padded = torch.zeros(
(self._N, 0, 3), device=self.device
)
self._points_padded = torch.zeros((self._N, 0, 3), device=self.device)
else:
self._points_padded = struct_utils.list_to_padded(
self.points_list(),
@@ -621,9 +608,7 @@
points_list_to_packed = struct_utils.list_to_packed(points_list)
self._points_packed = points_list_to_packed[0]
if not torch.allclose(
self._num_points_per_cloud, points_list_to_packed[1]
):
if not torch.allclose(self._num_points_per_cloud, points_list_to_packed[1]):
raise ValueError("Inconsistent list to packed conversion")
self._cloud_to_packed_first_idx = points_list_to_packed[2]
self._packed_to_cloud_idx = points_list_to_packed[3]
@@ -696,13 +681,9 @@
if other._N > 0:
other._points_list = [v.to(device) for v in other.points_list()]
if other._normals_list is not None:
other._normals_list = [
n.to(device) for n in other.normals_list()
]
other._normals_list = [n.to(device) for n in other.normals_list()]
if other._features_list is not None:
other._features_list = [
f.to(device) for f in other.features_list()
]
other._features_list = [f.to(device) for f in other.features_list()]
for k in self._INTERNAL_TENSORS:
v = getattr(self, k)
if torch.is_tensor(v):
@@ -892,16 +873,11 @@
for features in self.features_list():
new_features_list.extend(features.clone() for _ in range(N))
return Pointclouds(
points=new_points_list,
normals=new_normals_list,
features=new_features_list,
points=new_points_list, normals=new_normals_list, features=new_features_list
)
def update_padded(
self,
new_points_padded,
new_normals_padded=None,
new_features_padded=None,
self, new_points_padded, new_normals_padded=None, new_features_padded=None
):
"""
Returns a Pointcloud structure with updated padded tensors and copies of
@ -920,13 +896,9 @@ class Pointclouds(object):
def check_shapes(x, size):
if x.shape[0] != size[0]:
raise ValueError(
"new values must have the same batch dimension."
)
raise ValueError("new values must have the same batch dimension.")
if x.shape[1] != size[1]:
raise ValueError(
"new values must have the same number of points."
)
raise ValueError("new values must have the same number of points.")
if size[2] is not None:
if x.shape[2] != size[2]:
raise ValueError(


@ -1,6 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List, Optional, Union
import torch
import torchvision.transforms as T
@ -233,11 +234,7 @@ class Textures(object):
if all(
v is not None
for v in [
self._faces_uvs_padded,
self._verts_uvs_padded,
self._maps_padded,
]
for v in [self._faces_uvs_padded, self._verts_uvs_padded, self._maps_padded]
):
new_verts_uvs = _extend_tensor(self._verts_uvs_padded, N)
new_faces_uvs = _extend_tensor(self._faces_uvs_padded, N)


@ -1,6 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List, Union
import torch
@ -38,9 +39,7 @@ def list_to_padded(
pad_dim1 = max(y.shape[1] for y in x if len(y) > 0)
else:
if len(pad_size) != 2:
raise ValueError(
"Pad size must contain target size for 1st and 2nd dim"
)
raise ValueError("Pad size must contain target size for 1st and 2nd dim")
pad_dim0, pad_dim1 = pad_size
N = len(x)
@ -55,9 +54,7 @@ def list_to_padded(
return x_padded
def padded_to_list(
x: torch.Tensor, split_size: Union[list, tuple, None] = None
):
def padded_to_list(x: torch.Tensor, split_size: Union[list, tuple, None] = None):
r"""
Transforms a padded tensor of shape (N, M, K) into a list of N tensors
of shape (Mi, Ki) where (Mi, Ki) is specified in split_size(i), or of shape
@ -81,9 +78,7 @@ def padded_to_list(
N = len(split_size)
if x.shape[0] != N:
raise ValueError(
"Split size must be of same length as inputs first dimension"
)
raise ValueError("Split size must be of same length as inputs first dimension")
for i in range(N):
if isinstance(split_size[i], int):
@ -119,9 +114,7 @@ def list_to_packed(x: List[torch.Tensor]):
"""
N = len(x)
num_items = torch.zeros(N, dtype=torch.int64, device=x[0].device)
item_packed_first_idx = torch.zeros(
N, dtype=torch.int64, device=x[0].device
)
item_packed_first_idx = torch.zeros(N, dtype=torch.int64, device=x[0].device)
item_packed_to_list_idx = []
cur = 0
for i, y in enumerate(x):
@ -187,9 +180,7 @@ def padded_to_packed(
N, M, D = x.shape
if split_size is not None and pad_value is not None:
raise ValueError(
"Only one of split_size or pad_value should be provided."
)
raise ValueError("Only one of split_size or pad_value should be provided.")
x_packed = x.reshape(-1, D) # flatten padded
@ -205,9 +196,7 @@ def padded_to_packed(
# Convert to packed using split sizes
N = len(split_size)
if x.shape[0] != N:
raise ValueError(
"Split size must be of same length as inputs first dimension"
)
raise ValueError("Split size must be of same length as inputs first dimension")
if not all(isinstance(i, int) for i in split_size):
raise ValueError(


@ -22,4 +22,5 @@ from .so3 import (
)
from .transform3d import Rotate, RotateAxisAngle, Scale, Transform3d, Translate
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@ -2,6 +2,7 @@
import functools
from typing import Optional
import torch
@ -155,9 +156,7 @@ def euler_angles_to_matrix(euler_angles, convention: str):
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
matrices = map(
_axis_angle_rotation, convention, torch.unbind(euler_angles, -1)
)
matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1))
return functools.reduce(torch.matmul, matrices)
@ -246,10 +245,7 @@ def matrix_to_euler_angles(matrix, convention: str):
def random_quaternions(
n: int,
dtype: Optional[torch.dtype] = None,
device=None,
requires_grad=False,
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random quaternions representing rotations,
@ -266,19 +262,14 @@ def random_quaternions(
Returns:
Quaternions as tensor of shape (N, 4).
"""
o = torch.randn(
(n, 4), dtype=dtype, device=device, requires_grad=requires_grad
)
o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad)
s = (o * o).sum(1)
o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None]
return o
def random_rotations(
n: int,
dtype: Optional[torch.dtype] = None,
device=None,
requires_grad=False,
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random rotations as 3x3 rotation matrices.


@ -3,6 +3,7 @@
import torch
HAT_INV_SKEW_SYMMETRIC_TOL = 1e-5
@ -65,9 +66,7 @@ def so3_rotation_angle(R, eps: float = 1e-4, cos_angle: bool = False):
rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2]
if ((rot_trace < -1.0 - eps) + (rot_trace > 3.0 + eps)).any():
raise ValueError(
"A matrix has trace outside valid range [-1-eps,3+eps]."
)
raise ValueError("A matrix has trace outside valid range [-1-eps,3+eps].")
# clamp to valid range
rot_trace = torch.clamp(rot_trace, -1.0, 3.0)


@ -3,6 +3,7 @@
import math
import warnings
from typing import Optional
import torch
from .rotation_conversions import _axis_angle_rotation
@ -230,9 +231,7 @@ class Transform3d:
# the transformations with get_matrix(), this correctly
# right-multiplies by the inverse of self._matrix
# at the end of the composition.
tinv._transforms = [
t.inverse() for t in reversed(self._transforms)
]
tinv._transforms = [t.inverse() for t in reversed(self._transforms)]
last = Transform3d(device=self.device)
last._matrix = i_matrix
tinv._transforms.append(last)
@ -334,9 +333,7 @@ class Transform3d:
return self.compose(Scale(device=self.device, *args, **kwargs))
def rotate_axis_angle(self, *args, **kwargs):
return self.compose(
RotateAxisAngle(device=self.device, *args, **kwargs)
)
return self.compose(RotateAxisAngle(device=self.device, *args, **kwargs))
def clone(self):
"""
@ -388,9 +385,7 @@ class Transform3d:
class Translate(Transform3d):
def __init__(
self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"
):
def __init__(self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"):
"""
Create a new Transform3d representing 3D translations.
@ -424,9 +419,7 @@ class Translate(Transform3d):
class Scale(Transform3d):
def __init__(
self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"
):
def __init__(self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"):
"""
A Transform3d representing a scaling operation, with different scale
factors along each coordinate axis.
@ -444,9 +437,7 @@ class Scale(Transform3d):
- 1D torch tensor
"""
super().__init__(device=device)
xyz = _handle_input(
x, y, z, dtype, device, "scale", allow_singleton=True
)
xyz = _handle_input(x, y, z, dtype, device, "scale", allow_singleton=True)
N = xyz.shape[0]
# TODO: Can we do this all in one go somehow?
@ -469,11 +460,7 @@ class Scale(Transform3d):
class Rotate(Transform3d):
def __init__(
self,
R,
dtype=torch.float32,
device: str = "cpu",
orthogonal_tol: float = 1e-5,
self, R, dtype=torch.float32, device: str = "cpu", orthogonal_tol: float = 1e-5
):
"""
Create a new Transform3d representing 3D rotation using a rotation
@ -562,9 +549,7 @@ def _handle_coord(c, dtype, device):
return c
def _handle_input(
x, y, z, dtype, device, name: str, allow_singleton: bool = False
):
def _handle_input(x, y, z, dtype, device, name: str, allow_singleton: bool = False):
"""
Helper function to handle parsing logic for building transforms. The output
is always a tensor of shape (N, 3), but there are several types of allowed


@ -3,4 +3,5 @@
from .ico_sphere import ico_sphere
from .torus import torus
__all__ = [k for k in globals().keys() if not k.startswith("_")]


@ -2,10 +2,10 @@
import torch
from pytorch3d.ops.subdivide_meshes import SubdivideMeshes
from pytorch3d.structures.meshes import Meshes
# Vertex coordinates for a level 0 ico-sphere.
_ico_verts0 = [
[-0.5257, 0.8507, 0.0000],


@ -3,8 +3,8 @@
from itertools import tee
from math import cos, pi, sin
from typing import Iterator, Optional, Tuple
import torch
import torch
from pytorch3d.structures.meshes import Meshes
@ -16,11 +16,7 @@ def _make_pair_range(N: int) -> Iterator[Tuple[int, int]]:
def torus(
r: float,
R: float,
sides: int,
rings: int,
device: Optional[torch.device] = None,
r: float, R: float, sides: int, rings: int, device: Optional[torch.device] = None
) -> Meshes:
"""
Create vertices and faces for a torus.


@ -4,10 +4,12 @@
import argparse
import json
import os
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter, ScriptExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
@ -41,9 +43,7 @@ def gen_tutorials(repo_dir: str) -> None:
Also create ipynb and py versions of tutorial in Docusaurus site for
download.
"""
with open(
os.path.join(repo_dir, "website", "tutorials.json"), "r"
) as infile:
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.loads(infile.read())
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
@ -107,10 +107,7 @@ if __name__ == "__main__":
description="Generate JS, HTML, ipynb, and py files for tutorials."
)
parser.add_argument(
"--repo_dir",
metavar="path",
required=True,
help="PyTorch3D repo directory.",
"--repo_dir", metavar="path", required=True, help="PyTorch3D repo directory."
)
args = parser.parse_args()
gen_tutorials(args.repo_dir)


@ -3,8 +3,9 @@
import glob
import os
from setuptools import find_packages, setup
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension


@ -2,8 +2,8 @@
from itertools import product
from fvcore.common.benchmark import benchmark
from fvcore.common.benchmark import benchmark
from test_blending import TestBlending
@ -18,12 +18,7 @@ def bm_blending() -> None:
for case in test_cases:
n, s, k, d = case
kwargs_list.append(
{
"num_meshes": n,
"image_size": s,
"faces_per_pixel": k,
"device": d,
}
{"num_meshes": n, "image_size": s, "faces_per_pixel": k, "device": d}
)
benchmark(


@ -3,7 +3,6 @@
import torch
from fvcore.common.benchmark import benchmark
from test_chamfer import TestChamfer
@ -25,9 +24,4 @@ def bm_chamfer() -> None:
{"batch_size": 1, "P1": 1000, "P2": 3000, "return_normals": False},
{"batch_size": 1, "P1": 1000, "P2": 30000, "return_normals": True},
]
benchmark(
TestChamfer.chamfer_with_init,
"CHAMFER",
kwargs_list,
warmup_iters=1,
)
benchmark(TestChamfer.chamfer_with_init, "CHAMFER", kwargs_list, warmup_iters=1)


@ -1,7 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from fvcore.common.benchmark import benchmark
from test_cubify import TestCubify
@ -11,6 +10,4 @@ def bm_cubify() -> None:
{"batch_size": 64, "V": 16},
{"batch_size": 16, "V": 32},
]
benchmark(
TestCubify.cubify_with_init, "CUBIFY", kwargs_list, warmup_iters=1
)
benchmark(TestCubify.cubify_with_init, "CUBIFY", kwargs_list, warmup_iters=1)


@ -2,9 +2,9 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_face_areas_normals import TestFaceAreasNormals


@ -2,9 +2,9 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_graph_conv import TestGraphConv


@ -1,9 +1,9 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from pytorch3d import _C
from pytorch3d.ops.knn import _knn_points_idx_naive
@ -32,9 +32,7 @@ def benchmark_knn_cuda_versions() -> None:
knn_kwargs.append({"N": N, "D": D, "P": P, "K": K, "v": version})
for N, P, D in product(Ns, Ps, Ds):
nn_kwargs.append({"N": N, "D": D, "P": P})
benchmark(
knn_cuda_with_init, "KNN_CUDA_VERSIONS", knn_kwargs, warmup_iters=1
)
benchmark(knn_cuda_with_init, "KNN_CUDA_VERSIONS", knn_kwargs, warmup_iters=1)
benchmark(nn_cuda_with_init, "NN_CUDA", nn_kwargs, warmup_iters=1)
@ -50,10 +48,7 @@ def benchmark_knn_cuda_vs_naive() -> None:
if P <= 4096:
naive_kwargs.append({"N": N, "D": D, "P": P, "K": K})
benchmark(
knn_python_cuda_with_init,
"KNN_CUDA_PYTHON",
naive_kwargs,
warmup_iters=1,
knn_python_cuda_with_init, "KNN_CUDA_PYTHON", naive_kwargs, warmup_iters=1
)
benchmark(knn_cuda_with_init, "KNN_CUDA", knn_kwargs, warmup_iters=1)
@ -68,9 +63,7 @@ def benchmark_knn_cpu() -> None:
knn_kwargs.append({"N": N, "D": D, "P": P, "K": K})
for N, P, D in product(Ns, Ps, Ds):
nn_kwargs.append({"N": N, "D": D, "P": P})
benchmark(
knn_python_cpu_with_init, "KNN_CPU_PYTHON", knn_kwargs, warmup_iters=1
)
benchmark(knn_python_cpu_with_init, "KNN_CPU_PYTHON", knn_kwargs, warmup_iters=1)
benchmark(knn_cpu_with_init, "KNN_CPU_CPP", knn_kwargs, warmup_iters=1)
benchmark(nn_cpu_with_init, "NN_CPU_CPP", nn_kwargs, warmup_iters=1)


@ -5,6 +5,7 @@ import glob
import importlib
from os.path import basename, dirname, isfile, join, sys
if __name__ == "__main__":
# pyre-ignore[16]
if len(sys.argv) > 1:
@ -25,7 +26,5 @@ if __name__ == "__main__":
for attr in dir(module):
# Run all the functions with names "bm_*" in the module.
if attr.startswith("bm_"):
print(
"Running benchmarks for " + module_name + "/" + attr + "..."
)
print("Running benchmarks for " + module_name + "/" + attr + "...")
getattr(module, attr)()


@ -2,8 +2,8 @@
from itertools import product
from fvcore.common.benchmark import benchmark
from fvcore.common.benchmark import benchmark
from test_mesh_edge_loss import TestMeshEdgeLoss
@ -17,8 +17,5 @@ def bm_mesh_edge_loss() -> None:
n, v, f = case
kwargs_list.append({"num_meshes": n, "max_v": v, "max_f": f})
benchmark(
TestMeshEdgeLoss.mesh_edge_loss,
"MESH_EDGE_LOSS",
kwargs_list,
warmup_iters=1,
TestMeshEdgeLoss.mesh_edge_loss, "MESH_EDGE_LOSS", kwargs_list, warmup_iters=1
)


@ -1,7 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from fvcore.common.benchmark import benchmark
from test_obj_io import TestMeshObjIO
from test_ply_io import TestMeshPlyIO


@ -2,9 +2,9 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_mesh_laplacian_smoothing import TestLaplacianSmoothing


@ -2,9 +2,9 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_mesh_normal_consistency import TestMeshNormalConsistency


@ -2,9 +2,9 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_meshes import TestMeshes
@ -20,9 +20,7 @@ def bm_compute_packed_padded_meshes() -> None:
test_cases = product(num_meshes, max_v, max_f, devices)
for case in test_cases:
n, v, f, d = case
kwargs_list.append(
{"num_meshes": n, "max_v": v, "max_f": f, "device": d}
)
kwargs_list.append({"num_meshes": n, "max_v": v, "max_f": f, "device": d})
benchmark(
TestMeshes.compute_packed_with_init,
"COMPUTE_PACKED",


@ -1,9 +1,9 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_nearest_neighbor_points import TestNearestNeighborPoints


@ -2,9 +2,9 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_packed_to_padded import TestPackedToPadded
@ -23,13 +23,7 @@ def bm_packed_to_padded() -> None:
for case in test_cases:
n, v, f, d, b = case
kwargs_list.append(
{
"num_meshes": n,
"num_verts": v,
"num_faces": f,
"num_d": d,
"device": b,
}
{"num_meshes": n, "num_verts": v, "num_faces": f, "num_d": d, "device": b}
)
benchmark(
TestPackedToPadded.packed_to_padded_with_init,


@ -2,8 +2,8 @@
from itertools import product
from fvcore.common.benchmark import benchmark
from fvcore.common.benchmark import benchmark
from test_pointclouds import TestPointclouds


@ -2,11 +2,12 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_rasterize_meshes import TestRasterizeMeshes
# ico levels:
# 0: (12 verts, 20 faces)
# 1: (42 verts, 80 faces)
@ -39,12 +40,7 @@ def bm_rasterize_meshes() -> None:
for case in test_cases:
n, ic, im, b = case
kwargs_list.append(
{
"num_meshes": n,
"ico_level": ic,
"image_size": im,
"blur_radius": b,
}
{"num_meshes": n, "ico_level": ic, "image_size": im, "blur_radius": b}
)
benchmark(
TestRasterizeMeshes.rasterize_meshes_cpu_with_init,
@ -63,9 +59,7 @@ def bm_rasterize_meshes() -> None:
test_cases = product(num_meshes, ico_level, image_size, blur, bin_size)
# only keep cases where bin_size == 0 or image_size / bin_size < 16
test_cases = [
elem
for elem in test_cases
if (elem[-1] == 0 or elem[-3] / elem[-1] < 16)
elem for elem in test_cases if (elem[-1] == 0 or elem[-3] / elem[-1] < 16)
]
for case in test_cases:
n, ic, im, b, bn = case


@ -3,7 +3,6 @@
import torch
from fvcore.common.benchmark import benchmark
from pytorch3d.renderer.points.rasterize_points import (
rasterize_points,
rasterize_points_python,
@ -40,9 +39,7 @@ def bm_python_vs_cpu() -> None:
{"N": 1, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3},
{"N": 2, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3},
]
benchmark(
_bm_python_with_init, "RASTERIZE_PYTHON", kwargs_list, warmup_iters=1
)
benchmark(_bm_python_with_init, "RASTERIZE_PYTHON", kwargs_list, warmup_iters=1)
benchmark(_bm_cpu_with_init, "RASTERIZE_CPU", kwargs_list, warmup_iters=1)
kwargs_list = [
{"N": 2, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3},


@ -2,9 +2,9 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_sample_points_from_meshes import TestSamplePoints


@ -1,7 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from fvcore.common.benchmark import benchmark
from test_so3 import TestSO3


@ -2,8 +2,8 @@
from itertools import product
from fvcore.common.benchmark import benchmark
from fvcore.common.benchmark import benchmark
from test_subdivide_meshes import TestSubdivideMeshes


@ -2,9 +2,9 @@
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from test_vert_align import TestVertAlign
@ -25,8 +25,5 @@ def bm_vert_align() -> None:
)
benchmark(
TestVertAlign.vert_align_with_init,
"VERT_ALIGN",
kwargs_list,
warmup_iters=1,
TestVertAlign.vert_align_with_init, "VERT_ALIGN", kwargs_list, warmup_iters=1
)


@ -1,8 +1,9 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import numpy as np
import torch
@ -11,17 +12,13 @@ class TestCaseMixin(unittest.TestCase):
"""
Verify that tensor1 and tensor2 have their data in distinct locations.
"""
self.assertNotEqual(
tensor1.storage().data_ptr(), tensor2.storage().data_ptr()
)
self.assertNotEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())
def assertNotSeparate(self, tensor1, tensor2) -> None:
"""
Verify that tensor1 and tensor2 have their data in the same locations.
"""
self.assertEqual(
tensor1.storage().data_ptr(), tensor2.storage().data_ptr()
)
self.assertEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())
def assertAllSeparate(self, tensor_list) -> None:
"""
@ -57,7 +54,5 @@ class TestCaseMixin(unittest.TestCase):
input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
)
else:
close = np.allclose(
input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
)
close = np.allclose(input, other, rtol=rtol, atol=atol, equal_nan=equal_nan)
self.assertTrue(close)


@ -1,9 +1,9 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
import numpy as np
import torch
from pytorch3d.renderer.blending import (
BlendParams,
hard_rgb_blend,
@ -43,9 +43,7 @@ def sigmoid_blend_naive_loop(colors, fragments, blend_params):
return pixel_colors
def sigmoid_blend_naive_loop_backward(
grad_images, images, fragments, blend_params
):
def sigmoid_blend_naive_loop_backward(grad_images, images, fragments, blend_params):
pix_to_face = fragments.pix_to_face
dists = fragments.dists
sigma = blend_params.sigma
@ -135,14 +133,7 @@ class TestBlending(unittest.TestCase):
torch.manual_seed(42)
def _compare_impls(
self,
fn1,
fn2,
args1,
args2,
grad_var1=None,
grad_var2=None,
compare_grads=True,
self, fn1, fn2, args1, args2, grad_var1=None, grad_var2=None, compare_grads=True
):
out1 = fn1(*args1)
@ -160,9 +151,7 @@ class TestBlending(unittest.TestCase):
(out2 * grad_out).sum().backward()
self.assertTrue(hasattr(grad_var2, "grad"))
self.assertTrue(
torch.allclose(
grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5
)
torch.allclose(grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5)
)
def test_hard_rgb_blend(self):
@ -199,9 +188,7 @@ class TestBlending(unittest.TestCase):
# # (-) means inside triangle, (+) means outside triangle.
random_sign_flip = torch.rand((N, S, S, K))
random_sign_flip[random_sign_flip > 0.5] *= -1.0
dists = torch.randn(
size=(N, S, S, K), requires_grad=True, device=device
)
dists = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
fragments = Fragments(
pix_to_face=pix_to_face,
bary_coords=empty, # dummy
@ -238,9 +225,7 @@ class TestBlending(unittest.TestCase):
# # (-) means inside triangle, (+) means outside triangle.
random_sign_flip = torch.rand((N, S, S, K))
random_sign_flip[random_sign_flip > 0.5] *= -1.0
dists1 = torch.randn(
size=(N, S, S, K), requires_grad=True, device=device
)
dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
dists2 = dists1.detach().clone()
dists2.requires_grad = True
@ -276,9 +261,7 @@ class TestBlending(unittest.TestCase):
# of the image with surrounding padded values.
N, S, K = 1, 8, 2
device = torch.device("cuda")
pix_to_face = -torch.ones(
(N, S, S, K), dtype=torch.int64, device=device
)
pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64, device=device)
h = int(S / 2)
pix_to_face_full = torch.randint(
size=(N, h, h, K), low=0, high=100, device=device
@ -294,9 +277,7 @@ class TestBlending(unittest.TestCase):
# randomly flip the sign of the distance
# (-) means inside triangle, (+) means outside triangle.
dists1 = (
torch.randn(size=(N, S, S, K), device=device) * random_sign_flip
)
dists1 = torch.randn(size=(N, S, S, K), device=device) * random_sign_flip
dists2 = dists1.clone()
zbuf2 = zbuf1.clone()
dists1.requires_grad = True
@ -353,9 +334,7 @@ class TestBlending(unittest.TestCase):
# # (-) means inside triangle, (+) means outside triangle.
random_sign_flip = torch.rand((N, S, S, K), device=device)
random_sign_flip[random_sign_flip > 0.5] *= -1.0
dists1 = torch.randn(
size=(N, S, S, K), requires_grad=True, device=device
)
dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
fragments = Fragments(
pix_to_face=pix_to_face,
bary_coords=empty, # dummy
@ -398,15 +377,10 @@ class TestBlending(unittest.TestCase):
# # (-) means inside triangle, (+) means outside triangle.
random_sign_flip = torch.rand((N, S, S, K), device=device)
random_sign_flip[random_sign_flip > 0.5] *= -1.0
dists1 = torch.randn(
size=(N, S, S, K), requires_grad=True, device=device
)
dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
zbuf = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
fragments = Fragments(
pix_to_face=pix_to_face,
bary_coords=empty, # dummy
zbuf=zbuf,
dists=dists1,
pix_to_face=pix_to_face, bary_coords=empty, zbuf=zbuf, dists=dists1 # dummy
)
blend_params = BlendParams(sigma=1e-3)


@ -3,6 +3,7 @@ import unittest
from collections import Counter
from pathlib import Path
# This file groups together tests which look at the code without running it.
@ -61,6 +62,5 @@ class TestBuild(unittest.TestCase):
if firstline.startswith(("# -*-", "#!")):
firstline = f.readline()
self.assertTrue(
firstline.endswith(expect),
f"{i} missing copyright header.",
firstline.endswith(expect), f"{i} missing copyright header."
)


@ -26,10 +26,11 @@
# SOFTWARE.
import math
import numpy as np
import unittest
import torch
import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d.renderer.cameras import (
OpenGLOrthographicCameras,
OpenGLPerspectiveCameras,
@ -43,8 +44,6 @@ from pytorch3d.renderer.cameras import (
from pytorch3d.transforms import Transform3d
from pytorch3d.transforms.so3 import so3_exponential_map
from common_testing import TestCaseMixin
# Naive function adapted from SoftRasterizer for test purposes.
def perspective_project_naive(points, fov=60.0):
@ -58,9 +57,7 @@ def perspective_project_naive(points, fov=60.0):
coordinate (no z renormalization)
"""
device = points.device
halfFov = torch.tensor(
(fov / 2) / 180 * np.pi, dtype=torch.float32, device=device
)
halfFov = torch.tensor((fov / 2) / 180 * np.pi, dtype=torch.float32, device=device)
scale = torch.tan(halfFov[None])
scale = scale[:, None]
z = points[:, :, 2]
@ -150,9 +147,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
dist = 2.7
elev = 90.0
azim = 0.0
expected_position = torch.tensor(
[0.0, 2.7, 0.0], dtype=torch.float32
).view(1, 3)
expected_position = torch.tensor([0.0, 2.7, 0.0], dtype=torch.float32).view(
1, 3
)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertClose(position, expected_position, atol=2e-7)
@ -171,9 +168,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
dist = torch.tensor(2.7)
elev = torch.tensor(0.0)
azim = torch.tensor(90.0)
expected_position = torch.tensor(
[2.7, 0.0, 0.0], dtype=torch.float32
).view(1, 3)
expected_position = torch.tensor([2.7, 0.0, 0.0], dtype=torch.float32).view(
1, 3
)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertClose(position, expected_position, atol=2e-7)
@ -181,9 +178,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
dist = 2.7
elev = torch.tensor(0.0)
azim = 90.0
expected_position = torch.tensor(
[2.7, 0.0, 0.0], dtype=torch.float32
).view(1, 3)
expected_position = torch.tensor([2.7, 0.0, 0.0], dtype=torch.float32).view(
1, 3
)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertClose(position, expected_position, atol=2e-7)
@ -228,8 +225,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
elev = torch.tensor([0.0])
azim = torch.tensor([90.0])
expected_position = torch.tensor(
[[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]],
dtype=torch.float32,
[[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], dtype=torch.float32
)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertClose(position, expected_position, atol=3e-7)
@ -239,8 +235,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
elev = 0.0
azim = torch.tensor(90.0)
expected_position = torch.tensor(
[[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]],
dtype=torch.float32,
[[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], dtype=torch.float32
)
position = camera_position_from_spherical_angles(dist, elev, azim)
self.assertClose(position, expected_position, atol=3e-7)
@ -364,9 +359,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
):
cam = cam_type(R=R, T=T)
RT_class = cam.get_world_to_view_transform()
self.assertTrue(
torch.allclose(RT.get_matrix(), RT_class.get_matrix())
)
self.assertTrue(torch.allclose(RT.get_matrix(), RT_class.get_matrix()))
self.assertTrue(isinstance(RT, Transform3d))
@ -539,9 +532,7 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
# applying the scale puts the z coordinate at the far clipping plane
# so the z is mapped to 1.0
projected_verts = torch.tensor([2, 1, 1], dtype=torch.float32)
cameras = OpenGLOrthographicCameras(
znear=1.0, zfar=10.0, scale_xyz=scale
)
cameras = OpenGLOrthographicCameras(znear=1.0, zfar=10.0, scale_xyz=scale)
P = cameras.get_projection_transform()
v1 = P.transform_points(vertices)
v2 = orthographic_project_naive(vertices, scale)
@ -578,9 +569,7 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
far = torch.tensor([10.0])
near = 1.0
scale = torch.tensor([[1.0, 1.0, 1.0]], requires_grad=True)
cameras = OpenGLOrthographicCameras(
znear=near, zfar=far, scale_xyz=scale
)
cameras = OpenGLOrthographicCameras(znear=near, zfar=far, scale_xyz=scale)
P = cameras.get_projection_transform()
vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32)
vertices_batch = vertices[None, None, :]
@ -683,15 +672,11 @@ class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase):
self.assertClose(v3[..., :2], v2[..., :2])
def test_perspective_kwargs(self):
cameras = SfMPerspectiveCameras(
focal_length=5.0, principal_point=((2.5, 2.5),)
)
cameras = SfMPerspectiveCameras(focal_length=5.0, principal_point=((2.5, 2.5),))
P = cameras.get_projection_transform(
focal_length=2.0, principal_point=((2.5, 3.5),)
)
vertices = torch.randn([3, 4, 3], dtype=torch.float32)
v1 = P.transform_points(vertices)
v2 = sfm_perspective_project_naive(
vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5
)
v2 = sfm_perspective_project_naive(vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5)
self.assertClose(v1, v2)


@ -1,12 +1,11 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch.nn.functional as F
from pytorch3d.loss import chamfer_distance
from common_testing import TestCaseMixin
from pytorch3d.loss import chamfer_distance
class TestChamfer(TestCaseMixin, unittest.TestCase):
@ -19,14 +18,10 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
"""
device = torch.device("cuda:0")
p1 = torch.rand((batch_size, P1, 3), dtype=torch.float32, device=device)
p1_normals = torch.rand(
(batch_size, P1, 3), dtype=torch.float32, device=device
)
p1_normals = torch.rand((batch_size, P1, 3), dtype=torch.float32, device=device)
p1_normals = p1_normals / p1_normals.norm(dim=2, p=2, keepdim=True)
p2 = torch.rand((batch_size, P2, 3), dtype=torch.float32, device=device)
p2_normals = torch.rand(
(batch_size, P2, 3), dtype=torch.float32, device=device
)
p2_normals = torch.rand((batch_size, P2, 3), dtype=torch.float32, device=device)
p2_normals = p2_normals / p2_normals.norm(dim=2, p=2, keepdim=True)
weights = torch.rand((batch_size,), dtype=torch.float32, device=device)
@ -47,9 +42,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
for n in range(N):
for i1 in range(P1):
for i2 in range(P2):
dist[n, i1, i2] = torch.sum(
(p1[n, i1, :] - p2[n, i2, :]) ** 2
)
dist[n, i1, i2] = torch.sum((p1[n, i1, :] - p2[n, i2, :]) ** 2)
loss = [
torch.min(dist, dim=2)[0], # (N, P1)
@ -146,11 +139,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
# Error when point_reduction = "none" and batch_reduction = "none".
with self.assertRaises(ValueError):
chamfer_distance(
p1,
p2,
weights=weights,
batch_reduction="none",
point_reduction="none",
p1, p2, weights=weights, batch_reduction="none", point_reduction="none"
)
# Error when batch_reduction is not in ["none", "mean", "sum"].
@ -339,9 +328,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
loss, loss_norm = chamfer_distance(p1, p2, weights=weights)
@staticmethod
def chamfer_with_init(
batch_size: int, P1: int, P2: int, return_normals: bool
):
def chamfer_with_init(batch_size: int, P1: int, P2: int, return_normals: bool):
p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds(
batch_size, P1, P2
)


@ -1,8 +1,8 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch
from pytorch3d.renderer.compositing import (
alpha_composite,
norm_weighted_sum,
@ -37,9 +37,7 @@ class TestAccumulatePoints(unittest.TestCase):
continue
alpha = alphas[b, k, j, i]
output[b, c, j, i] += (
features[c, n_idx] * alpha * t_alpha
)
output[b, c, j, i] += features[c, n_idx] * alpha * t_alpha
t_alpha = (1 - alpha) * t_alpha
return output
@ -105,17 +103,13 @@ class TestAccumulatePoints(unittest.TestCase):
continue
alpha = alphas[b, k, j, i]
output[b, c, j, i] += (
features[c, n_idx] * alpha / t_alpha
)
output[b, c, j, i] += features[c, n_idx] * alpha / t_alpha
return output
def test_python(self):
device = torch.device("cpu")
self._simple_alphacomposite(
self.accumulate_alphacomposite_python, device
)
self._simple_alphacomposite(self.accumulate_alphacomposite_python, device)
self._simple_wsum(self.accumulate_weightedsum_python, device)
self._simple_wsumnorm(self.accumulate_weightedsumnorm_python, device)
@ -138,9 +132,7 @@ class TestAccumulatePoints(unittest.TestCase):
self._python_vs_cpu_vs_cuda(
self.accumulate_weightedsumnorm_python, norm_weighted_sum
)
self._python_vs_cpu_vs_cuda(
self.accumulate_weightedsum_python, weighted_sum
)
self._python_vs_cpu_vs_cuda(self.accumulate_weightedsum_python, weighted_sum)
def _python_vs_cpu_vs_cuda(self, accumulate_func_python, accumulate_func):
torch.manual_seed(231)
@ -208,15 +200,11 @@ class TestAccumulatePoints(unittest.TestCase):
grads2 = [gradsi.grad.data.clone().cpu() for gradsi in grads2]
for i in range(0, len(grads1)):
self.assertTrue(
torch.allclose(grads1[i].cpu(), grads2[i].cpu(), atol=1e-6)
)
self.assertTrue(torch.allclose(grads1[i].cpu(), grads2[i].cpu(), atol=1e-6))
def _simple_wsum(self, accum_func, device):
# Initialise variables
features = torch.Tensor(
[[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]
).to(device)
features = torch.Tensor([[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]).to(device)
alphas = torch.Tensor(
[
@ -285,15 +273,11 @@ class TestAccumulatePoints(unittest.TestCase):
]
).to(device)
self.assertTrue(
torch.allclose(result.cpu(), true_result.cpu(), rtol=1e-3)
)
self.assertTrue(torch.allclose(result.cpu(), true_result.cpu(), rtol=1e-3))
def _simple_wsumnorm(self, accum_func, device):
# Initialise variables
features = torch.Tensor(
[[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]
).to(device)
features = torch.Tensor([[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]).to(device)
alphas = torch.Tensor(
[
@ -362,15 +346,11 @@ class TestAccumulatePoints(unittest.TestCase):
]
).to(device)
self.assertTrue(
torch.allclose(result.cpu(), true_result.cpu(), rtol=1e-3)
)
self.assertTrue(torch.allclose(result.cpu(), true_result.cpu(), rtol=1e-3))
def _simple_alphacomposite(self, accum_func, device):
# Initialise variables
features = torch.Tensor(
[[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]
).to(device)
features = torch.Tensor([[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]).to(device)
alphas = torch.Tensor(
[


@ -1,8 +1,8 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch
from pytorch3d.ops import cubify
@ -33,9 +33,7 @@ class TestCubify(unittest.TestCase):
# 1st-check
verts, faces = meshes.get_mesh_verts_faces(0)
self.assertTrue(
torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1]))
)
self.assertTrue(torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1])))
self.assertTrue(
torch.allclose(
verts,
@ -80,9 +78,7 @@ class TestCubify(unittest.TestCase):
)
# 2nd-check
verts, faces = meshes.get_mesh_verts_faces(1)
self.assertTrue(
torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1]))
)
self.assertTrue(torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1])))
self.assertTrue(
torch.allclose(
verts,
@ -275,9 +271,7 @@ class TestCubify(unittest.TestCase):
@staticmethod
def cubify_with_init(batch_size: int, V: int):
device = torch.device("cuda:0")
voxels = torch.rand(
(batch_size, V, V, V), dtype=torch.float32, device=device
)
voxels = torch.rand((batch_size, V, V, V), dtype=torch.float32, device=device)
torch.cuda.synchronize()
def convert():


@ -2,13 +2,12 @@
import unittest
import torch
import torch
from common_testing import TestCaseMixin
from pytorch3d.ops import mesh_face_areas_normals
from pytorch3d.structures.meshes import Meshes
from common_testing import TestCaseMixin
class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
@ -27,10 +26,7 @@ class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
faces_list = []
for _ in range(num_meshes):
verts = torch.rand(
(num_verts, 3),
dtype=torch.float32,
device=device,
requires_grad=True,
(num_verts, 3), dtype=torch.float32, device=device, requires_grad=True
)
faces = torch.randint(
num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
@ -55,9 +51,7 @@ class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
v02 = vertices_faces[:, 2] - vertices_faces[:, 0]
normals = torch.cross(v01, v02, dim=1) # (F, 3)
face_areas = normals.norm(dim=-1) / 2
face_normals = torch.nn.functional.normalize(
normals, p=2, dim=1, eps=1e-6
)
face_normals = torch.nn.functional.normalize(normals, p=2, dim=1, eps=1e-6)
return face_areas, face_normals
def _test_face_areas_normals_helper(self, device, dtype=torch.float32):
@ -76,10 +70,7 @@ class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
verts_torch = verts.detach().clone().to(dtype)
verts_torch.requires_grad = True
faces_torch = faces.detach().clone()
(
areas_torch,
normals_torch,
) = TestFaceAreasNormals.face_areas_normals_python(
(areas_torch, normals_torch) = TestFaceAreasNormals.face_areas_normals_python(
verts_torch, faces_torch
)
self.assertClose(areas_torch, areas, atol=1e-7)


@ -1,20 +1,15 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch.nn as nn
from common_testing import TestCaseMixin
from pytorch3d import _C
from pytorch3d.ops.graph_conv import (
GraphConv,
gather_scatter,
gather_scatter_python,
)
from pytorch3d.ops.graph_conv import GraphConv, gather_scatter, gather_scatter_python
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils import ico_sphere
from common_testing import TestCaseMixin
class TestGraphConv(TestCaseMixin, unittest.TestCase):
def test_undirected(self):
@ -89,8 +84,7 @@ class TestGraphConv(TestCaseMixin, unittest.TestCase):
w1 = torch.tensor([[-1, -1, -1]], dtype=dtype)
expected_y = torch.tensor(
[[1 + 2 + 3 - 4 - 5 - 6 - 7 - 8 - 9], [4 + 5 + 6], [7 + 8 + 9]],
dtype=dtype,
[[1 + 2 + 3 - 4 - 5 - 6 - 7 - 8 - 9], [4 + 5 + 6], [7 + 8 + 9]], dtype=dtype
)
conv = GraphConv(3, 1, directed=True).to(dtype)
@ -126,17 +120,13 @@ class TestGraphConv(TestCaseMixin, unittest.TestCase):
def test_cpu_cuda_tensor_error(self):
device = torch.device("cuda:0")
verts = torch.tensor(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=torch.float32,
device=device,
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float32, device=device
)
edges = torch.tensor([[0, 1], [0, 2]])
conv = GraphConv(3, 1, directed=True).to(torch.float32)
with self.assertRaises(Exception) as err:
conv(verts, edges)
self.assertTrue(
"tensors must be on the same device." in str(err.exception)
)
self.assertTrue("tensors must be on the same device." in str(err.exception))
def test_gather_scatter(self):
"""
@ -178,12 +168,10 @@ class TestGraphConv(TestCaseMixin, unittest.TestCase):
backend: str = "cuda",
):
device = torch.device("cuda") if backend == "cuda" else "cpu"
verts_list = torch.tensor(
num_verts * [[0.11, 0.22, 0.33]], device=device
).view(-1, 3)
faces_list = torch.tensor(num_faces * [[1, 2, 3]], device=device).view(
verts_list = torch.tensor(num_verts * [[0.11, 0.22, 0.33]], device=device).view(
-1, 3
)
faces_list = torch.tensor(num_faces * [[1, 2, 3]], device=device).view(-1, 3)
meshes = Meshes(num_meshes * [verts_list], num_meshes * [faces_list])
gconv = GraphConv(gconv_dim, gconv_dim, directed=directed)
gconv.to(device)
@ -191,9 +179,7 @@ class TestGraphConv(TestCaseMixin, unittest.TestCase):
total_verts = meshes.verts_packed().shape[0]
# Features.
x = torch.randn(
total_verts, gconv_dim, device=device, requires_grad=True
)
x = torch.randn(total_verts, gconv_dim, device=device, requires_grad=True)
torch.cuda.synchronize()
def run_graph_conv():


@ -2,8 +2,8 @@
import unittest
from itertools import product
import torch
import torch
from pytorch3d.ops.knn import _knn_points_idx_naive, knn_points_idx


@ -1,14 +1,13 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import torch
import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d.renderer.lighting import DirectionalLights, PointLights
from pytorch3d.transforms import RotateAxisAngle
from common_testing import TestCaseMixin
class TestLights(TestCaseMixin, unittest.TestCase):
def test_init_lights(self):
@ -56,9 +55,7 @@ class TestLights(TestCaseMixin, unittest.TestCase):
self.assertSeparate(new_prop, prop)
def test_lights_accessor(self):
d_light = DirectionalLights(
ambient_color=((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))
)
d_light = DirectionalLights(ambient_color=((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)))
p_light = PointLights(ambient_color=((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)))
for light in [d_light, p_light]:
# Update element
@ -96,14 +93,12 @@ class TestLights(TestCaseMixin, unittest.TestCase):
"""
with self.assertRaises(ValueError):
DirectionalLights(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(15, 3),
ambient_color=torch.randn(10, 3), diffuse_color=torch.randn(15, 3)
)
with self.assertRaises(ValueError):
PointLights(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(15, 3),
ambient_color=torch.randn(10, 3), diffuse_color=torch.randn(15, 3)
)
def test_initialize_lights_dimensions_fail(self):
@ -138,8 +133,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
normals = torch.tensor([0, 0, 1], dtype=torch.float32)
normals = normals[None, None, :]
expected_output = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
)
expected_output = expected_output.view(1, 1, 3).repeat(3, 1, 1)
light = DirectionalLights(diffuse_color=color, direction=direction)
@ -169,13 +163,10 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
points = torch.tensor([0, 0, 0], dtype=torch.float32)
normals = torch.tensor([0, 0, 1], dtype=torch.float32)
expected_output = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
)
expected_output = expected_output.view(-1, 1, 3)
light = PointLights(
diffuse_color=color[None, :], location=location[None, :]
)
light = PointLights(diffuse_color=color[None, :], location=location[None, :])
output_light = light.diffuse(
points=points[None, None, :], normals=normals[None, None, :]
)
@ -184,9 +175,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
# Change light direction to be 90 degrees apart from normal direction.
location = torch.tensor([0, 1, 0], dtype=torch.float32)
expected_output = torch.zeros_like(expected_output)
light = PointLights(
diffuse_color=color[None, :], location=location[None, :]
)
light = PointLights(diffuse_color=color[None, :], location=location[None, :])
output_light = light.diffuse(
points=points[None, None, :], normals=normals[None, None, :]
)
@ -204,8 +193,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
)
normals = torch.tensor([0, 0, 1], dtype=torch.float32)
expected_out = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
)
# Reshape
@ -231,8 +219,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
)
normals = torch.tensor([0, 0, 1], dtype=torch.float32)
expected_out = torch.tensor(
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
[1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
)
# Reshape
@ -258,9 +245,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
device = torch.device("cuda:0")
color = torch.tensor([1, 1, 1], dtype=torch.float32, device=device)
direction = torch.tensor(
[0, 1 / np.sqrt(2), 1 / np.sqrt(2)],
dtype=torch.float32,
device=device,
[0, 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32, device=device
)
normals = torch.tensor([0, 0, 1], dtype=torch.float32, device=device)
normals = normals.view(1, 1, 1, 1, 3).expand(N, H, W, K, -1)
@ -373,9 +358,7 @@ class TestSpecularLighting(TestCaseMixin, unittest.TestCase):
normals = torch.tensor([0, 1, 0], dtype=torch.float32)
expected_output = torch.tensor([1.0, 0.0, 1.0], dtype=torch.float32)
expected_output = expected_output.view(-1, 1, 3)
lights = PointLights(
specular_color=color[None, :], location=location[None, :]
)
lights = PointLights(specular_color=color[None, :], location=location[None, :])
output_light = lights.specular(
points=points[None, None, :],
normals=normals[None, None, :],
@ -528,8 +511,7 @@ class TestSpecularLighting(TestCaseMixin, unittest.TestCase):
mesh_to_vert_idx = torch.tensor(mesh_to_vert_idx, dtype=torch.int64)
color = torch.tensor([[1, 1, 1], [1, 0, 1]], dtype=torch.float32)
direction = torch.tensor(
[[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], [-1, 1, 0]],
dtype=torch.float32,
[[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], [-1, 1, 0]], dtype=torch.float32
)
camera_position = torch.tensor(
[


@ -1,11 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from pytorch3d.renderer.materials import Materials
from common_testing import TestCaseMixin
from pytorch3d.renderer.materials import Materials
class TestMaterials(TestCaseMixin, unittest.TestCase):
@ -64,8 +63,7 @@ class TestMaterials(TestCaseMixin, unittest.TestCase):
"""
with self.assertRaises(ValueError):
Materials(
ambient_color=torch.randn(10, 3),
diffuse_color=torch.randn(15, 3),
ambient_color=torch.randn(10, 3), diffuse_color=torch.randn(15, 3)
)
def test_initialize_materials_dimensions_fail(self):
@ -80,16 +78,12 @@ class TestMaterials(TestCaseMixin, unittest.TestCase):
Materials(shininess=torch.randn(10, 2))
def test_initialize_materials_mixed_inputs(self):
mat = Materials(
ambient_color=torch.randn(1, 3), diffuse_color=((1, 1, 1),)
)
mat = Materials(ambient_color=torch.randn(1, 3), diffuse_color=((1, 1, 1),))
self.assertTrue(mat.ambient_color.shape == (1, 3))
self.assertTrue(mat.diffuse_color.shape == (1, 3))
def test_initialize_materials_mixed_inputs_broadcast(self):
mat = Materials(
ambient_color=torch.randn(10, 3), diffuse_color=((1, 1, 1),)
)
mat = Materials(ambient_color=torch.randn(10, 3), diffuse_color=((1, 1, 1),))
self.assertTrue(mat.ambient_color.shape == (10, 3))
self.assertTrue(mat.diffuse_color.shape == (10, 3))
self.assertTrue(mat.specular_color.shape == (10, 3))


@ -1,12 +1,11 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
import torch
from common_testing import TestCaseMixin
from pytorch3d.loss import mesh_edge_loss
from pytorch3d.structures import Meshes
from common_testing import TestCaseMixin
from test_sample_points_from_meshes import TestSamplePoints
@ -27,9 +26,7 @@ class TestMeshEdgeLoss(TestCaseMixin, unittest.TestCase):
mesh = Meshes(verts=verts_list, faces=faces_list)
loss = mesh_edge_loss(mesh, target_length=target_length)
self.assertClose(
loss, torch.tensor([0.0], dtype=torch.float32, device=device)
)
self.assertClose(loss, torch.tensor([0.0], dtype=torch.float32, device=device))
self.assertTrue(loss.requires_grad)
@staticmethod
@ -53,9 +50,7 @@ class TestMeshEdgeLoss(TestCaseMixin, unittest.TestCase):
num_edges = mesh_edges.size(0)
for e in range(num_edges):
v0, v1 = verts_edges[e, 0], verts_edges[e, 1]
predlosses[b] += (
(v0 - v1).norm(dim=0, p=2) - target_length
) ** 2.0
predlosses[b] += ((v0 - v1).norm(dim=0, p=2) - target_length) ** 2.0
if num_edges > 0:
predlosses[b] = predlosses[b] / num_edges
@ -96,12 +91,8 @@ class TestMeshEdgeLoss(TestCaseMixin, unittest.TestCase):
self.assertClose(loss, predloss)
@staticmethod
def mesh_edge_loss(
num_meshes: int = 10, max_v: int = 100, max_f: int = 300
):
meshes = TestSamplePoints.init_meshes(
num_meshes, max_v, max_f, device="cuda:0"
)
def mesh_edge_loss(num_meshes: int = 10, max_v: int = 100, max_f: int = 300):
meshes = TestSamplePoints.init_meshes(num_meshes, max_v, max_f, device="cuda:0")
torch.cuda.synchronize()
def compute_loss():


@ -2,8 +2,8 @@
import unittest
import torch
import torch
from pytorch3d.loss.mesh_laplacian_smoothing import mesh_laplacian_smoothing
from pytorch3d.structures.meshes import Meshes
@ -56,9 +56,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
V = verts_packed.shape[0]
L = torch.zeros((V, V), dtype=torch.float32, device=meshes.device)
inv_areas = torch.zeros(
(V, 1), dtype=torch.float32, device=meshes.device
)
inv_areas = torch.zeros((V, 1), dtype=torch.float32, device=meshes.device)
for f in faces_packed:
v0 = verts_packed[f[0], :]
@ -69,9 +67,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
C = (v0 - v1).norm()
s = 0.5 * (A + B + C)
face_area = (
(s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
)
face_area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
inv_areas[f[0]] += face_area
inv_areas[f[1]] += face_area
inv_areas[f[2]] += face_area
@ -114,16 +110,13 @@ class TestLaplacianSmoothing(unittest.TestCase):
return loss.sum() / len(meshes)
@staticmethod
def init_meshes(
num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
):
def init_meshes(num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = (
torch.rand((num_verts, 3), dtype=torch.float32, device=device)
* 2.0
torch.rand((num_verts, 3), dtype=torch.float32, device=device) * 2.0
- 1.0
) # verts in the space of [-1, 1]
faces = torch.stack(
@ -148,9 +141,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
# feats in list
out = mesh_laplacian_smoothing(meshes, method="uniform")
naive_out = TestLaplacianSmoothing.laplacian_smoothing_naive_uniform(
meshes
)
naive_out = TestLaplacianSmoothing.laplacian_smoothing_naive_uniform(meshes)
self.assertTrue(torch.allclose(out, naive_out))
@ -190,9 +181,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = torch.rand(
(num_verts, 3), dtype=torch.float32, device=device
)
verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
faces = torch.randint(
num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
)


@ -2,8 +2,8 @@
import unittest
import torch
import torch
from pytorch3d.loss.mesh_normal_consistency import mesh_normal_consistency
from pytorch3d.structures.meshes import Meshes
from pytorch3d.utils.ico_sphere import ico_sphere
@ -33,17 +33,14 @@ class TestMeshNormalConsistency(unittest.TestCase):
return faces
@staticmethod
def init_meshes(
num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
):
def init_meshes(num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000):
device = torch.device("cuda:0")
valid_faces = TestMeshNormalConsistency.init_faces(num_verts).to(device)
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = (
torch.rand((num_verts, 3), dtype=torch.float32, device=device)
* 2.0
torch.rand((num_verts, 3), dtype=torch.float32, device=device) * 2.0
- 1.0
) # verts in the space of [-1, 1]
"""
@ -105,8 +102,7 @@ class TestMeshNormalConsistency(unittest.TestCase):
(
1
- torch.cosine_similarity(
normals[i].view(1, 3),
-normals[j].view(1, 3),
normals[i].view(1, 3), -normals[j].view(1, 3)
)
)
)
@ -137,9 +133,7 @@ class TestMeshNormalConsistency(unittest.TestCase):
device = torch.device("cuda:0")
# mesh1 shown above
verts1 = torch.rand((4, 3), dtype=torch.float32, device=device)
faces1 = torch.tensor(
[[0, 1, 2], [2, 1, 3]], dtype=torch.int64, device=device
)
faces1 = torch.tensor([[0, 1, 2], [2, 1, 3]], dtype=torch.int64, device=device)
# mesh2 is a cuboid with 8 verts, 12 faces and 18 edges
verts2 = torch.tensor(
@ -181,9 +175,7 @@ class TestMeshNormalConsistency(unittest.TestCase):
[[0, 1, 2], [2, 1, 3], [2, 1, 4]], dtype=torch.int64, device=device
)
meshes = Meshes(
verts=[verts1, verts2, verts3], faces=[faces1, faces2, faces3]
)
meshes = Meshes(verts=[verts1, verts2, verts3], faces=[faces1, faces2, faces3])
# mesh1: normal consistency computation
n0 = (verts1[1] - verts1[2]).cross(verts1[3] - verts1[2])


@ -2,8 +2,8 @@
import unittest
import torch
import torch
from pytorch3d.renderer.mesh.utils import _clip_barycentric_coordinates


@ -1,12 +1,11 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import unittest
import numpy as np
import torch
from pytorch3d.structures.meshes import Meshes
from common_testing import TestCaseMixin
from pytorch3d.structures.meshes import Meshes
class TestMeshes(TestCaseMixin, unittest.TestCase):
@ -54,9 +53,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
# For lists of faces and vertices, we can sample different v/f
# per mesh.
f = torch.randint(max_f, size=(num_meshes,), dtype=torch.int32)
v = torch.randint(
3, high=max_v, size=(num_meshes,), dtype=torch.int32
)
v = torch.randint(3, high=max_v, size=(num_meshes,), dtype=torch.int32)
# Generate the actual vertices and faces.
for i in range(num_meshes):
@ -90,12 +87,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
device=device,
),
torch.tensor(
[
[0.1, 0.3, 0.3],
[0.6, 0.7, 0.8],
[0.2, 0.3, 0.4],
[0.1, 0.5, 0.3],
],
[[0.1, 0.3, 0.3], [0.6, 0.7, 0.8], [0.2, 0.3, 0.4], [0.1, 0.5, 0.3]],
dtype=torch.float32,
device=device,
),
@ -113,9 +105,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
]
faces = [
torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device),
torch.tensor(
[[0, 1, 2], [1, 2, 3]], dtype=torch.int64, device=device
),
torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64, device=device),
torch.tensor(
[
[1, 2, 0],
@ -136,12 +126,8 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
mesh = TestMeshes.init_simple_mesh("cuda:0")
# Check that faces/verts per mesh are set in init:
self.assertClose(
mesh._num_faces_per_mesh.cpu(), torch.tensor([1, 2, 7])
)
self.assertClose(
mesh._num_verts_per_mesh.cpu(), torch.tensor([3, 4, 5])
)
self.assertClose(mesh._num_faces_per_mesh.cpu(), torch.tensor([1, 2, 7]))
self.assertClose(mesh._num_verts_per_mesh.cpu(), torch.tensor([3, 4, 5]))
# Check computed tensors
self.assertClose(
@ -163,8 +149,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
mesh.mesh_to_faces_packed_first_idx().cpu(), torch.tensor([0, 1, 3])
)
self.assertClose(
mesh.num_edges_per_mesh().cpu(),
torch.tensor([3, 5, 10], dtype=torch.int32),
mesh.num_edges_per_mesh().cpu(), torch.tensor([3, 5, 10], dtype=torch.int32)
)
def test_simple_random_meshes(self):
@ -172,9 +157,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
# Define the test mesh object either as a list or tensor of faces/verts.
for lists_to_tensors in (False, True):
N = 10
mesh = TestMeshes.init_mesh(
N, 100, 300, lists_to_tensors=lists_to_tensors
)
mesh = TestMeshes.init_mesh(N, 100, 300, lists_to_tensors=lists_to_tensors)
verts_list = mesh.verts_list()
faces_list = mesh.faces_list()
@ -207,12 +190,8 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
for n in range(N):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
self.assertClose(
verts_packed[curv : curv + v, :], verts_list[n]
)
self.assertClose(
faces_packed[curf : curf + f, :] - curv, faces_list[n]
)
self.assertClose(verts_packed[curv : curv + v, :], verts_list[n])
self.assertClose(faces_packed[curf : curf + f, :] - curv, faces_list[n])
self.assertTrue(vert_to_mesh[curv : curv + v].eq(n).all())
self.assertTrue(face_to_mesh[curf : curf + f].eq(n).all())
self.assertTrue(mesh_to_vert[n] == curv)
@ -232,9 +211,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
npedges = np.concatenate((e12, e20, e01), axis=0)
npedges = np.sort(npedges, axis=1)
unique_edges, unique_idx = np.unique(
npedges, return_index=True, axis=0
)
unique_edges, unique_idx = np.unique(npedges, return_index=True, axis=0)
self.assertTrue(np.allclose(edges, unique_edges))
temp = face_to_mesh.cpu().numpy()
temp = np.concatenate((temp, temp, temp), axis=0)
@ -266,13 +243,9 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(
F, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]
verts = torch.rand((v, 3), dtype=torch.float32, device=device)
faces = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
faces = torch.randint(v, size=(f, 3), dtype=torch.int64, device=device)
else:
verts = torch.tensor([], dtype=torch.float32, device=device)
faces = torch.tensor([], dtype=torch.int64, device=device)
@ -309,16 +282,12 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
)
for n in range(N):
verts.append(torch.rand((V, 3), dtype=torch.float32, device=device))
this_faces = torch.full(
(F, 3), -1, dtype=torch.int64, device=device
)
this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
if valid[n]:
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(
F, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]
this_faces[:f, :] = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
@ -329,9 +298,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
mesh = Meshes(verts=torch.stack(verts), faces=torch.stack(faces))
# Check verts/faces per mesh are set correctly in init.
self.assertListEqual(
mesh._num_faces_per_mesh.tolist(), num_faces.tolist()
)
self.assertListEqual(mesh._num_faces_per_mesh.tolist(), num_faces.tolist())
self.assertListEqual(mesh._num_verts_per_mesh.tolist(), [V] * N)
for n, (vv, ff) in enumerate(zip(mesh.verts_list(), mesh.faces_list())):
@ -339,12 +306,8 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
self.assertClose(vv, verts[n])
new_faces = [ff.clone() for ff in faces]
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(F - 10, size=(1,), dtype=torch.int32, device=device)[
0
]
v = torch.randint(3, high=V, size=(1,), dtype=torch.int32, device=device)[0]
f = torch.randint(F - 10, size=(1,), dtype=torch.int32, device=device)[0]
this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
this_faces[10 : f + 10, :] = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
@ -376,9 +339,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
torch.allclose(new_mesh._verts_list[0], mesh._verts_list[0])
)
self.assertFalse(
torch.allclose(
mesh.num_verts_per_mesh(), new_mesh.num_verts_per_mesh()
)
torch.allclose(mesh.num_verts_per_mesh(), new_mesh.num_verts_per_mesh())
)
self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())
self.assertSeparate(new_mesh.verts_padded(), mesh.verts_padded())
@ -438,9 +399,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
mesh._compute_face_areas_normals(refresh=True)
mesh._compute_vertex_normals(refresh=True)
deform = torch.rand(
(all_v, 3), dtype=torch.float32, device=mesh.device
)
deform = torch.rand((all_v, 3), dtype=torch.float32, device=mesh.device)
# new meshes class to hold the deformed mesh
new_mesh_naive = naive_offset_verts(mesh, deform)
@ -458,9 +417,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
self.assertClose(
new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
)
self.assertClose(
mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
self.assertClose(mesh.faces_list()[i], new_mesh_naive.faces_list()[i])
self.assertClose(
new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
@ -475,21 +432,11 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
)
# check padded & packed
self.assertClose(
new_mesh.faces_padded(), new_mesh_naive.faces_padded()
)
self.assertClose(
new_mesh.verts_padded(), new_mesh_naive.verts_padded()
)
self.assertClose(
new_mesh.faces_packed(), new_mesh_naive.faces_packed()
)
self.assertClose(
new_mesh.verts_packed(), new_mesh_naive.verts_packed()
)
self.assertClose(
new_mesh.edges_packed(), new_mesh_naive.edges_packed()
)
self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())
self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())
self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())
self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())
self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())
self.assertClose(
new_mesh.verts_packed_to_mesh_idx(),
new_mesh_naive.verts_packed_to_mesh_idx(),
@ -499,8 +446,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
new_mesh_naive.mesh_to_verts_packed_first_idx(),
)
self.assertClose(
new_mesh.num_verts_per_mesh(),
new_mesh_naive.num_verts_per_mesh(),
new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()
)
self.assertClose(
new_mesh.faces_packed_to_mesh_idx(),
@ -511,8 +457,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
new_mesh_naive.mesh_to_faces_packed_first_idx(),
)
self.assertClose(
new_mesh.num_faces_per_mesh(),
new_mesh_naive.num_faces_per_mesh(),
new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()
)
self.assertClose(
new_mesh.edges_packed_to_mesh_idx(),
@ -527,24 +472,19 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
# check face areas, normals and vertex normals
self.assertClose(
new_mesh.verts_normals_packed(),
new_mesh_naive.verts_normals_packed(),
new_mesh.verts_normals_packed(), new_mesh_naive.verts_normals_packed()
)
self.assertClose(
new_mesh.verts_normals_padded(),
new_mesh_naive.verts_normals_padded(),
new_mesh.verts_normals_padded(), new_mesh_naive.verts_normals_padded()
)
self.assertClose(
new_mesh.faces_normals_packed(),
new_mesh_naive.faces_normals_packed(),
new_mesh.faces_normals_packed(), new_mesh_naive.faces_normals_packed()
)
self.assertClose(
new_mesh.faces_normals_padded(),
new_mesh_naive.faces_normals_padded(),
new_mesh.faces_normals_padded(), new_mesh_naive.faces_normals_padded()
)
self.assertClose(
new_mesh.faces_areas_packed(),
new_mesh_naive.faces_areas_packed(),
new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()
)
def test_scale_verts(self):
@ -579,13 +519,11 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
for i in range(N):
if test == "tensor":
self.assertClose(
scales[i] * mesh.verts_list()[i],
new_mesh.verts_list()[i],
scales[i] * mesh.verts_list()[i], new_mesh.verts_list()[i]
)
else:
self.assertClose(
scales * mesh.verts_list()[i],
new_mesh.verts_list()[i],
scales * mesh.verts_list()[i], new_mesh.verts_list()[i]
)
self.assertClose(
new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
@ -607,21 +545,11 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
)
# check padded & packed
self.assertClose(
new_mesh.faces_padded(), new_mesh_naive.faces_padded()
)
self.assertClose(
new_mesh.verts_padded(), new_mesh_naive.verts_padded()
)
self.assertClose(
new_mesh.faces_packed(), new_mesh_naive.faces_packed()
)
self.assertClose(
new_mesh.verts_packed(), new_mesh_naive.verts_packed()
)
self.assertClose(
new_mesh.edges_packed(), new_mesh_naive.edges_packed()
)
self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())
self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())
self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())
self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())
self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())
self.assertClose(
new_mesh.verts_packed_to_mesh_idx(),
new_mesh_naive.verts_packed_to_mesh_idx(),
@ -631,8 +559,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
new_mesh_naive.mesh_to_verts_packed_first_idx(),
)
self.assertClose(
new_mesh.num_verts_per_mesh(),
new_mesh_naive.num_verts_per_mesh(),
new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()
)
self.assertClose(
new_mesh.faces_packed_to_mesh_idx(),
@ -643,8 +570,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
new_mesh_naive.mesh_to_faces_packed_first_idx(),
)
self.assertClose(
new_mesh.num_faces_per_mesh(),
new_mesh_naive.num_faces_per_mesh(),
new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()
)
self.assertClose(
new_mesh.edges_packed_to_mesh_idx(),
@ -675,8 +601,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
new_mesh_naive.faces_normals_padded(),
)
self.assertClose(
new_mesh.faces_areas_packed(),
new_mesh_naive.faces_areas_packed(),
new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()
)
def test_extend_list(self):
@ -730,10 +655,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
self.assertTrue(len(split_meshes[0]) == 2)
self.assertTrue(
split_meshes[0].verts_list()
== [
mesh.get_mesh_verts_faces(0)[0],
mesh.get_mesh_verts_faces(1)[0],
]
== [mesh.get_mesh_verts_faces(0)[0], mesh.get_mesh_verts_faces(1)[0]]
)
self.assertTrue(len(split_meshes[1]) == 3)
self.assertTrue(
@ -756,9 +678,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
verts_faces = [(10, 100), (20, 200)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(
V, size=(F, 3), dtype=torch.int64, device=device
)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
@ -782,9 +702,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
faces_list = []
for (V, F) in [(10, 100)]:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(
V, size=(F, 3), dtype=torch.int64, device=device
)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
@ -802,9 +720,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
verts_faces = [(10, 100), (20, 200), (30, 300)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(
V, size=(F, 3), dtype=torch.int64, device=device
)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
@ -814,9 +730,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
verts_padded = mesh.verts_padded()
verts_padded_flat = verts_padded.view(-1, 3)
self.assertClose(
verts_padded_flat[verts_padded_to_packed_idx], verts_packed
)
self.assertClose(verts_padded_flat[verts_padded_to_packed_idx], verts_packed)
idx = verts_padded_to_packed_idx.view(-1, 1).expand(-1, 3)
self.assertClose(verts_padded_flat.gather(0, idx), verts_packed)
@ -828,9 +742,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
verts_faces = [(10, 100), (20, 200), (30, 300)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(
V, size=(F, 3), dtype=torch.int64, device=device
)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
@ -1006,12 +918,10 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
verts_normals_packed = meshes.verts_normals_packed()
faces_normals_packed = meshes.faces_normals_packed()
self.assertTrue(
list(verts_normals_packed.shape)
== [verts.shape[0] + verts2.shape[0], 3]
list(verts_normals_packed.shape) == [verts.shape[0] + verts2.shape[0], 3]
)
self.assertTrue(
list(faces_normals_packed.shape)
== [faces.shape[0] + faces2.shape[0], 3]
list(faces_normals_packed.shape) == [faces.shape[0] + faces2.shape[0], 3]
)
# Single mesh where two faces share one vertex so the normal is
@ -1079,17 +989,12 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
# with areas > eps=1e-6
nonzero = face_areas_cpu > 1e-6
self.assertClose(
face_normals_cpu[nonzero],
face_normals_cuda.cpu()[nonzero],
atol=1e-6,
face_normals_cpu[nonzero], face_normals_cuda.cpu()[nonzero], atol=1e-6
)
@staticmethod
def compute_packed_with_init(
num_meshes: int = 10,
max_v: int = 100,
max_f: int = 300,
device: str = "cpu",
num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = "cpu"
):
mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)
torch.cuda.synchronize()
@ -1102,10 +1007,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
@staticmethod
def compute_padded_with_init(
num_meshes: int = 10,
max_v: int = 100,
max_f: int = 300,
device: str = "cpu",
num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = "cpu"
):
mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)
torch.cuda.synchronize()
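
For orientation when reading the Meshes hunks above: the class keeps the same data in three layouts (list, padded, packed), and most of the reformatted assertions compare them. A minimal sketch with invented shapes, not code from this diff:

import torch
from pytorch3d.structures import Meshes

verts = [torch.rand(3, 3), torch.rand(4, 3)]  # per-mesh (V_i, 3) coordinates
faces = [torch.tensor([[0, 1, 2]]), torch.tensor([[0, 1, 2], [1, 2, 3]])]
meshes = Meshes(verts=verts, faces=faces)

meshes.verts_list()    # the per-mesh tensors, as passed in
meshes.verts_padded()  # (2, 4, 3); shorter meshes are zero-padded
meshes.verts_packed()  # (7, 3); meshes concatenated along dim 0
# faces_packed() offsets each face index by its mesh's first packed vertex,
# which is exactly the curv/curf bookkeeping asserted above.
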


@ -2,8 +2,8 @@
import unittest
from itertools import product
import torch
from pytorch3d import _C
@ -33,9 +33,7 @@ class TestNearestNeighborPoints(unittest.TestCase):
# to the cpp or cuda versions of the function
# depending on the input type.
idx1 = _C.nn_points_idx(x, y)
idx2 = TestNearestNeighborPoints.nn_points_idx_naive(
x, y
)
idx2 = TestNearestNeighborPoints.nn_points_idx_naive(x, y)
self.assertTrue(idx1.size(1) == P1)
self.assertTrue(torch.all(idx1 == idx2))
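
The naive helper referenced above lies outside this hunk; a plausible brute-force equivalent (an assumption about its behavior, not the file's actual implementation) would be:

import torch

def nn_points_idx_naive(x, y):
    # x: (N, P1, D), y: (N, P2, D). For every point in x, return the index
    # of its nearest neighbor in y as an (N, P1) tensor.
    dists = ((x[:, :, None, :] - y[:, None, :, :]) ** 2).sum(dim=3)  # (N, P1, P2)
    return dists.argmin(dim=2)
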


@ -4,14 +4,13 @@ import os
import unittest
from io import StringIO
from pathlib import Path
import torch
from common_testing import TestCaseMixin
from pytorch3d.io import load_obj, load_objs_as_meshes, save_obj
from pytorch3d.structures import Meshes, Textures, join_meshes
from pytorch3d.utils import torus
class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
def test_load_obj_simple(self):
@ -34,12 +33,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
tex_maps = aux.texture_images
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
dtype=torch.float32,
)
expected_faces = torch.tensor(
@ -124,12 +118,8 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
[[0.749279, 0.501284], [0.999110, 0.501077], [0.999455, 0.750380]],
dtype=torch.float32,
)
expected_faces_normals_idx = torch.tensor(
[[1, 1, 1]], dtype=torch.int64
)
expected_faces_textures_idx = torch.tensor(
[[0, 0, 1]], dtype=torch.int64
)
expected_faces_normals_idx = torch.tensor([[1, 1, 1]], dtype=torch.int64)
expected_faces_textures_idx = torch.tensor([[0, 0, 1]], dtype=torch.int64)
self.assertTrue(torch.all(verts == expected_verts))
self.assertTrue(torch.all(faces.verts_idx == expected_faces))
@ -153,23 +143,13 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
]
)
obj_file = StringIO(obj_file)
expected_faces_normals_idx = torch.tensor(
[[0, 0, 1]], dtype=torch.int64
)
expected_faces_normals_idx = torch.tensor([[0, 0, 1]], dtype=torch.int64)
expected_normals = torch.tensor(
[
[0.000000, 0.000000, -1.000000],
[-1.000000, -0.000000, -0.000000],
],
[[0.000000, 0.000000, -1.000000], [-1.000000, -0.000000, -0.000000]],
dtype=torch.float32,
)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
dtype=torch.float32,
)
verts, faces, aux = load_obj(obj_file)
@ -198,19 +178,12 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
]
)
obj_file = StringIO(obj_file)
expected_faces_textures_idx = torch.tensor(
[[0, 0, 1]], dtype=torch.int64
)
expected_faces_textures_idx = torch.tensor([[0, 0, 1]], dtype=torch.int64)
expected_textures = torch.tensor(
[[0.999110, 0.501077], [0.999455, 0.750380]], dtype=torch.float32
)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
dtype=torch.float32,
)
verts, faces, aux = load_obj(obj_file)
@ -257,9 +230,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue(
"Vertex properties are inconsistent" in str(err.exception)
)
self.assertTrue("Vertex properties are inconsistent" in str(err.exception))
def test_load_obj_error_too_many_vertex_properties(self):
obj_file = "\n".join(["f 2/1/1/3"])
@ -267,9 +238,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
self.assertTrue(
"Face vertices can ony have 3 properties" in str(err.exception)
)
self.assertTrue("Face vertices can ony have 3 properties" in str(err.exception))
def test_load_obj_error_invalid_vertex_indices(self):
obj_file = "\n".join(
@ -320,7 +289,9 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
verts = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4]]) # (V, 4)
faces = torch.LongTensor([[0, 1, 2]])
save_obj(StringIO(), verts, faces)
expected_message = "Argument 'verts' should either be empty or of shape (num_verts, 3)."
expected_message = (
"Argument 'verts' should either be empty or of shape (num_verts, 3)."
)
self.assertTrue(expected_message, error.exception)
# Invalid faces shape
@ -328,7 +299,9 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
faces = torch.LongTensor([[0, 1, 2, 3]]) # (F, 4)
save_obj(StringIO(), verts, faces)
expected_message = "Argument 'faces' should either be empty or of shape (num_faces, 3)."
expected_message = (
"Argument 'faces' should either be empty or of shape (num_faces, 3)."
)
self.assertTrue(expected_message, error.exception)
def test_save_obj_invalid_indices(self):
@ -395,12 +368,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
def test_save_obj(self):
verts = torch.tensor(
[
[0.01, 0.2, 0.301],
[0.2, 0.03, 0.408],
[0.3, 0.4, 0.05],
[0.6, 0.7, 0.8],
],
[[0.01, 0.2, 0.301], [0.2, 0.03, 0.408], [0.3, 0.4, 0.05], [0.6, 0.7, 0.8]],
dtype=torch.float32,
)
faces = torch.tensor(
@ -424,9 +392,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
self.assertEqual(actual_file, expected_file)
def test_load_mtl(self):
DATA_DIR = (
Path(__file__).resolve().parent.parent / "docs/tutorials/data"
)
DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
obj_filename = "cow_mesh/cow.obj"
filename = os.path.join(DATA_DIR, obj_filename)
verts, faces, aux = load_obj(filename)
@ -452,19 +418,13 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
# Check all keys and values in dictionary are the same.
for n1, n2 in zip(materials.keys(), expected_materials.keys()):
self.assertTrue(n1 == n2)
for k1, k2 in zip(
materials[n1].keys(), expected_materials[n2].keys()
):
for k1, k2 in zip(materials[n1].keys(), expected_materials[n2].keys()):
self.assertTrue(
torch.allclose(
materials[n1][k1], expected_materials[n2][k2]
)
torch.allclose(materials[n1][k1], expected_materials[n2][k2])
)
def test_load_mtl_noload(self):
DATA_DIR = (
Path(__file__).resolve().parent.parent / "docs/tutorials/data"
)
DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
obj_filename = "cow_mesh/cow.obj"
filename = os.path.join(DATA_DIR, obj_filename)
verts, faces, aux = load_obj(filename, load_textures=False)
@ -490,12 +450,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
verts, faces, aux = load_obj(obj_file)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
dtype=torch.float32,
)
expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@ -514,12 +469,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
verts, faces, aux = load_obj(filename)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
dtype=torch.float32,
)
expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@ -533,12 +483,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
verts, faces, aux = load_obj(filename, load_textures=False)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
dtype=torch.float32,
)
expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@ -555,12 +500,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
verts, faces, aux = load_obj(filename)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
dtype=torch.float32,
)
expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@ -574,12 +514,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
verts, faces, aux = load_obj(filename, load_textures=False)
expected_verts = torch.tensor(
[
[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6],
],
[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
dtype=torch.float32,
)
expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@ -607,33 +542,24 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
check_item(mesh.verts_padded(), mesh3.verts_padded())
check_item(mesh.faces_padded(), mesh3.faces_padded())
if mesh.textures is not None:
check_item(mesh.textures.maps_padded(), mesh3.textures.maps_padded())
check_item(
mesh.textures.maps_padded(), mesh3.textures.maps_padded()
mesh.textures.faces_uvs_padded(), mesh3.textures.faces_uvs_padded()
)
check_item(
mesh.textures.faces_uvs_padded(),
mesh3.textures.faces_uvs_padded(),
mesh.textures.verts_uvs_padded(), mesh3.textures.verts_uvs_padded()
)
check_item(
mesh.textures.verts_uvs_padded(),
mesh3.textures.verts_uvs_padded(),
)
check_item(
mesh.textures.verts_rgb_padded(),
mesh3.textures.verts_rgb_padded(),
mesh.textures.verts_rgb_padded(), mesh3.textures.verts_rgb_padded()
)
DATA_DIR = (
Path(__file__).resolve().parent.parent / "docs/tutorials/data"
)
DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
obj_filename = DATA_DIR / "cow_mesh/cow.obj"
mesh = load_objs_as_meshes([obj_filename])
mesh3 = load_objs_as_meshes([obj_filename, obj_filename, obj_filename])
check_triple(mesh, mesh3)
self.assertTupleEqual(
mesh.textures.maps_padded().shape, (1, 1024, 1024, 3)
)
self.assertTupleEqual(mesh.textures.maps_padded().shape, (1, 1024, 1024, 3))
mesh_notex = load_objs_as_meshes([obj_filename], load_textures=False)
mesh3_notex = load_objs_as_meshes(
@ -655,9 +581,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
teapot_obj = DATA_DIR / "teapot.obj"
mesh_teapot = load_objs_as_meshes([teapot_obj])
teapot_verts, teapot_faces = mesh_teapot.get_mesh_verts_faces(0)
mix_mesh = load_objs_as_meshes(
[obj_filename, teapot_obj], load_textures=False
)
mix_mesh = load_objs_as_meshes([obj_filename, teapot_obj], load_textures=False)
self.assertEqual(len(mix_mesh), 2)
self.assertClose(mix_mesh.verts_list()[0], mesh.verts_list()[0])
self.assertClose(mix_mesh.faces_list()[0], mesh.faces_list()[0])
@ -671,15 +595,11 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
self.assertClose(cow3_tea.faces_list()[3], mesh_teapot.faces_list()[0])
@staticmethod
def _bm_save_obj(
verts: torch.Tensor, faces: torch.Tensor, decimal_places: int
):
def _bm_save_obj(verts: torch.Tensor, faces: torch.Tensor, decimal_places: int):
return lambda: save_obj(StringIO(), verts, faces, decimal_places)
@staticmethod
def _bm_load_obj(
verts: torch.Tensor, faces: torch.Tensor, decimal_places: int
):
def _bm_load_obj(verts: torch.Tensor, faces: torch.Tensor, decimal_places: int):
f = StringIO()
save_obj(f, verts, faces, decimal_places)
s = f.getvalue()
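
For context, the load/save round trip these tests exercise, sketched with a hypothetical in-memory OBJ (not a file from this diff):

from io import StringIO

from pytorch3d.io import load_obj, save_obj

obj_file = StringIO("v 0.1 0.2 0.3\nv 0.2 0.3 0.4\nv 0.3 0.4 0.5\nf 1 2 3\n")
verts, faces, aux = load_obj(obj_file)
# verts: (3, 3) float32; faces.verts_idx: (1, 3) int64, converted to 0-based.
# aux carries normals, verts_uvs and, with load_textures=True, material data.

out = StringIO()
save_obj(out, verts, faces.verts_idx, decimal_places=6)
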


@ -1,13 +1,12 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from common_testing import TestCaseMixin
from pytorch3d.ops import packed_to_padded, padded_to_packed
from pytorch3d.structures.meshes import Meshes
class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
@ -25,9 +24,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = torch.rand(
(num_verts, 3), dtype=torch.float32, device=device
)
verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
faces = torch.randint(
num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
)
@ -47,9 +44,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
if D == 0:
inputs_padded = torch.zeros((num_meshes, max_size), device=device)
else:
inputs_padded = torch.zeros(
(num_meshes, max_size, D), device=device
)
inputs_padded = torch.zeros((num_meshes, max_size, D), device=device)
for m in range(num_meshes):
s = first_idxs[m]
if m == num_meshes - 1:
@ -92,13 +87,9 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
max_faces = meshes.num_faces_per_mesh().max().item()
if D == 0:
values = torch.rand(
(faces.shape[0],), device=device, requires_grad=True
)
values = torch.rand((faces.shape[0],), device=device, requires_grad=True)
else:
values = torch.rand(
(faces.shape[0], D), device=device, requires_grad=True
)
values = torch.rand((faces.shape[0], D), device=device, requires_grad=True)
values_torch = values.detach().clone()
values_torch.requires_grad = True
values_padded = packed_to_padded(
@ -120,10 +111,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
values_padded_torch.backward(grad_inputs)
grad_outputs_torch1 = values_torch.grad
grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python(
grad_inputs,
mesh_to_faces_packed_first_idx,
values.size(0),
device=device,
grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device
)
self.assertClose(grad_outputs, grad_outputs_torch1)
self.assertClose(grad_outputs, grad_outputs_torch2)
@ -165,9 +153,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
values_torch = values.detach().clone()
values_torch.requires_grad = True
values_packed = padded_to_packed(
values,
mesh_to_faces_packed_first_idx,
num_faces_per_mesh.sum().item(),
values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item()
)
values_packed_torch = TestPackedToPadded.padded_to_packed_python(
values_torch,
@ -180,9 +166,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
# check backward
if D == 0:
grad_inputs = torch.rand(
(num_faces_per_mesh.sum().item()), device=device
)
grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device)
else:
grad_inputs = torch.rand(
(num_faces_per_mesh.sum().item(), D), device=device
@ -192,10 +176,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
values_packed_torch.backward(grad_inputs)
grad_outputs_torch1 = values_torch.grad
grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python(
grad_inputs,
mesh_to_faces_packed_first_idx,
values.size(1),
device=device,
grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device
)
self.assertClose(grad_outputs, grad_outputs_torch1)
self.assertClose(grad_outputs, grad_outputs_torch2)
@ -219,34 +200,24 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
self._test_padded_to_packed_helper(16, "cuda:0")
def test_invalid_inputs_shapes(self, device="cuda:0"):
with self.assertRaisesRegex(
ValueError, "input can only be 2-dimensional."
):
with self.assertRaisesRegex(ValueError, "input can only be 2-dimensional."):
values = torch.rand((100, 50, 2), device=device)
first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
packed_to_padded(values, first_idxs, 100)
with self.assertRaisesRegex(
ValueError, "input can only be 3-dimensional."
):
with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
values = torch.rand((100,), device=device)
first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
padded_to_packed(values, first_idxs, 20)
with self.assertRaisesRegex(
ValueError, "input can only be 3-dimensional."
):
with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
values = torch.rand((100, 50, 2, 2), device=device)
first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
padded_to_packed(values, first_idxs, 20)
@staticmethod
def packed_to_padded_with_init(
num_meshes: int,
num_verts: int,
num_faces: int,
num_d: int,
device: str = "cpu",
num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu"
):
meshes = TestPackedToPadded.init_meshes(
num_meshes, num_verts, num_faces, device
@ -268,11 +239,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
@staticmethod
def packed_to_padded_with_init_torch(
num_meshes: int,
num_verts: int,
num_faces: int,
num_d: int,
device: str = "cpu",
num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu"
):
meshes = TestPackedToPadded.init_meshes(
num_meshes, num_verts, num_faces, device
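
For readers unfamiliar with the two layouts converted here, a small worked example with invented values:

import torch
from pytorch3d.ops import packed_to_padded, padded_to_packed

values = torch.arange(5.0).view(5, 1)                 # packed: 3 + 2 faces for 2 meshes
first_idxs = torch.tensor([0, 3], dtype=torch.int64)  # start of each mesh's block
padded = packed_to_padded(values, first_idxs, 3)      # (2, 3, 1), zero-padded
packed = padded_to_packed(padded, first_idxs, 5)      # back to (5, 1)
assert torch.equal(packed, values)
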


@ -3,13 +3,12 @@
import struct
import unittest
from io import BytesIO, StringIO
import torch
from common_testing import TestCaseMixin
from pytorch3d.io.ply_io import _load_ply_raw, load_ply, save_ply
from pytorch3d.utils import torus
class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
def test_raw_load_simple_ascii(self):
@ -155,14 +154,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
def test_load_simple_binary(self):
for big_endian in [True, False]:
verts = (
"0 0 0 "
"0 0 1 "
"0 1 1 "
"0 1 0 "
"1 0 0 "
"1 0 1 "
"1 1 1 "
"1 1 0"
"0 0 0 " "0 0 1 " "0 1 1 " "0 1 0 " "1 0 0 " "1 0 1 " "1 1 1 " "1 1 0"
).split()
faces = (
"4 0 1 2 3 "
@ -176,9 +168,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
"3 4 5 1"
).split()
short_one = b"\00\01" if big_endian else b"\01\00"
mixed_data = b"\00\00" b"\03\03" + (
short_one + b"\00\01\01\01" b"\00\02"
)
mixed_data = b"\00\00" b"\03\03" + (short_one + b"\00\01\01\01" b"\00\02")
minus_one_data = b"\xff" * 14
endian_char = ">" if big_endian else "<"
format = (
@ -306,9 +296,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
lines2 = lines.copy()
lines2[8] = "1 2"
with self.assertRaisesRegex(
ValueError, "Inconsistent data for vertex."
):
with self.assertRaisesRegex(ValueError, "Inconsistent data for vertex."):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines[:-1]
@ -344,9 +332,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
lines2 = lines.copy()
lines2.insert(4, "element bad 1")
with self.assertRaisesRegex(
ValueError, "Found an element with no properties."
):
with self.assertRaisesRegex(ValueError, "Found an element with no properties."):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2 = lines.copy()
@ -369,25 +355,19 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
lines2 = lines.copy()
lines2.insert(4, "property double y")
with self.assertRaisesRegex(
ValueError, "Too little data for an element."
):
with self.assertRaisesRegex(ValueError, "Too little data for an element."):
_load_ply_raw(StringIO("\n".join(lines2)))
lines2[-2] = "3.3 4.2"
_load_ply_raw(StringIO("\n".join(lines2)))
lines2[-2] = "3.3 4.3 2"
with self.assertRaisesRegex(
ValueError, "Too much data for an element."
):
with self.assertRaisesRegex(ValueError, "Too much data for an element."):
_load_ply_raw(StringIO("\n".join(lines2)))
# Now make the ply file actually be readable as a Mesh
with self.assertRaisesRegex(
ValueError, "The ply file has no face element."
):
with self.assertRaisesRegex(ValueError, "The ply file has no face element."):
load_ply(StringIO("\n".join(lines)))
lines2 = lines.copy()
@ -398,9 +378,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
lines2.insert(5, "property float z")
lines2.insert(5, "property float y")
lines2[-2] = "0 0 0"
with self.assertRaisesRegex(
ValueError, "Faces must have at least 3 vertices."
):
with self.assertRaisesRegex(ValueError, "Faces must have at least 3 vertices."):
load_ply(StringIO("\n".join(lines2)))
# Good one
@ -408,17 +386,11 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
load_ply(StringIO("\n".join(lines2)))
@staticmethod
def _bm_save_ply(
verts: torch.Tensor, faces: torch.Tensor, decimal_places: int
):
return lambda: save_ply(
StringIO(), verts, faces, decimal_places=decimal_places
)
def _bm_save_ply(verts: torch.Tensor, faces: torch.Tensor, decimal_places: int):
return lambda: save_ply(StringIO(), verts, faces, decimal_places=decimal_places)
@staticmethod
def _bm_load_ply(
verts: torch.Tensor, faces: torch.Tensor, decimal_places: int
):
def _bm_load_ply(verts: torch.Tensor, faces: torch.Tensor, decimal_places: int):
f = StringIO()
save_ply(f, verts, faces, decimal_places)
s = f.getvalue()
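
For context, the save/load round trip the PLY tests and benchmarks build on, sketched with arbitrary torus dimensions:

from io import StringIO

from pytorch3d.io import load_ply, save_ply
from pytorch3d.utils import torus

verts, faces = torus(r=0.25, R=1.0, sides=8, rings=8).get_mesh_verts_faces(0)
f = StringIO()
save_ply(f, verts, faces, decimal_places=4)
verts2, faces2 = load_ply(StringIO(f.getvalue()))  # round-trips up to 4 decimals
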


@ -1,13 +1,12 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d.structures.pointclouds import Pointclouds
class TestPointclouds(TestCaseMixin, unittest.TestCase):
@ -52,13 +51,11 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
normals_list, features_list = None, None
if with_normals:
normals_list = [
torch.rand((i, 3), device=device, dtype=torch.float32)
for i in p
torch.rand((i, 3), device=device, dtype=torch.float32) for i in p
]
if with_features:
features_list = [
torch.rand((i, channels), device=device, dtype=torch.float32)
for i in p
torch.rand((i, channels), device=device, dtype=torch.float32) for i in p
]
if lists_to_tensors:
@ -68,9 +65,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
if with_features:
features_list = torch.stack(features_list)
return Pointclouds(
points_list, normals=normals_list, features=features_list
)
return Pointclouds(points_list, normals=normals_list, features=features_list)
def test_simple(self):
device = torch.device("cuda:0")
@ -81,12 +76,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
device=device,
),
torch.tensor(
[
[0.1, 0.3, 0.3],
[0.6, 0.7, 0.8],
[0.2, 0.3, 0.4],
[0.1, 0.5, 0.3],
],
[[0.1, 0.3, 0.3], [0.6, 0.7, 0.8], [0.2, 0.3, 0.4], [0.1, 0.5, 0.3]],
dtype=torch.float32,
device=device,
),
@ -111,9 +101,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
self.assertClose(
clouds.cloud_to_packed_first_idx().cpu(), torch.tensor([0, 3, 7])
)
self.assertClose(
clouds.num_points_per_cloud().cpu(), torch.tensor([3, 4, 5])
)
self.assertClose(clouds.num_points_per_cloud().cpu(), torch.tensor([3, 4, 5]))
self.assertClose(
clouds.padded_to_packed_idx().cpu(),
torch.tensor([0, 1, 2, 5, 6, 7, 8, 10, 11, 12, 13, 14]),
@ -129,11 +117,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
"points_padded",
"padded_to_packed_idx",
]
public_normals_getters = [
"normals_list",
"normals_packed",
"normals_padded",
]
public_normals_getters = ["normals_list", "normals_packed", "normals_padded"]
public_features_getters = [
"features_list",
"features_packed",
@ -147,17 +131,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
points_data = [torch.zeros((max_len, 3)).uniform_() for i in lengths]
normals_data = [torch.zeros((max_len, 3)).uniform_() for i in lengths]
features_data = [torch.zeros((max_len, C)).uniform_() for i in lengths]
for length, p, n, f in zip(
lengths, points_data, normals_data, features_data
):
for length, p, n, f in zip(lengths, points_data, normals_data, features_data):
p[length:] = 0.0
n[length:] = 0.0
f[length:] = 0.0
points_list = [d[:length] for length, d in zip(lengths, points_data)]
normals_list = [d[:length] for length, d in zip(lengths, normals_data)]
features_list = [
d[:length] for length, d in zip(lengths, features_data)
]
features_list = [d[:length] for length, d in zip(lengths, features_data)]
points_packed = torch.cat(points_data)
normals_packed = torch.cat(normals_data)
features_packed = torch.cat(features_data)
@ -173,13 +153,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
("emptylist_emptylist_emptylist", [], [], []),
]
false_cases_inputs = [
(
"list_packed",
points_list,
normals_packed,
features_packed,
ValueError,
),
("list_packed", points_list, normals_packed, features_packed, ValueError),
("packed_0", points_packed, None, None, ValueError),
]
@ -230,15 +204,11 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
self.assertIsNone(features_padded)
for n in range(N):
p = points_list[n].shape[0]
self.assertClose(
points_padded[n, :p, :], points_list[n]
)
self.assertClose(points_padded[n, :p, :], points_list[n])
if with_normals:
norms = normals_list[n].shape[0]
self.assertEqual(p, norms)
self.assertClose(
normals_padded[n, :p, :], normals_list[n]
)
self.assertClose(normals_padded[n, :p, :], normals_list[n])
if with_features:
f = features_list[n].shape[0]
self.assertEqual(p, f)
@ -248,9 +218,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
if points_padded.shape[1] > p:
self.assertTrue(points_padded[n, p:, :].eq(0).all())
if with_features:
self.assertTrue(
features_padded[n, p:, :].eq(0).all()
)
self.assertTrue(features_padded[n, p:, :].eq(0).all())
self.assertEqual(points_per_cloud[n], p)
# Check compute packed.
@ -272,17 +240,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
)
if with_normals:
self.assertClose(
normals_packed[cur : cur + p, :],
normals_list[n],
normals_packed[cur : cur + p, :], normals_list[n]
)
if with_features:
self.assertClose(
features_packed[cur : cur + p, :],
features_list[n],
features_packed[cur : cur + p, :], features_list[n]
)
self.assertTrue(
packed_to_cloud[cur : cur + p].eq(n).all()
)
self.assertTrue(packed_to_cloud[cur : cur + p].eq(n).all())
self.assertTrue(cloud_to_packed[n] == cur)
cur += p
@ -312,9 +276,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
)[0]
points = torch.rand((p, 3), dtype=torch.float32, device=device)
normals = torch.rand((p, 3), dtype=torch.float32, device=device)
features = torch.rand(
(p, C), dtype=torch.float32, device=device
)
features = torch.rand((p, C), dtype=torch.float32, device=device)
else:
points = torch.tensor([], dtype=torch.float32, device=device)
normals = torch.tensor([], dtype=torch.float32, device=device)
@ -331,9 +293,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
if with_features:
this_features = features_list
clouds = Pointclouds(
points=points_list,
normals=this_normals,
features=this_features,
points=points_list, normals=this_normals, features=this_features
)
points_padded = clouds.points_padded()
normals_padded = clouds.normals_padded()
@ -346,13 +306,9 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
for n in range(N):
p = len(points_list[n])
if p > 0:
self.assertClose(
points_padded[n, :p, :], points_list[n]
)
self.assertClose(points_padded[n, :p, :], points_list[n])
if with_normals:
self.assertClose(
normals_padded[n, :p, :], normals_list[n]
)
self.assertClose(normals_padded[n, :p, :], normals_list[n])
if with_features:
self.assertClose(
features_padded[n, :p, :], features_list[n]
@ -360,13 +316,9 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
if points_padded.shape[1] > p:
self.assertTrue(points_padded[n, p:, :].eq(0).all())
if with_normals:
self.assertTrue(
normals_padded[n, p:, :].eq(0).all()
)
self.assertTrue(normals_padded[n, p:, :].eq(0).all())
if with_features:
self.assertTrue(
features_padded[n, p:, :].eq(0).all()
)
self.assertTrue(features_padded[n, p:, :].eq(0).all())
self.assertTrue(points_per_cloud[n] == p)
def test_clone_list(self):
@ -379,12 +331,8 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
new_clouds = clouds.clone()
# Check cloned and original objects do not share tensors.
self.assertSeparate(
new_clouds.points_list()[0], clouds.points_list()[0]
)
self.assertSeparate(
new_clouds.normals_list()[0], clouds.normals_list()[0]
)
self.assertSeparate(new_clouds.points_list()[0], clouds.points_list()[0])
self.assertSeparate(new_clouds.normals_list()[0], clouds.normals_list()[0])
self.assertSeparate(
new_clouds.features_list()[0], clouds.features_list()[0]
)
@ -412,12 +360,8 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
new_clouds = clouds.clone()
# Check cloned and original objects do not share tensors.
self.assertSeparate(
new_clouds.points_list()[0], clouds.points_list()[0]
)
self.assertSeparate(
new_clouds.normals_list()[0], clouds.normals_list()[0]
)
self.assertSeparate(new_clouds.points_list()[0], clouds.points_list()[0])
self.assertSeparate(new_clouds.normals_list()[0], clouds.normals_list()[0])
self.assertSeparate(
new_clouds.features_list()[0], clouds.features_list()[0]
)
@ -442,9 +386,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
for i in range(N):
self.assertClose(cloud1.points_list()[i], cloud2.points_list()[i])
self.assertClose(cloud1.normals_list()[i], cloud2.normals_list()[i])
self.assertClose(
cloud1.features_list()[i], cloud2.features_list()[i]
)
self.assertClose(cloud1.features_list()[i], cloud2.features_list()[i])
has_normals = cloud1.normals_list() is not None
self.assertTrue(has_normals == (cloud2.normals_list() is not None))
has_features = cloud1.features_list() is not None
@ -459,22 +401,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
if has_features:
self.assertClose(cloud1.features_padded(), cloud2.features_padded())
self.assertClose(cloud1.features_packed(), cloud2.features_packed())
self.assertClose(cloud1.packed_to_cloud_idx(), cloud2.packed_to_cloud_idx())
self.assertClose(
cloud1.packed_to_cloud_idx(), cloud2.packed_to_cloud_idx()
)
self.assertClose(
cloud1.cloud_to_packed_first_idx(),
cloud2.cloud_to_packed_first_idx(),
)
self.assertClose(
cloud1.num_points_per_cloud(), cloud2.num_points_per_cloud()
)
self.assertClose(
cloud1.packed_to_cloud_idx(), cloud2.packed_to_cloud_idx()
)
self.assertClose(
cloud1.padded_to_packed_idx(), cloud2.padded_to_packed_idx()
cloud1.cloud_to_packed_first_idx(), cloud2.cloud_to_packed_first_idx()
)
self.assertClose(cloud1.num_points_per_cloud(), cloud2.num_points_per_cloud())
self.assertClose(cloud1.packed_to_cloud_idx(), cloud2.packed_to_cloud_idx())
self.assertClose(cloud1.padded_to_packed_idx(), cloud2.padded_to_packed_idx())
self.assertTrue(all(cloud1.valid == cloud2.valid))
self.assertTrue(cloud1.equisized == cloud2.equisized)
@ -482,9 +415,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
def naive_offset(clouds, offsets_packed):
new_points_packed = clouds.points_packed() + offsets_packed
new_points_list = list(
new_points_packed.split(
clouds.num_points_per_cloud().tolist(), 0
)
new_points_packed.split(clouds.num_points_per_cloud().tolist(), 0)
)
return Pointclouds(
points=new_points_list,
@ -502,9 +433,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
clouds._compute_padded()
clouds.padded_to_packed_idx()
deform = torch.rand(
(all_p, 3), dtype=torch.float32, device=clouds.device
)
deform = torch.rand((all_p, 3), dtype=torch.float32, device=clouds.device)
new_clouds_naive = naive_offset(clouds, deform)
new_clouds = clouds.offset(deform)
@ -521,8 +450,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
clouds.normals_list()[i], new_clouds_naive.normals_list()[i]
)
self.assertClose(
clouds.features_list()[i],
new_clouds_naive.features_list()[i],
clouds.features_list()[i], new_clouds_naive.features_list()[i]
)
self.assertCloudsEqual(new_clouds, new_clouds_naive)
@ -550,15 +478,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
new_clouds = clouds.scale(scales)
for i in range(N):
self.assertClose(
scales[i] * clouds.points_list()[i],
new_clouds.points_list()[i],
scales[i] * clouds.points_list()[i], new_clouds.points_list()[i]
)
self.assertClose(
clouds.normals_list()[i], new_clouds_naive.normals_list()[i]
)
self.assertClose(
clouds.features_list()[i],
new_clouds_naive.features_list()[i],
clouds.features_list()[i], new_clouds_naive.features_list()[i]
)
self.assertCloudsEqual(new_clouds, new_clouds_naive)
@ -576,20 +502,15 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
for i in range(len(clouds)):
for n in range(N):
self.assertClose(
clouds.points_list()[i],
new_clouds.points_list()[i * N + n],
clouds.points_list()[i], new_clouds.points_list()[i * N + n]
)
self.assertClose(
clouds.normals_list()[i],
new_clouds.normals_list()[i * N + n],
clouds.normals_list()[i], new_clouds.normals_list()[i * N + n]
)
self.assertClose(
clouds.features_list()[i],
new_clouds.features_list()[i * N + n],
)
self.assertTrue(
clouds.valid[i] == new_clouds.valid[i * N + n]
clouds.features_list()[i], new_clouds.features_list()[i * N + n]
)
self.assertTrue(clouds.valid[i] == new_clouds.valid[i * N + n])
self.assertAllSeparate(
clouds.points_list()
+ new_clouds.points_list()
@ -627,8 +548,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
"padded_to_packed_idx",
]:
self.assertClose(
getattr(new_cloud, attrib)().cpu(),
getattr(cloud, attrib)().cpu(),
getattr(new_cloud, attrib)().cpu(), getattr(cloud, attrib)().cpu()
)
for i in range(len(cloud)):
self.assertClose(
@ -638,8 +558,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
cloud.normals_list()[i].cpu(), new_cloud.normals_list()[i].cpu()
)
self.assertClose(
cloud.features_list()[i].cpu(),
new_cloud.features_list()[i].cpu(),
cloud.features_list()[i].cpu(), new_cloud.features_list()[i].cpu()
)
self.assertTrue(all(cloud.valid.cpu() == new_cloud.valid.cpu()))
self.assertTrue(cloud.equisized == new_cloud.equisized)
@ -666,8 +585,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
"padded_to_packed_idx",
]:
self.assertClose(
getattr(new_cloud, attrib)().cpu(),
getattr(cloud, attrib)().cpu(),
getattr(new_cloud, attrib)().cpu(), getattr(cloud, attrib)().cpu()
)
for i in range(len(cloud)):
self.assertClose(
@ -677,8 +595,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
cloud.normals_list()[i].cpu(), new_cloud.normals_list()[i].cpu()
)
self.assertClose(
cloud.features_list()[i].cpu(),
new_cloud.features_list()[i].cpu(),
cloud.features_list()[i].cpu(), new_cloud.features_list()[i].cpu()
)
self.assertTrue(all(cloud.valid.cpu() == new_cloud.valid.cpu()))
self.assertTrue(cloud.equisized == new_cloud.equisized)
@ -698,11 +615,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
self.assertEqual(len(split_clouds[1]), 3)
self.assertTrue(
split_clouds[1].points_list()
== [
clouds.get_cloud(2)[0],
clouds.get_cloud(3)[0],
clouds.get_cloud(4)[0],
]
== [clouds.get_cloud(2)[0], clouds.get_cloud(3)[0], clouds.get_cloud(4)[0]]
)
split_sizes = [2, 0.3]
@ -751,9 +664,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
points_padded = clouds.points_padded()
points_padded_flat = points_padded.view(-1, 3)
self.assertClose(
points_padded_flat[padded_to_packed_idx], points_packed
)
self.assertClose(points_padded_flat[padded_to_packed_idx], points_packed)
idx = padded_to_packed_idx.view(-1, 1).expand(-1, 3)
self.assertClose(points_padded_flat.gather(0, idx), points_packed)
@ -765,16 +676,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
def check_equal(selected, indices):
for selectedIdx, index in indices:
self.assertClose(
selected.points_list()[selectedIdx],
clouds.points_list()[index],
selected.points_list()[selectedIdx], clouds.points_list()[index]
)
self.assertClose(
selected.normals_list()[selectedIdx],
clouds.normals_list()[index],
selected.normals_list()[selectedIdx], clouds.normals_list()[index]
)
self.assertClose(
selected.features_list()[selectedIdx],
clouds.features_list()[index],
selected.features_list()[selectedIdx], clouds.features_list()[index]
)
# int index
@ -820,11 +728,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
for with_normfeat in (True, False):
for with_new_normfeat in (True, False):
clouds = self.init_cloud(
N,
P,
C,
with_normals=with_normfeat,
with_features=with_normfeat,
N, P, C, with_normals=with_normfeat, with_features=with_normfeat
)
num_points_per_cloud = clouds.num_points_per_cloud()
@ -843,8 +747,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
clouds.points_padded().shape, device=clouds.device
)
new_normals_list = [
new_normals[i, : num_points_per_cloud[i]]
for i in range(N)
new_normals[i, : num_points_per_cloud[i]] for i in range(N)
]
feat_shape = [
clouds.points_padded().shape[0],
@ -853,14 +756,11 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
]
new_features = torch.rand(feat_shape, device=clouds.device)
new_features_list = [
new_features[i, : num_points_per_cloud[i]]
for i in range(N)
new_features[i, : num_points_per_cloud[i]] for i in range(N)
]
# update
new_clouds = clouds.update_padded(
new_points, new_normals, new_features
)
new_clouds = clouds.update_padded(new_points, new_normals, new_features)
self.assertIsNone(new_clouds._points_list)
self.assertIsNone(new_clouds._points_packed)
@ -868,13 +768,9 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
self.assertTrue(all(new_clouds.valid == clouds.valid))
self.assertClose(new_clouds.points_padded(), new_points)
self.assertClose(
new_clouds.points_packed(), torch.cat(new_points_list)
)
self.assertClose(new_clouds.points_packed(), torch.cat(new_points_list))
for i in range(N):
self.assertClose(
new_clouds.points_list()[i], new_points_list[i]
)
self.assertClose(new_clouds.points_list()[i], new_points_list[i])
if with_new_normfeat:
for i in range(N):
@ -890,27 +786,22 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
)
self.assertClose(new_clouds.features_padded(), new_features)
self.assertClose(
new_clouds.features_packed(),
torch.cat(new_features_list),
new_clouds.features_packed(), torch.cat(new_features_list)
)
else:
if with_normfeat:
for i in range(N):
self.assertClose(
new_clouds.normals_list()[i],
clouds.normals_list()[i],
new_clouds.normals_list()[i], clouds.normals_list()[i]
)
self.assertClose(
new_clouds.features_list()[i],
clouds.features_list()[i],
new_clouds.features_list()[i], clouds.features_list()[i]
)
self.assertNotSeparate(
new_clouds.normals_list()[i],
clouds.normals_list()[i],
new_clouds.normals_list()[i], clouds.normals_list()[i]
)
self.assertNotSeparate(
new_clouds.features_list()[i],
clouds.features_list()[i],
new_clouds.features_list()[i], clouds.features_list()[i]
)
self.assertClose(
@ -920,19 +811,16 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
new_clouds.normals_packed(), clouds.normals_packed()
)
self.assertClose(
new_clouds.features_padded(),
clouds.features_padded(),
new_clouds.features_padded(), clouds.features_padded()
)
self.assertClose(
new_clouds.features_packed(),
clouds.features_packed(),
new_clouds.features_packed(), clouds.features_packed()
)
self.assertNotSeparate(
new_clouds.normals_padded(), clouds.normals_padded()
)
self.assertNotSeparate(
new_clouds.features_padded(),
clouds.features_padded(),
new_clouds.features_padded(), clouds.features_padded()
)
else:
self.assertIsNone(new_clouds.normals_list())
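
As with Meshes, a Pointclouds object keeps list, padded and packed views of its points and of optional per-point normals and features. A minimal sketch with invented sizes:

import torch
from pytorch3d.structures.pointclouds import Pointclouds

points = [torch.rand(3, 3), torch.rand(4, 3)]
normals = [torch.rand(3, 3), torch.rand(4, 3)]
features = [torch.rand(3, 5), torch.rand(4, 5)]  # C = 5 channels, arbitrary
clouds = Pointclouds(points=points, normals=normals, features=features)

clouds.points_padded().shape   # (2, 4, 3), zero-padded
clouds.points_packed().shape   # (7, 3)
clouds.num_points_per_cloud()  # tensor([3, 4])
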


@ -2,8 +2,9 @@
import functools
import unittest
import torch
from common_testing import TestCaseMixin
from pytorch3d import _C
from pytorch3d.renderer.mesh.rasterize_meshes import (
rasterize_meshes,
@ -12,20 +13,14 @@ from pytorch3d.renderer.mesh.rasterize_meshes import (
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere
class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
def test_simple_python(self):
device = torch.device("cpu")
self._simple_triangle_raster(
rasterize_meshes_python, device, bin_size=-1
)
self._simple_triangle_raster(rasterize_meshes_python, device, bin_size=-1)
self._simple_blurry_raster(rasterize_meshes_python, device, bin_size=-1)
self._test_behind_camera(rasterize_meshes_python, device, bin_size=-1)
self._test_perspective_correct(
rasterize_meshes_python, device, bin_size=-1
)
self._test_perspective_correct(rasterize_meshes_python, device, bin_size=-1)
def test_simple_cpu_naive(self):
device = torch.device("cpu")
@ -350,9 +345,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)
fn2 = functools.partial(rasterize_meshes_python, meshes2, **kwargs)
args = ()
self._compare_impls(
fn1, fn2, args, args, verts1, verts2, compare_grads=True
)
self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)
def test_cpp_vs_cuda_perspective_correct(self):
meshes = ico_sphere(2, device=torch.device("cpu"))
@ -367,9 +360,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)
fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=0, **kwargs)
args = ()
self._compare_impls(
fn1, fn2, args, args, verts1, verts2, compare_grads=True
)
self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)
def test_cuda_naive_vs_binned_perspective_correct(self):
meshes = ico_sphere(2, device=torch.device("cuda"))
@ -384,9 +375,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
fn1 = functools.partial(rasterize_meshes, meshes1, bin_size=0, **kwargs)
fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=8, **kwargs)
args = ()
self._compare_impls(
fn1, fn2, args, args, verts1, verts2, compare_grads=True
)
self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)
def _compare_impls(
self,
@ -433,9 +422,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
grad_verts2 = grad_var2.grad.data.clone().cpu()
self.assertClose(grad_verts1, grad_verts2, rtol=1e-3)
def _test_perspective_correct(
self, rasterize_meshes_fn, device, bin_size=None
):
def _test_perspective_correct(self, rasterize_meshes_fn, device, bin_size=None):
# fmt: off
verts = torch.tensor([
[-0.4, -0.4, 10], # noqa: E241, E201
@ -542,12 +529,8 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
zbuf_f_bary = w0_f * z0 + w1_f * z1 + w2_f * z2
zbuf_t_bary = w0_t * z0 + w1_t * z1 + w2_t * z2
mask = idx_expected != -1
zbuf_f_bary_diff = (
(zbuf_f_bary[mask] - zbuf_f_expected[mask]).abs().max()
)
zbuf_t_bary_diff = (
(zbuf_t_bary[mask] - zbuf_t_expected[mask]).abs().max()
)
zbuf_f_bary_diff = (zbuf_f_bary[mask] - zbuf_f_expected[mask]).abs().max()
zbuf_t_bary_diff = (zbuf_t_bary[mask] - zbuf_t_expected[mask]).abs().max()
self.assertLess(zbuf_f_bary_diff, 1e-4)
self.assertLess(zbuf_t_bary_diff, 1e-4)
@ -719,9 +702,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
# k = 1, second closest point.
expected_p2face_k1 = expected_p2face_k0.clone()
expected_p2face_k1[0, :] = (
torch.ones_like(expected_p2face_k1[0, :]) * -1
)
expected_p2face_k1[0, :] = torch.ones_like(expected_p2face_k1[0, :]) * -1
# fmt: off
expected_p2face_k1[1, :] = torch.tensor(
@ -763,9 +744,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
# Coordinate conventions +Y up, +Z in, +X left
if bin_size == -1:
# simple python, no bin_size
p2face, zbuf, bary, pix_dists = raster_fn(
meshes, image_size, 0.0, 2
)
p2face, zbuf, bary, pix_dists = raster_fn(meshes, image_size, 0.0, 2)
else:
p2face, zbuf, bary, pix_dists = raster_fn(
meshes, image_size, 0.0, 2, bin_size
@ -914,9 +893,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
# Expected faces using axes convention +Y down, + X right, + Z in
bin_faces_expected = (
torch.ones(
(1, 2, 2, max_faces_per_bin), dtype=torch.int32, device=device
)
torch.ones((1, 2, 2, max_faces_per_bin), dtype=torch.int32, device=device)
* -1
)
bin_faces_expected[0, 0, 0, 0] = torch.tensor([1])
@ -979,12 +956,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
def rasterize():
rasterize_meshes(
meshes_batch,
image_size,
blur_radius,
8,
bin_size,
max_faces_per_bin,
meshes_batch, image_size, blur_radius, 8, bin_size, max_faces_per_bin
)
torch.cuda.synchronize()
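
The four tensors compared throughout these hunks come from the functional rasterizer entry point; a small sketch with arbitrary settings:

from pytorch3d.renderer.mesh.rasterize_meshes import rasterize_meshes
from pytorch3d.utils import ico_sphere

meshes = ico_sphere(2)
pix_to_face, zbuf, bary_coords, pix_dists = rasterize_meshes(
    meshes, image_size=64, blur_radius=0.0, faces_per_pixel=2
)
# pix_to_face: (N, H, W, K) indices into faces_packed(), -1 for empty pixels;
# zbuf, bary_coords and pix_dists are the matching per-pixel depth,
# barycentric coordinates and point-to-face distances used for blending.
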


@ -1,10 +1,11 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d import _C
from pytorch3d.renderer.points.rasterize_points import (
rasterize_points,
@ -12,8 +13,6 @@ from pytorch3d.renderer.points.rasterize_points import (
)
from pytorch3d.structures.pointclouds import Pointclouds
class TestRasterizePoints(TestCaseMixin, unittest.TestCase):
def test_python_simple_cpu(self):
@ -38,9 +37,7 @@ class TestRasterizePoints(TestCaseMixin, unittest.TestCase):
self._test_behind_camera(rasterize_points, torch.device("cpu"))
def test_cuda_behind_camera(self):
self._test_behind_camera(
rasterize_points, torch.device("cuda"), bin_size=0
)
self._test_behind_camera(rasterize_points, torch.device("cuda"), bin_size=0)
def test_cpp_vs_naive_vs_binned(self):
# Make sure that the backward pass runs for all pathways
@ -167,20 +164,8 @@ class TestRasterizePoints(TestCaseMixin, unittest.TestCase):
points_cuda = points_cpu.cuda().detach().requires_grad_(True)
pointclouds_cpu = Pointclouds(points=points_cpu)
pointclouds_cuda = Pointclouds(points=points_cuda)
args_cpu = (
pointclouds_cpu,
image_size,
radius,
points_per_pixel,
bin_size,
)
args_cuda = (
pointclouds_cuda,
image_size,
radius,
points_per_pixel,
bin_size,
)
args_cpu = (pointclouds_cpu, image_size, radius, points_per_pixel, bin_size)
args_cuda = (pointclouds_cuda, image_size, radius, points_per_pixel, bin_size)
self._compare_impls(
rasterize_points,
rasterize_points,
@ -332,9 +317,7 @@ class TestRasterizePoints(TestCaseMixin, unittest.TestCase):
], device=device)
# fmt: on
dists1_expected = torch.zeros(
(5, 5, 2), dtype=torch.float32, device=device
)
dists1_expected = torch.zeros((5, 5, 2), dtype=torch.float32, device=device)
# fmt: off
dists1_expected[:, :, 0] = torch.tensor([
[-1.00, -1.00, 0.16, -1.00, -1.00], # noqa: E241
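
The point-cloud rasterizer exercised above is analogous but returns three tensors; a sketch with arbitrary settings:

import torch
from pytorch3d.renderer.points.rasterize_points import rasterize_points
from pytorch3d.structures.pointclouds import Pointclouds

clouds = Pointclouds(points=[torch.rand(100, 3)])
idx, zbuf, dists = rasterize_points(
    clouds, image_size=32, radius=0.05, points_per_pixel=2
)
# idx: (N, H, W, K) nearest-point indices, -1 where no point covers the pixel;
# dists holds squared pixel-to-point distances in NDC units.
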


@ -1,22 +1,17 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from pytorch3d.renderer.cameras import (
OpenGLPerspectiveCameras,
look_at_view_transform,
)
from pytorch3d.renderer.mesh.rasterizer import (
MeshRasterizer,
RasterizationSettings,
)
from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras, look_at_view_transform
from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
from pytorch3d.utils.ico_sphere import ico_sphere
DATA_DIR = Path(__file__).resolve().parent / "data"
DEBUG = False # Set DEBUG to true to save outputs from the tests.
@ -52,9 +47,7 @@ class TestMeshRasterizer(unittest.TestCase):
)
# Init rasterizer
rasterizer = MeshRasterizer(
cameras=cameras, raster_settings=raster_settings
)
rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
####################################
# 1. Test rasterizing a single mesh
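
The rest of this file is cut off in this view; for context, a sketch of how the rasterizer configured above is typically invoked (values invented, not taken from the diff):

from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras, look_at_view_transform
from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
from pytorch3d.utils.ico_sphere import ico_sphere

R, T = look_at_view_transform(dist=2.7, elev=0.0, azim=0.0)
cameras = OpenGLPerspectiveCameras(R=R, T=T)
raster_settings = RasterizationSettings(
    image_size=64, blur_radius=0.0, faces_per_pixel=1
)
rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
fragments = rasterizer(ico_sphere(2))
# fragments bundles pix_to_face, zbuf, bary_coords and dists.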
