From d57daa6f855f581268d875a0ad93c570871f457b Mon Sep 17 00:00:00 2001 From: Patrick Labatut Date: Sun, 29 Mar 2020 14:46:33 -0700 Subject: [PATCH] Address black + isort fbsource linter warnings Summary: Address black + isort fbsource linter warnings from D20558374 (previous diff) Reviewed By: nikhilaravi Differential Revision: D20558373 fbshipit-source-id: d3607de4a01fb24c0d5269634563a7914bddf1c8 --- .circleci/regenerate.py | 9 +- docs/conf.py | 9 +- ...ints.ipynb => render_colored_points.ipynb} | 4 +- docs/tutorials/utils/__init__.py | 6 +- docs/tutorials/utils/plot_image_grid.py | 8 +- pytorch3d/io/__init__.py | 1 + pytorch3d/io/obj_io.py | 27 +- pytorch3d/io/ply_io.py | 49 +--- pytorch3d/loss/__init__.py | 1 + pytorch3d/loss/chamfer.py | 22 +- pytorch3d/loss/mesh_normal_consistency.py | 10 +- pytorch3d/ops/__init__.py | 1 + pytorch3d/ops/cubify.py | 5 +- pytorch3d/ops/graph_conv.py | 7 +- pytorch3d/ops/knn.py | 1 - pytorch3d/ops/mesh_face_areas_normals.py | 3 +- pytorch3d/ops/nearest_neighbor_points.py | 5 +- pytorch3d/ops/packed_to_padded.py | 3 +- pytorch3d/ops/sample_points_from_meshes.py | 20 +- pytorch3d/ops/subdivide_meshes.py | 33 +-- pytorch3d/ops/vert_align.py | 4 +- pytorch3d/renderer/__init__.py | 1 + pytorch3d/renderer/blending.py | 16 +- pytorch3d/renderer/cameras.py | 67 ++--- pytorch3d/renderer/compositing.py | 23 +- pytorch3d/renderer/lighting.py | 19 +- pytorch3d/renderer/mesh/__init__.py | 6 +- pytorch3d/renderer/mesh/rasterize_meshes.py | 17 +- pytorch3d/renderer/mesh/rasterizer.py | 6 +- pytorch3d/renderer/mesh/renderer.py | 9 +- pytorch3d/renderer/mesh/shader.py | 86 ++---- pytorch3d/renderer/mesh/shading.py | 9 +- pytorch3d/renderer/mesh/texturing.py | 9 +- pytorch3d/renderer/mesh/utils.py | 4 +- pytorch3d/renderer/points/__init__.py | 1 + pytorch3d/renderer/points/compositor.py | 17 +- pytorch3d/renderer/points/rasterize_points.py | 11 +- pytorch3d/renderer/points/rasterizer.py | 1 + pytorch3d/renderer/points/renderer.py | 1 + pytorch3d/renderer/utils.py | 16 +- pytorch3d/structures/__init__.py | 8 +- pytorch3d/structures/meshes.py | 70 ++--- pytorch3d/structures/pointclouds.py | 56 +--- pytorch3d/structures/textures.py | 7 +- pytorch3d/structures/utils.py | 25 +- pytorch3d/transforms/__init__.py | 1 + pytorch3d/transforms/rotation_conversions.py | 19 +- pytorch3d/transforms/so3.py | 5 +- pytorch3d/transforms/transform3d.py | 31 +-- pytorch3d/utils/__init__.py | 1 + pytorch3d/utils/ico_sphere.py | 2 +- pytorch3d/utils/torus.py | 8 +- scripts/parse_tutorials.py | 11 +- setup.py | 3 +- tests/bm_blending.py | 9 +- tests/bm_chamfer.py | 8 +- tests/bm_cubify.py | 5 +- tests/bm_face_areas_normals.py | 2 +- tests/bm_graph_conv.py | 2 +- tests/bm_knn.py | 15 +- tests/bm_main.py | 5 +- tests/bm_mesh_edge_loss.py | 7 +- tests/bm_mesh_io.py | 1 - tests/bm_mesh_laplacian_smoothing.py | 2 +- tests/bm_mesh_normal_consistency.py | 2 +- tests/bm_meshes.py | 6 +- tests/bm_nearest_neighbor_points.py | 2 +- tests/bm_packed_to_padded.py | 10 +- tests/bm_pointclouds.py | 2 +- tests/bm_rasterize_meshes.py | 14 +- tests/bm_rasterize_points.py | 5 +- tests/bm_sample_points_from_meshes.py | 2 +- tests/bm_so3.py | 1 - tests/bm_subdivide_meshes.py | 2 +- tests/bm_vert_align.py | 7 +- tests/common_testing.py | 15 +- tests/test_blending.py | 50 +--- tests/test_build.py | 4 +- tests/test_cameras.py | 55 ++-- tests/test_chamfer.py | 27 +- tests/test_compositing.py | 42 +-- tests/test_cubify.py | 14 +- tests/test_face_areas_normals.py | 19 +- tests/test_graph_conv.py | 32 +-- tests/test_knn.py 
| 2 +- tests/test_lighting.py | 48 ++-- tests/test_materials.py | 16 +- tests/test_mesh_edge_loss.py | 21 +- tests/test_mesh_laplacian_smoothing.py | 25 +- tests/test_mesh_normal_consistency.py | 20 +- tests/test_mesh_rendering_utils.py | 2 +- tests/test_meshes.py | 210 ++++----------- tests/test_nearest_neighbor_points.py | 6 +- tests/test_obj_io.py | 154 +++-------- tests/test_packed_to_padded.py | 63 ++--- tests/test_ply_io.py | 54 +--- tests/test_pointclouds.py | 244 +++++------------- tests/test_rasterize_meshes.py | 56 +--- tests/test_rasterize_points.py | 31 +-- tests/test_rasterizer.py | 19 +- tests/test_rendering_meshes.py | 59 ++--- tests/test_rendering_utils.py | 15 +- tests/test_rotation_conversions.py | 6 +- tests/test_sample_points_from_meshes.py | 73 ++---- tests/test_so3.py | 16 +- tests/test_struct_utils.py | 49 +--- tests/test_subdivide_meshes.py | 28 +- tests/test_texturing.py | 57 ++-- tests/test_transforms.py | 66 ++--- tests/test_vert_align.py | 54 +--- 110 files changed, 705 insertions(+), 1850 deletions(-) rename docs/tutorials/{render_coloured_points.ipynb => render_colored_points.ipynb} (99%) diff --git a/.circleci/regenerate.py b/.circleci/regenerate.py index 39638e5f..71440518 100755 --- a/.circleci/regenerate.py +++ b/.circleci/regenerate.py @@ -8,6 +8,7 @@ TODO: python 3.8 when pytorch 1.4. """ import os.path + import jinja2 import yaml @@ -45,9 +46,7 @@ def workflow_pair( ): w = [] - base_workflow_name = ( - f"{prefix}binary_linux_{btype}_py{python_version}_{cu_version}" - ) + base_workflow_name = f"{prefix}binary_linux_{btype}_py{python_version}_{cu_version}" w.append( generate_base_workflow( @@ -94,9 +93,7 @@ def generate_base_workflow( return {f"binary_linux_{btype}": d} -def generate_upload_workflow( - *, base_workflow_name, btype, cu_version, filter_branch -): +def generate_upload_workflow(*, base_workflow_name, btype, cu_version, filter_branch): d = { "name": f"{base_workflow_name}_upload", "context": "org-member", diff --git a/docs/conf.py b/docs/conf.py index 4789d1ea..87ed6a23 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,6 +22,7 @@ from recommonmark.states import DummyStateMachine from sphinx.builders.html import StandaloneHTMLBuilder from sphinx.ext.autodoc import between + # Monkey patch to fix recommonmark 0.4 doc reference issues. 
orig_run_role = DummyStateMachine.run_role @@ -154,9 +155,7 @@ html_theme_options = {"collapse_navigation": True} def url_resolver(url): if ".html" not in url: url = url.replace("../", "") - return ( - "https://github.com/facebookresearch/pytorch3d/blob/master/" + url - ) + return "https://github.com/facebookresearch/pytorch3d/blob/master/" + url else: if DEPLOY: return "http://pytorch3d.readthedocs.io/" + url @@ -188,9 +187,7 @@ def setup(app): # Register a sphinx.ext.autodoc.between listener to ignore everything # between lines that contain the word IGNORE - app.connect( - "autodoc-process-docstring", between("^.*IGNORE.*$", exclude=True) - ) + app.connect("autodoc-process-docstring", between("^.*IGNORE.*$", exclude=True)) app.add_transform(AutoStructify) return app diff --git a/docs/tutorials/render_coloured_points.ipynb b/docs/tutorials/render_colored_points.ipynb similarity index 99% rename from docs/tutorials/render_coloured_points.ipynb rename to docs/tutorials/render_colored_points.ipynb index b3143b18..d552b0e1 100644 --- a/docs/tutorials/render_coloured_points.ipynb +++ b/docs/tutorials/render_colored_points.ipynb @@ -15,7 +15,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Render a coloured point cloud\n", + "# Render a colored point cloud\n", "\n", "This tutorial shows how to:\n", "- set up a renderer \n", @@ -84,7 +84,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Load a point cloud and corresponding colours\n", + "### Load a point cloud and corresponding colors\n", "\n", "Load a `.ply` file and create a **Point Cloud** object. \n", "\n", diff --git a/docs/tutorials/utils/__init__.py b/docs/tutorials/utils/__init__.py index ffc23108..8e3bf5ec 100644 --- a/docs/tutorials/utils/__init__.py +++ b/docs/tutorials/utils/__init__.py @@ -1,8 +1,4 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-from .camera_visualization import ( - get_camera_wireframe, - plot_camera_scene, - plot_cameras, -) +from .camera_visualization import get_camera_wireframe, plot_camera_scene, plot_cameras from .plot_image_grid import image_grid diff --git a/docs/tutorials/utils/plot_image_grid.py b/docs/tutorials/utils/plot_image_grid.py index c670c487..7af8e167 100644 --- a/docs/tutorials/utils/plot_image_grid.py +++ b/docs/tutorials/utils/plot_image_grid.py @@ -34,13 +34,9 @@ def image_grid( cols = 1 gridspec_kw = {"wspace": 0.0, "hspace": 0.0} if fill else {} - fig, axarr = plt.subplots( - rows, cols, gridspec_kw=gridspec_kw, figsize=(15, 9) - ) + fig, axarr = plt.subplots(rows, cols, gridspec_kw=gridspec_kw, figsize=(15, 9)) bleed = 0 - fig.subplots_adjust( - left=bleed, bottom=bleed, right=(1 - bleed), top=(1 - bleed) - ) + fig.subplots_adjust(left=bleed, bottom=bleed, right=(1 - bleed), top=(1 - bleed)) for ax, im in zip(axarr.ravel(), images): if rgb: diff --git a/pytorch3d/io/__init__.py b/pytorch3d/io/__init__.py index 0162ae89..388fd126 100644 --- a/pytorch3d/io/__init__.py +++ b/pytorch3d/io/__init__.py @@ -4,4 +4,5 @@ from .obj_io import load_obj, load_objs_as_meshes, save_obj from .ply_io import load_ply, save_ply + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/io/obj_io.py b/pytorch3d/io/obj_io.py index c555b917..5626a0e0 100644 --- a/pytorch3d/io/obj_io.py +++ b/pytorch3d/io/obj_io.py @@ -2,16 +2,16 @@ """This module implements utility functions for loading and saving meshes.""" -import numpy as np import os import pathlib import warnings from collections import namedtuple from typing import List, Optional + +import numpy as np import torch from fvcore.common.file_io import PathManager from PIL import Image - from pytorch3d.structures import Meshes, Textures, join_meshes @@ -51,9 +51,7 @@ def _read_image(file_name: str, format=None): # Faces & Aux type returned from load_obj function. _Faces = namedtuple("Faces", "verts_idx normals_idx textures_idx materials_idx") -_Aux = namedtuple( - "Properties", "normals verts_uvs material_colors texture_images" -) +_Aux = namedtuple("Properties", "normals verts_uvs material_colors texture_images") def _format_faces_indices(faces_indices, max_index): @@ -247,9 +245,7 @@ def load_objs_as_meshes(files: list, device=None, load_textures: bool = True): image = list(tex_maps.values())[0].to(device)[None] tex = Textures(verts_uvs=verts_uvs, faces_uvs=faces_uvs, maps=image) - mesh = Meshes( - verts=[verts], faces=[faces.verts_idx.to(device)], textures=tex - ) + mesh = Meshes(verts=[verts], faces=[faces.verts_idx.to(device)], textures=tex) mesh_list.append(mesh) if len(mesh_list) == 1: return mesh_list[0] @@ -308,9 +304,7 @@ def _parse_face( # Subdivide faces with more than 3 vertices. See comments of the # load_obj function for more details. for i in range(len(face_verts) - 2): - faces_verts_idx.append( - (face_verts[0], face_verts[i + 1], face_verts[i + 2]) - ) + faces_verts_idx.append((face_verts[0], face_verts[i + 1], face_verts[i + 2])) if len(face_normals) > 0: faces_normals_idx.append( (face_normals[0], face_normals[i + 1], face_normals[i + 2]) @@ -367,8 +361,7 @@ def _load(f_obj, data_dir, load_textures=True): tx = [float(x) for x in line.split()[1:3]] if len(tx) != 2: raise ValueError( - "Texture %s does not have 2 values. Line: %s" - % (str(tx), str(line)) + "Texture %s does not have 2 values. 
Line: %s" % (str(tx), str(line)) ) verts_uvs.append(tx) elif line.startswith("vn "): @@ -397,17 +390,13 @@ def _load(f_obj, data_dir, load_textures=True): # Repeat for normals and textures if present. if len(faces_normals_idx) > 0: - faces_normals_idx = _format_faces_indices( - faces_normals_idx, normals.shape[0] - ) + faces_normals_idx = _format_faces_indices(faces_normals_idx, normals.shape[0]) if len(faces_textures_idx) > 0: faces_textures_idx = _format_faces_indices( faces_textures_idx, verts_uvs.shape[0] ) if len(faces_materials_idx) > 0: - faces_materials_idx = torch.tensor( - faces_materials_idx, dtype=torch.int64 - ) + faces_materials_idx = torch.tensor(faces_materials_idx, dtype=torch.int64) # Load materials material_colors, texture_images = None, None diff --git a/pytorch3d/io/ply_io.py b/pytorch3d/io/ply_io.py index cc9d98cd..fe215ead 100644 --- a/pytorch3d/io/ply_io.py +++ b/pytorch3d/io/ply_io.py @@ -4,15 +4,17 @@ """This module implements utility functions for loading and saving meshes.""" -import numpy as np import pathlib import struct import sys import warnings from collections import namedtuple from typing import Optional, Tuple + +import numpy as np import torch + _PlyTypeData = namedtuple("_PlyTypeData", "size struct_char np_type") _PLY_TYPES = { @@ -257,11 +259,7 @@ def _try_read_ply_constant_list_ascii(f, definition: _PlyElementType): "ignore", message=".* Empty input file.*", category=UserWarning ) data = np.loadtxt( - f, - dtype=np_type, - comments=None, - ndmin=2, - max_rows=definition.count, + f, dtype=np_type, comments=None, ndmin=2, max_rows=definition.count ) except ValueError: f.seek(start_point) @@ -301,9 +299,7 @@ def _parse_heterogenous_property_ascii(datum, line_iter, property: _Property): length = int(value) except ValueError: raise ValueError("A list length was not a number.") - list_value = np.zeros( - length, dtype=_PLY_TYPES[property.data_type].np_type - ) + list_value = np.zeros(length, dtype=_PLY_TYPES[property.data_type].np_type) for i in range(length): inner_value = next(line_iter, None) if inner_value is None: @@ -404,8 +400,7 @@ def _read_ply_element_struct(f, definition: _PlyElementType, endian_str: str): values. There is one column for each property. """ format = "".join( - _PLY_TYPES[property.data_type].struct_char - for property in definition.properties + _PLY_TYPES[property.data_type].struct_char for property in definition.properties ) format = endian_str + format pattern = struct.Struct(format) @@ -414,10 +409,7 @@ def _read_ply_element_struct(f, definition: _PlyElementType, endian_str: str): bytes_data = f.read(needed_bytes) if len(bytes_data) != needed_bytes: raise ValueError("Not enough data for %s." % definition.name) - data = [ - pattern.unpack_from(bytes_data, i * size) - for i in range(definition.count) - ] + data = [pattern.unpack_from(bytes_data, i * size) for i in range(definition.count)] return data @@ -475,9 +467,7 @@ def _try_read_ply_constant_list_binary( return output -def _read_ply_element_binary( - f, definition: _PlyElementType, big_endian: bool -) -> list: +def _read_ply_element_binary(f, definition: _PlyElementType, big_endian: bool) -> list: """ Decode all instances of a single element from a binary .ply file. 
@@ -515,9 +505,7 @@ def _read_ply_element_binary( data = [] for _i in range(definition.count): datum = [] - for property, property_struct in zip( - definition.properties, property_structs - ): + for property, property_struct in zip(definition.properties, property_structs): size = property_struct.size initial_data = f.read(size) if len(initial_data) != size: @@ -656,28 +644,19 @@ def load_ply(f): if face is None: raise ValueError("The ply file has no face element.") - if ( - not isinstance(vertex, np.ndarray) - or vertex.ndim != 2 - or vertex.shape[1] != 3 - ): + if not isinstance(vertex, np.ndarray) or vertex.ndim != 2 or vertex.shape[1] != 3: raise ValueError("Invalid vertices in file.") verts = torch.tensor(vertex, dtype=torch.float32) face_head = next(head for head in header.elements if head.name == "face") - if ( - len(face_head.properties) != 1 - or face_head.properties[0].list_size_type is None - ): + if len(face_head.properties) != 1 or face_head.properties[0].list_size_type is None: raise ValueError("Unexpected form of faces data.") # face_head.properties[0].name is usually "vertex_index" or "vertex_indices" # but we don't need to enforce this. if isinstance(face, np.ndarray) and face.ndim == 2: if face.shape[1] < 3: raise ValueError("Faces must have at least 3 vertices.") - face_arrays = [ - face[:, [0, i + 1, i + 2]] for i in range(face.shape[1] - 2) - ] + face_arrays = [face[:, [0, i + 1, i + 2]] for i in range(face.shape[1] - 2)] faces = torch.tensor(np.vstack(face_arrays), dtype=torch.int64) else: face_list = [] @@ -687,9 +666,7 @@ def load_ply(f): if face_item.shape[0] < 3: raise ValueError("Faces must have at least 3 vertices.") for i in range(face_item.shape[0] - 2): - face_list.append( - [face_item[0], face_item[i + 1], face_item[i + 2]] - ) + face_list.append([face_item[0], face_item[i + 1], face_item[i + 2]]) faces = torch.tensor(face_list, dtype=torch.int64) return verts, faces diff --git a/pytorch3d/loss/__init__.py b/pytorch3d/loss/__init__.py index adb362fd..dd6d179e 100644 --- a/pytorch3d/loss/__init__.py +++ b/pytorch3d/loss/__init__.py @@ -6,4 +6,5 @@ from .mesh_edge_loss import mesh_edge_loss from .mesh_laplacian_smoothing import mesh_laplacian_smoothing from .mesh_normal_consistency import mesh_normal_consistency + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/loss/chamfer.py b/pytorch3d/loss/chamfer.py index 17fd4546..bd7c420c 100644 --- a/pytorch3d/loss/chamfer.py +++ b/pytorch3d/loss/chamfer.py @@ -2,13 +2,10 @@ import torch import torch.nn.functional as F - from pytorch3d.ops.nearest_neighbor_points import nn_points_idx -def _validate_chamfer_reduction_inputs( - batch_reduction: str, point_reduction: str -): +def _validate_chamfer_reduction_inputs(batch_reduction: str, point_reduction: str): """Check the requested reductions are valid. Args: @@ -18,17 +15,11 @@ def _validate_chamfer_reduction_inputs( points, can be one of ["none", "mean", "sum"]. """ if batch_reduction not in ["none", "mean", "sum"]: - raise ValueError( - 'batch_reduction must be one of ["none", "mean", "sum"]' - ) + raise ValueError('batch_reduction must be one of ["none", "mean", "sum"]') if point_reduction not in ["none", "mean", "sum"]: - raise ValueError( - 'point_reduction must be one of ["none", "mean", "sum"]' - ) + raise ValueError('point_reduction must be one of ["none", "mean", "sum"]') if batch_reduction == "none" and point_reduction == "none": - raise ValueError( - 'batch_reduction and point_reduction cannot both be "none".' 
- ) + raise ValueError('batch_reduction and point_reduction cannot both be "none".') def chamfer_distance( @@ -87,10 +78,7 @@ def chamfer_distance( (x.sum((1, 2)) * weights).sum() * 0.0, (x.sum((1, 2)) * weights).sum() * 0.0, ) - return ( - (x.sum((1, 2)) * weights) * 0.0, - (x.sum((1, 2)) * weights) * 0.0, - ) + return ((x.sum((1, 2)) * weights) * 0.0, (x.sum((1, 2)) * weights) * 0.0) return_normals = x_normals is not None and y_normals is not None cham_norm_x = x.new_zeros(()) diff --git a/pytorch3d/loss/mesh_normal_consistency.py b/pytorch3d/loss/mesh_normal_consistency.py index 071e2de4..7332b82b 100644 --- a/pytorch3d/loss/mesh_normal_consistency.py +++ b/pytorch3d/loss/mesh_normal_consistency.py @@ -2,6 +2,7 @@ from itertools import islice + import torch @@ -76,10 +77,7 @@ def mesh_normal_consistency(meshes): with torch.no_grad(): edge_idx = face_to_edge.reshape(F * 3) # (3 * F,) indexes into edges vert_idx = ( - faces_packed.view(1, F, 3) - .expand(3, F, 3) - .transpose(0, 1) - .reshape(3 * F, 3) + faces_packed.view(1, F, 3).expand(3, F, 3).transpose(0, 1).reshape(3 * F, 3) ) edge_idx, edge_sort_idx = edge_idx.sort() vert_idx = vert_idx[edge_sort_idx] @@ -132,9 +130,7 @@ def mesh_normal_consistency(meshes): loss = 1 - torch.cosine_similarity(n0, n1, dim=1) verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_idx[:, 0]] - verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[ - vert_edge_pair_idx[:, 0] - ] + verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_edge_pair_idx[:, 0]] num_normals = verts_packed_to_mesh_idx.bincount(minlength=N) weights = 1.0 / num_normals[verts_packed_to_mesh_idx].float() diff --git a/pytorch3d/ops/__init__.py b/pytorch3d/ops/__init__.py index 98ca1735..de2eb61b 100644 --- a/pytorch3d/ops/__init__.py +++ b/pytorch3d/ops/__init__.py @@ -10,4 +10,5 @@ from .sample_points_from_meshes import sample_points_from_meshes from .subdivide_meshes import SubdivideMeshes from .vert_align import vert_align + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/ops/cubify.py b/pytorch3d/ops/cubify.py index 1045ea1e..e0fa3456 100644 --- a/pytorch3d/ops/cubify.py +++ b/pytorch3d/ops/cubify.py @@ -3,7 +3,6 @@ import torch import torch.nn.functional as F - from pytorch3d.structures import Meshes @@ -200,8 +199,6 @@ def cubify(voxels, thresh, device=None) -> Meshes: grid_verts.index_select(0, (idleverts[n] == 0).nonzero()[:, 0]) for n in range(N) ] - faces_list = [ - nface - idlenum[n][nface] for n, nface in enumerate(faces_list) - ] + faces_list = [nface - idlenum[n][nface] for n, nface in enumerate(faces_list)] return Meshes(verts=verts_list, faces=faces_list) diff --git a/pytorch3d/ops/graph_conv.py b/pytorch3d/ops/graph_conv.py index 3beb5da0..dd21dfc0 100644 --- a/pytorch3d/ops/graph_conv.py +++ b/pytorch3d/ops/graph_conv.py @@ -3,11 +3,10 @@ import torch import torch.nn as nn +from pytorch3d import _C from torch.autograd import Function from torch.autograd.function import once_differentiable -from pytorch3d import _C - class GraphConv(nn.Module): """A single graph convolution layer.""" @@ -60,9 +59,7 @@ class GraphConv(nn.Module): number of output features per vertex. """ if verts.is_cuda != edges.is_cuda: - raise ValueError( - "verts and edges tensors must be on the same device." - ) + raise ValueError("verts and edges tensors must be on the same device.") if verts.shape[0] == 0: # empty graph. 
return verts.new_zeros((0, self.output_dim)) * verts.sum() diff --git a/pytorch3d/ops/knn.py b/pytorch3d/ops/knn.py index 3986b9bf..2ec35992 100644 --- a/pytorch3d/ops/knn.py +++ b/pytorch3d/ops/knn.py @@ -1,7 +1,6 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import torch - from pytorch3d import _C diff --git a/pytorch3d/ops/mesh_face_areas_normals.py b/pytorch3d/ops/mesh_face_areas_normals.py index ecf9ffef..68dcc13c 100644 --- a/pytorch3d/ops/mesh_face_areas_normals.py +++ b/pytorch3d/ops/mesh_face_areas_normals.py @@ -1,11 +1,10 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import torch +from pytorch3d import _C from torch.autograd import Function from torch.autograd.function import once_differentiable -from pytorch3d import _C - class _MeshFaceAreasNormals(Function): """ diff --git a/pytorch3d/ops/nearest_neighbor_points.py b/pytorch3d/ops/nearest_neighbor_points.py index 308e23bf..ffb40a8e 100644 --- a/pytorch3d/ops/nearest_neighbor_points.py +++ b/pytorch3d/ops/nearest_neighbor_points.py @@ -2,7 +2,6 @@ import torch - from pytorch3d import _C @@ -31,9 +30,7 @@ def nn_points_idx(p1, p2, p2_normals=None) -> torch.Tensor: """ N, P1, D = p1.shape with torch.no_grad(): - p1_nn_idx = _C.nn_points_idx( - p1.contiguous(), p2.contiguous() - ) # (N, P1) + p1_nn_idx = _C.nn_points_idx(p1.contiguous(), p2.contiguous()) # (N, P1) p1_nn_idx_expanded = p1_nn_idx.view(N, P1, 1).expand(N, P1, D) p1_nn_points = p2.gather(1, p1_nn_idx_expanded) if p2_normals is None: diff --git a/pytorch3d/ops/packed_to_padded.py b/pytorch3d/ops/packed_to_padded.py index 4ac4177a..3e567b53 100644 --- a/pytorch3d/ops/packed_to_padded.py +++ b/pytorch3d/ops/packed_to_padded.py @@ -1,11 +1,10 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import torch +from pytorch3d import _C from torch.autograd import Function from torch.autograd.function import once_differentiable -from pytorch3d import _C - class _PackedToPadded(Function): """ diff --git a/pytorch3d/ops/sample_points_from_meshes.py b/pytorch3d/ops/sample_points_from_meshes.py index d96e407d..7236c42b 100644 --- a/pytorch3d/ops/sample_points_from_meshes.py +++ b/pytorch3d/ops/sample_points_from_meshes.py @@ -7,8 +7,8 @@ batches of meshes. """ import sys from typing import Tuple, Union -import torch +import torch from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals from pytorch3d.ops.packed_to_padded import packed_to_padded @@ -53,9 +53,7 @@ def sample_points_from_meshes( # Only compute samples for non empty meshes with torch.no_grad(): - areas, _ = mesh_face_areas_normals( - verts, faces - ) # Face areas can be zero. + areas, _ = mesh_face_areas_normals(verts, faces) # Face areas can be zero. max_faces = meshes.num_faces_per_mesh().max().item() areas_padded = packed_to_padded( areas, mesh_to_face[meshes.valid], max_faces @@ -80,21 +78,17 @@ def sample_points_from_meshes( a = v0[sample_face_idxs] # (N, num_samples, 3) b = v1[sample_face_idxs] c = v2[sample_face_idxs] - samples[meshes.valid] = ( - w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c - ) + samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c if return_normals: # Intialize normals tensor with fill value 0 for empty meshes. # Normals for the sampled points are face normals computed from # the vertices of the face in which the sampled point lies. 
- normals = torch.zeros( - (num_meshes, num_samples, 3), device=meshes.device - ) + normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device) vert_normals = (v1 - v0).cross(v2 - v1, dim=1) - vert_normals = vert_normals / vert_normals.norm( - dim=1, p=2, keepdim=True - ).clamp(min=sys.float_info.epsilon) + vert_normals = vert_normals / vert_normals.norm(dim=1, p=2, keepdim=True).clamp( + min=sys.float_info.epsilon + ) vert_normals = vert_normals[sample_face_idxs] normals[meshes.valid] = vert_normals diff --git a/pytorch3d/ops/subdivide_meshes.py b/pytorch3d/ops/subdivide_meshes.py index 9abfb5f7..6a2386e8 100644 --- a/pytorch3d/ops/subdivide_meshes.py +++ b/pytorch3d/ops/subdivide_meshes.py @@ -3,7 +3,6 @@ import torch import torch.nn as nn - from pytorch3d.structures import Meshes @@ -193,16 +192,12 @@ class SubdivideMeshes(nn.Module): edges = meshes[0].edges_packed() # The set of faces is the same across the different meshes. - new_faces = self._subdivided_faces.view(1, -1, 3).expand( - self._N, -1, -1 - ) + new_faces = self._subdivided_faces.view(1, -1, 3).expand(self._N, -1, -1) # Add one new vertex at the midpoint of each edge by taking the average # of the vertices that form each edge. new_verts = verts[:, edges].mean(dim=2) - new_verts = torch.cat( - [verts, new_verts], dim=1 - ) # (sum(V_n)+sum(E_n), 3) + new_verts = torch.cat([verts, new_verts], dim=1) # (sum(V_n)+sum(E_n), 3) new_feats = None # Calculate features for new vertices. @@ -212,15 +207,11 @@ class SubdivideMeshes(nn.Module): # padded, i.e. (N*V, D) to (N, V, D). feats = feats.view(verts.size(0), verts.size(1), feats.size(1)) if feats.dim() != 3: - raise ValueError( - "features need to be of shape (N, V, D) or (N*V, D)" - ) + raise ValueError("features need to be of shape (N, V, D) or (N*V, D)") # Take average of the features at the vertices that form each edge. new_feats = feats[:, edges].mean(dim=2) - new_feats = torch.cat( - [feats, new_feats], dim=1 - ) # (sum(V_n)+sum(E_n), 3) + new_feats = torch.cat([feats, new_feats], dim=1) # (sum(V_n)+sum(E_n), 3) new_meshes = Meshes(verts=new_verts, faces=new_faces) @@ -270,9 +261,7 @@ class SubdivideMeshes(nn.Module): ) # (sum(V_n)+sum(E_n),) verts_ordered_idx_init = torch.zeros( - new_verts_per_mesh.sum(), - dtype=torch.int64, - device=meshes.device, + new_verts_per_mesh.sum(), dtype=torch.int64, device=meshes.device ) # (sum(V_n)+sum(E_n),) # Reassign vertex indices so that existing and new vertices for each @@ -288,9 +277,7 @@ class SubdivideMeshes(nn.Module): # Calculate the indices needed to group the existing and new faces # for each mesh. - face_sort_idx = create_faces_index( - num_faces_per_mesh, device=meshes.device - ) + face_sort_idx = create_faces_index(num_faces_per_mesh, device=meshes.device) # Reorder the faces to sequentially group existing and new faces # for each mesh. @@ -361,9 +348,7 @@ def create_verts_index(verts_per_mesh, edges_per_mesh, device=None): E = edges_per_mesh.sum() # e.g. 21 verts_per_mesh_cumsum = verts_per_mesh.cumsum(dim=0) # (N,) e.g. (4, 9, 15) - edges_per_mesh_cumsum = edges_per_mesh.cumsum( - dim=0 - ) # (N,) e.g. (5, 12, 21) + edges_per_mesh_cumsum = edges_per_mesh.cumsum(dim=0) # (N,) e.g. (5, 12, 21) v_to_e_idx = verts_per_mesh_cumsum.clone() @@ -373,9 +358,7 @@ def create_verts_index(verts_per_mesh, edges_per_mesh, device=None): ] # e.g. (4, 9, 15) + (0, 5, 12) = (4, 14, 27) # vertex to edge offset. - v_to_e_offset = ( - V - verts_per_mesh_cumsum - ) # e.g. 
15 - (4, 9, 15) = (11, 6, 0) + v_to_e_offset = V - verts_per_mesh_cumsum # e.g. 15 - (4, 9, 15) = (11, 6, 0) v_to_e_offset[1:] += edges_per_mesh_cumsum[ :-1 ] # e.g. (11, 6, 0) + (0, 5, 12) = (11, 11, 12) diff --git a/pytorch3d/ops/vert_align.py b/pytorch3d/ops/vert_align.py index b1903e6a..43e50eed 100644 --- a/pytorch3d/ops/vert_align.py +++ b/pytorch3d/ops/vert_align.py @@ -59,9 +59,7 @@ def vert_align( elif hasattr(verts, "verts_padded"): grid = verts.verts_padded() else: - raise ValueError( - "verts must be a tensor or have a `verts_padded` attribute" - ) + raise ValueError("verts must be a tensor or have a `verts_padded` attribute") grid = grid[:, None, :, :2] # (N, 1, V, 2) diff --git a/pytorch3d/renderer/__init__.py b/pytorch3d/renderer/__init__.py index 7b13ea9a..9251f83b 100644 --- a/pytorch3d/renderer/__init__.py +++ b/pytorch3d/renderer/__init__.py @@ -44,4 +44,5 @@ from .points import ( ) from .utils import TensorProperties, convert_to_tensors_and_broadcast + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/renderer/blending.py b/pytorch3d/renderer/blending.py index 53dbea69..ea67c5f1 100644 --- a/pytorch3d/renderer/blending.py +++ b/pytorch3d/renderer/blending.py @@ -1,10 +1,12 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import numpy as np from typing import NamedTuple, Sequence + +import numpy as np import torch + # Example functions for blending the top K colors per pixel using the outputs # from rasterization. # NOTE: All blending function should return an RGBA image per batch element @@ -63,9 +65,7 @@ def sigmoid_alpha_blend(colors, fragments, blend_params) -> torch.Tensor: 3D Reasoning', ICCV 2019 """ N, H, W, K = fragments.pix_to_face.shape - pixel_colors = torch.ones( - (N, H, W, 4), dtype=colors.dtype, device=colors.device - ) + pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device) mask = fragments.pix_to_face >= 0 # The distance is negative if a pixel is inside a face and positive outside @@ -124,14 +124,10 @@ def softmax_rgb_blend( N, H, W, K = fragments.pix_to_face.shape device = fragments.pix_to_face.device - pixel_colors = torch.ones( - (N, H, W, 4), dtype=colors.dtype, device=colors.device - ) + pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device) background = blend_params.background_color if not torch.is_tensor(background): - background = torch.tensor( - background, dtype=torch.float32, device=device - ) + background = torch.tensor(background, dtype=torch.float32, device=device) # Background color delta = np.exp(1e-10 / blend_params.gamma) * 1e-10 diff --git a/pytorch3d/renderer/cameras.py b/pytorch3d/renderer/cameras.py index 31d421f5..b9338723 100644 --- a/pytorch3d/renderer/cameras.py +++ b/pytorch3d/renderer/cameras.py @@ -1,15 +1,16 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import math -import numpy as np from typing import Optional, Sequence, Tuple + +import numpy as np import torch import torch.nn.functional as F - from pytorch3d.transforms import Rotate, Transform3d, Translate from .utils import TensorProperties, convert_to_tensors_and_broadcast + # Default values for rotation and translation matrices. 
r = np.expand_dims(np.eye(3), axis=0) # (1, 3, 3) t = np.expand_dims(np.zeros(3), axis=0) # (1, 3) @@ -106,9 +107,7 @@ class OpenGLPerspectiveCameras(TensorProperties): aspect_ratio = kwargs.get("aspect_ratio", self.aspect_ratio) degrees = kwargs.get("degrees", self.degrees) - P = torch.zeros( - (self._N, 4, 4), device=self.device, dtype=torch.float32 - ) + P = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32) ones = torch.ones((self._N), dtype=torch.float32, device=self.device) if degrees: fov = (np.pi / 180) * fov @@ -204,9 +203,7 @@ class OpenGLPerspectiveCameras(TensorProperties): """ self.R = kwargs.get("R", self.R) # pyre-ignore[16] self.T = kwargs.get("T", self.T) # pyre-ignore[16] - world_to_view_transform = get_world_to_view_transform( - R=self.R, T=self.T - ) + world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T) return world_to_view_transform def get_full_projection_transform(self, **kwargs) -> Transform3d: @@ -229,9 +226,7 @@ class OpenGLPerspectiveCameras(TensorProperties): """ self.R = kwargs.get("R", self.R) # pyre-ignore[16] self.T = kwargs.get("T", self.T) # pyre-ignore[16] - world_to_view_transform = self.get_world_to_view_transform( - R=self.R, T=self.T - ) + world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T) view_to_screen_transform = self.get_projection_transform(**kwargs) return world_to_view_transform.compose(view_to_screen_transform) @@ -337,9 +332,7 @@ class OpenGLOrthographicCameras(TensorProperties): bottom = kwargs.get("bottom", self.bottom) # pyre-ignore[16] scale_xyz = kwargs.get("scale_xyz", self.scale_xyz) # pyre-ignore[16] - P = torch.zeros( - (self._N, 4, 4), dtype=torch.float32, device=self.device - ) + P = torch.zeros((self._N, 4, 4), dtype=torch.float32, device=self.device) ones = torch.ones((self._N), dtype=torch.float32, device=self.device) # NOTE: OpenGL flips handedness of coordinate system between camera # space and NDC space so z sign is -ve. 
In PyTorch3D we maintain a @@ -417,9 +410,7 @@ class OpenGLOrthographicCameras(TensorProperties): """ self.R = kwargs.get("R", self.R) # pyre-ignore[16] self.T = kwargs.get("T", self.T) # pyre-ignore[16] - world_to_view_transform = get_world_to_view_transform( - R=self.R, T=self.T - ) + world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T) return world_to_view_transform def get_full_projection_transform(self, **kwargs) -> Transform3d: @@ -442,9 +433,7 @@ class OpenGLOrthographicCameras(TensorProperties): """ self.R = kwargs.get("R", self.R) # pyre-ignore[16] self.T = kwargs.get("T", self.T) # pyre-ignore[16] - world_to_view_transform = self.get_world_to_view_transform( - R=self.R, T=self.T - ) + world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T) view_to_screen_transform = self.get_projection_transform(**kwargs) return world_to_view_transform.compose(view_to_screen_transform) @@ -470,12 +459,7 @@ class SfMPerspectiveCameras(TensorProperties): """ def __init__( - self, - focal_length=1.0, - principal_point=((0.0, 0.0),), - R=r, - T=t, - device="cpu", + self, focal_length=1.0, principal_point=((0.0, 0.0),), R=r, T=t, device="cpu" ): """ __init__(self, focal_length, principal_point, R, T, device) -> None @@ -589,9 +573,7 @@ class SfMPerspectiveCameras(TensorProperties): """ self.R = kwargs.get("R", self.R) # pyre-ignore[16] self.T = kwargs.get("T", self.T) # pyre-ignore[16] - world_to_view_transform = get_world_to_view_transform( - R=self.R, T=self.T - ) + world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T) return world_to_view_transform def get_full_projection_transform(self, **kwargs) -> Transform3d: @@ -610,9 +592,7 @@ class SfMPerspectiveCameras(TensorProperties): """ self.R = kwargs.get("R", self.R) # pyre-ignore[16] self.T = kwargs.get("T", self.T) # pyre-ignore[16] - world_to_view_transform = self.get_world_to_view_transform( - R=self.R, T=self.T - ) + world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T) view_to_screen_transform = self.get_projection_transform(**kwargs) return world_to_view_transform.compose(view_to_screen_transform) @@ -638,12 +618,7 @@ class SfMOrthographicCameras(TensorProperties): """ def __init__( - self, - focal_length=1.0, - principal_point=((0.0, 0.0),), - R=r, - T=t, - device="cpu", + self, focal_length=1.0, principal_point=((0.0, 0.0),), R=r, T=t, device="cpu" ): """ __init__(self, focal_length, principal_point, R, T, device) -> None @@ -757,9 +732,7 @@ class SfMOrthographicCameras(TensorProperties): """ self.R = kwargs.get("R", self.R) # pyre-ignore[16] self.T = kwargs.get("T", self.T) # pyre-ignore[16] - world_to_view_transform = get_world_to_view_transform( - R=self.R, T=self.T - ) + world_to_view_transform = get_world_to_view_transform(R=self.R, T=self.T) return world_to_view_transform def get_full_projection_transform(self, **kwargs) -> Transform3d: @@ -778,9 +751,7 @@ class SfMOrthographicCameras(TensorProperties): """ self.R = kwargs.get("R", self.R) # pyre-ignore[16] self.T = kwargs.get("T", self.T) # pyre-ignore[16] - world_to_view_transform = self.get_world_to_view_transform( - R=self.R, T=self.T - ) + world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T) view_to_screen_transform = self.get_projection_transform(**kwargs) return world_to_view_transform.compose(view_to_screen_transform) @@ -990,9 +961,7 @@ def look_at_rotation( z_axis = F.normalize(at - camera_position, eps=1e-5) x_axis = F.normalize(torch.cross(up, z_axis), 
eps=1e-5) y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5) - R = torch.cat( - (x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1 - ) + R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1) return R.transpose(1, 2) @@ -1038,9 +1007,7 @@ def look_at_view_transform( """ if eye is not None: - broadcasted_args = convert_to_tensors_and_broadcast( - eye, at, up, device=device - ) + broadcasted_args = convert_to_tensors_and_broadcast(eye, at, up, device=device) eye, at, up = broadcasted_args C = eye else: diff --git a/pytorch3d/renderer/compositing.py b/pytorch3d/renderer/compositing.py index 66852be8..aee7a312 100644 --- a/pytorch3d/renderer/compositing.py +++ b/pytorch3d/renderer/compositing.py @@ -3,10 +3,11 @@ from typing import NamedTuple -import torch +import torch from pytorch3d import _C + # Example functions for blending the top K features per pixel using the outputs # from rasterization. # NOTE: All blending function should return a (N, H, W, C) tensor per batch element. @@ -49,9 +50,7 @@ class _CompositeAlphaPoints(torch.autograd.Function): def forward(ctx, features, alphas, points_idx): pt_cld = _C.accum_alphacomposite(features, alphas, points_idx) - ctx.save_for_backward( - features.clone(), alphas.clone(), points_idx.clone() - ) + ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone()) return pt_cld @staticmethod @@ -68,9 +67,7 @@ class _CompositeAlphaPoints(torch.autograd.Function): return grad_features, grad_alphas, grad_points_idx, None -def alpha_composite( - pointsidx, alphas, pt_clds, blend_params=None -) -> torch.Tensor: +def alpha_composite(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor: """ Composite features within a z-buffer using alpha compositing. Given a zbuffer with corresponding features and weights, these values are accumulated according @@ -131,9 +128,7 @@ class _CompositeNormWeightedSumPoints(torch.autograd.Function): def forward(ctx, features, alphas, points_idx): pt_cld = _C.accum_weightedsumnorm(features, alphas, points_idx) - ctx.save_for_backward( - features.clone(), alphas.clone(), points_idx.clone() - ) + ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone()) return pt_cld @staticmethod @@ -150,9 +145,7 @@ class _CompositeNormWeightedSumPoints(torch.autograd.Function): return grad_features, grad_alphas, grad_points_idx, None -def norm_weighted_sum( - pointsidx, alphas, pt_clds, blend_params=None -) -> torch.Tensor: +def norm_weighted_sum(pointsidx, alphas, pt_clds, blend_params=None) -> torch.Tensor: """ Composite features within a z-buffer using normalized weighted sum. 
Given a zbuffer with corresponding features and weights, these values are accumulated @@ -213,9 +206,7 @@ class _CompositeWeightedSumPoints(torch.autograd.Function): def forward(ctx, features, alphas, points_idx): pt_cld = _C.accum_weightedsum(features, alphas, points_idx) - ctx.save_for_backward( - features.clone(), alphas.clone(), points_idx.clone() - ) + ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone()) return pt_cld @staticmethod diff --git a/pytorch3d/renderer/lighting.py b/pytorch3d/renderer/lighting.py index b575a56b..fbcd1aec 100644 --- a/pytorch3d/renderer/lighting.py +++ b/pytorch3d/renderer/lighting.py @@ -114,12 +114,7 @@ def specular( # Ensure all inputs have same batch dimension as points matched_tensors = convert_to_tensors_and_broadcast( - points, - color, - direction, - camera_position, - shininess, - device=points.device, + points, color, direction, camera_position, shininess, device=points.device ) _, color, direction, camera_position, shininess = matched_tensors @@ -201,9 +196,7 @@ class DirectionalLights(TensorProperties): normals=normals, color=self.diffuse_color, direction=self.direction ) - def specular( - self, normals, points, camera_position, shininess - ) -> torch.Tensor: + def specular(self, normals, points, camera_position, shininess) -> torch.Tensor: return specular( points=points, normals=normals, @@ -256,13 +249,9 @@ class PointLights(TensorProperties): def diffuse(self, normals, points) -> torch.Tensor: direction = self.location - points - return diffuse( - normals=normals, color=self.diffuse_color, direction=direction - ) + return diffuse(normals=normals, color=self.diffuse_color, direction=direction) - def specular( - self, normals, points, camera_position, shininess - ) -> torch.Tensor: + def specular(self, normals, points, camera_position, shininess) -> torch.Tensor: direction = self.location - points return specular( points=points, diff --git a/pytorch3d/renderer/mesh/__init__.py b/pytorch3d/renderer/mesh/__init__.py index 3ac0e00a..22ce56dc 100644 --- a/pytorch3d/renderer/mesh/__init__.py +++ b/pytorch3d/renderer/mesh/__init__.py @@ -1,10 +1,7 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from .texturing import ( # isort:skip - interpolate_texture_map, - interpolate_vertex_colors, -) +from .texturing import interpolate_texture_map, interpolate_vertex_colors # isort:skip from .rasterize_meshes import rasterize_meshes from .rasterizer import MeshRasterizer, RasterizationSettings from .renderer import MeshRenderer @@ -20,4 +17,5 @@ from .shader import ( from .shading import gouraud_shading, phong_shading from .utils import interpolate_face_attributes + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/renderer/mesh/rasterize_meshes.py b/pytorch3d/renderer/mesh/rasterize_meshes.py index fbbd7c41..34079a5c 100644 --- a/pytorch3d/renderer/mesh/rasterize_meshes.py +++ b/pytorch3d/renderer/mesh/rasterize_meshes.py @@ -1,12 +1,13 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-import numpy as np from typing import Optional -import torch +import numpy as np +import torch from pytorch3d import _C + # TODO make the epsilon user configurable kEpsilon = 1e-30 @@ -172,9 +173,7 @@ class _RasterizeFaceVerts(torch.autograd.Function): return pix_to_face, zbuf, barycentric_coords, dists @staticmethod - def backward( - ctx, grad_pix_to_face, grad_zbuf, grad_barycentric_coords, grad_dists - ): + def backward(ctx, grad_pix_to_face, grad_zbuf, grad_barycentric_coords, grad_dists): grad_face_verts = None grad_mesh_to_face_first_idx = None grad_num_faces_per_mesh = None @@ -243,9 +242,7 @@ def rasterize_meshes_python( face_idxs = torch.full( (N, H, W, K), fill_value=-1, dtype=torch.int64, device=device ) - zbuf = torch.full( - (N, H, W, K), fill_value=-1, dtype=torch.float32, device=device - ) + zbuf = torch.full((N, H, W, K), fill_value=-1, dtype=torch.float32, device=device) bary_coords = torch.full( (N, H, W, K, 3), fill_value=-1, dtype=torch.float32, device=device ) @@ -308,9 +305,7 @@ def rasterize_meshes_python( continue # Compute barycentric coordinates and pixel z distance. - pxy = torch.tensor( - [xf, yf], dtype=torch.float32, device=device - ) + pxy = torch.tensor([xf, yf], dtype=torch.float32, device=device) bary = barycentric_coordinates(pxy, v0[:2], v1[:2], v2[:2]) if perspective_correct: diff --git a/pytorch3d/renderer/mesh/rasterizer.py b/pytorch3d/renderer/mesh/rasterizer.py index 20f169b9..b995bf27 100644 --- a/pytorch3d/renderer/mesh/rasterizer.py +++ b/pytorch3d/renderer/mesh/rasterizer.py @@ -1,6 +1,7 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import NamedTuple, Optional + import torch import torch.nn as nn @@ -123,8 +124,5 @@ class MeshRasterizer(nn.Module): perspective_correct=raster_settings.perspective_correct, ) return Fragments( - pix_to_face=pix_to_face, - zbuf=zbuf, - bary_coords=bary_coords, - dists=dists, + pix_to_face=pix_to_face, zbuf=zbuf, bary_coords=bary_coords, dists=dists ) diff --git a/pytorch3d/renderer/mesh/renderer.py b/pytorch3d/renderer/mesh/renderer.py index 98ee33f2..87835a70 100644 --- a/pytorch3d/renderer/mesh/renderer.py +++ b/pytorch3d/renderer/mesh/renderer.py @@ -7,6 +7,7 @@ import torch.nn as nn from .rasterizer import Fragments from .utils import _clip_barycentric_coordinates, _interpolate_zbuf + # A renderer class should be initialized with a # function for rasterization and a function for shading. # The rasterizer should: @@ -48,16 +49,12 @@ class MeshRenderer(nn.Module): the range for the corresponding face. """ fragments = self.rasterizer(meshes_world, **kwargs) - raster_settings = kwargs.get( - "raster_settings", self.rasterizer.raster_settings - ) + raster_settings = kwargs.get("raster_settings", self.rasterizer.raster_settings) if raster_settings.blur_radius > 0.0: # TODO: potentially move barycentric clipping to the rasterizer # if no downstream functions requires unclipped values. # This will avoid unnecssary re-interpolation of the z buffer. 
- clipped_bary_coords = _clip_barycentric_coordinates( - fragments.bary_coords - ) + clipped_bary_coords = _clip_barycentric_coordinates(fragments.bary_coords) clipped_zbuf = _interpolate_zbuf( fragments.pix_to_face, clipped_bary_coords, meshes_world ) diff --git a/pytorch3d/renderer/mesh/shader.py b/pytorch3d/renderer/mesh/shader.py index c15178ae..5ac34420 100644 --- a/pytorch3d/renderer/mesh/shader.py +++ b/pytorch3d/renderer/mesh/shader.py @@ -16,6 +16,7 @@ from ..materials import Materials from .shading import flat_shading, gouraud_shading, phong_shading from .texturing import interpolate_texture_map, interpolate_vertex_colors + # A Shader should take as input fragments from the output of rasterization # along with scene params and output images. A shader could perform operations # such as: @@ -41,16 +42,12 @@ class HardPhongShader(nn.Module): def __init__(self, device="cpu", cameras=None, lights=None, materials=None): super().__init__() - self.lights = ( - lights if lights is not None else PointLights(device=device) - ) + self.lights = lights if lights is not None else PointLights(device=device) self.materials = ( materials if materials is not None else Materials(device=device) ) self.cameras = ( - cameras - if cameras is not None - else OpenGLPerspectiveCameras(device=device) + cameras if cameras is not None else OpenGLPerspectiveCameras(device=device) ) def forward(self, fragments, meshes, **kwargs) -> torch.Tensor: @@ -85,28 +82,17 @@ class SoftPhongShader(nn.Module): """ def __init__( - self, - device="cpu", - cameras=None, - lights=None, - materials=None, - blend_params=None, + self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None ): super().__init__() - self.lights = ( - lights if lights is not None else PointLights(device=device) - ) + self.lights = lights if lights is not None else PointLights(device=device) self.materials = ( materials if materials is not None else Materials(device=device) ) self.cameras = ( - cameras - if cameras is not None - else OpenGLPerspectiveCameras(device=device) - ) - self.blend_params = ( - blend_params if blend_params is not None else BlendParams() + cameras if cameras is not None else OpenGLPerspectiveCameras(device=device) ) + self.blend_params = blend_params if blend_params is not None else BlendParams() def forward(self, fragments, meshes, **kwargs) -> torch.Tensor: texels = interpolate_vertex_colors(fragments, meshes) @@ -142,16 +128,12 @@ class HardGouraudShader(nn.Module): def __init__(self, device="cpu", cameras=None, lights=None, materials=None): super().__init__() - self.lights = ( - lights if lights is not None else PointLights(device=device) - ) + self.lights = lights if lights is not None else PointLights(device=device) self.materials = ( materials if materials is not None else Materials(device=device) ) self.cameras = ( - cameras - if cameras is not None - else OpenGLPerspectiveCameras(device=device) + cameras if cameras is not None else OpenGLPerspectiveCameras(device=device) ) def forward(self, fragments, meshes, **kwargs) -> torch.Tensor: @@ -185,28 +167,17 @@ class SoftGouraudShader(nn.Module): """ def __init__( - self, - device="cpu", - cameras=None, - lights=None, - materials=None, - blend_params=None, + self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None ): super().__init__() - self.lights = ( - lights if lights is not None else PointLights(device=device) - ) + self.lights = lights if lights is not None else PointLights(device=device) self.materials = ( materials if 
materials is not None else Materials(device=device) ) self.cameras = ( - cameras - if cameras is not None - else OpenGLPerspectiveCameras(device=device) - ) - self.blend_params = ( - blend_params if blend_params is not None else BlendParams() + cameras if cameras is not None else OpenGLPerspectiveCameras(device=device) ) + self.blend_params = blend_params if blend_params is not None else BlendParams() def forward(self, fragments, meshes, **kwargs) -> torch.Tensor: cameras = kwargs.get("cameras", self.cameras) @@ -241,28 +212,17 @@ class TexturedSoftPhongShader(nn.Module): """ def __init__( - self, - device="cpu", - cameras=None, - lights=None, - materials=None, - blend_params=None, + self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None ): super().__init__() - self.lights = ( - lights if lights is not None else PointLights(device=device) - ) + self.lights = lights if lights is not None else PointLights(device=device) self.materials = ( materials if materials is not None else Materials(device=device) ) self.cameras = ( - cameras - if cameras is not None - else OpenGLPerspectiveCameras(device=device) - ) - self.blend_params = ( - blend_params if blend_params is not None else BlendParams() + cameras if cameras is not None else OpenGLPerspectiveCameras(device=device) ) + self.blend_params = blend_params if blend_params is not None else BlendParams() def forward(self, fragments, meshes, **kwargs) -> torch.Tensor: texels = interpolate_texture_map(fragments, meshes) @@ -298,16 +258,12 @@ class HardFlatShader(nn.Module): def __init__(self, device="cpu", cameras=None, lights=None, materials=None): super().__init__() - self.lights = ( - lights if lights is not None else PointLights(device=device) - ) + self.lights = lights if lights is not None else PointLights(device=device) self.materials = ( materials if materials is not None else Materials(device=device) ) self.cameras = ( - cameras - if cameras is not None - else OpenGLPerspectiveCameras(device=device) + cameras if cameras is not None else OpenGLPerspectiveCameras(device=device) ) def forward(self, fragments, meshes, **kwargs) -> torch.Tensor: @@ -346,9 +302,7 @@ class SoftSilhouetteShader(nn.Module): def __init__(self, blend_params=None): super().__init__() - self.blend_params = ( - blend_params if blend_params is not None else BlendParams() - ) + self.blend_params = blend_params if blend_params is not None else BlendParams() def forward(self, fragments, meshes, **kwargs) -> torch.Tensor: """" diff --git a/pytorch3d/renderer/mesh/shading.py b/pytorch3d/renderer/mesh/shading.py index f55cdc7c..1fac0b9e 100644 --- a/pytorch3d/renderer/mesh/shading.py +++ b/pytorch3d/renderer/mesh/shading.py @@ -2,6 +2,7 @@ from typing import Tuple + import torch from .texturing import interpolate_face_attributes @@ -82,9 +83,7 @@ def phong_shading( return colors -def gouraud_shading( - meshes, fragments, lights, cameras, materials -) -> torch.Tensor: +def gouraud_shading(meshes, fragments, lights, cameras, materials) -> torch.Tensor: """ Apply per vertex shading. First compute the vertex illumination by applying ambient, diffuse and specular lighting. If vertex color is available, @@ -131,9 +130,7 @@ def gouraud_shading( return colors -def flat_shading( - meshes, fragments, lights, cameras, materials, texels -) -> torch.Tensor: +def flat_shading(meshes, fragments, lights, cameras, materials, texels) -> torch.Tensor: """ Apply per face shading. 
Use the average face position and the face normals to compute the ambient, diffuse and specular lighting. Apply the ambient diff --git a/pytorch3d/renderer/mesh/texturing.py b/pytorch3d/renderer/mesh/texturing.py index c57ad455..0d1b0564 100644 --- a/pytorch3d/renderer/mesh/texturing.py +++ b/pytorch3d/renderer/mesh/texturing.py @@ -3,7 +3,6 @@ import torch import torch.nn.functional as F - from pytorch3d.structures.textures import Textures from .utils import interpolate_face_attributes @@ -75,9 +74,7 @@ def interpolate_texture_map(fragments, meshes) -> torch.Tensor: # right-bottom pixel of input. pixel_uvs = pixel_uvs * 2.0 - 1.0 - texture_maps = torch.flip( - texture_maps, [2] - ) # flip y axis of the texture map + texture_maps = torch.flip(texture_maps, [2]) # flip y axis of the texture map if texture_maps.device != pixel_uvs.device: texture_maps = texture_maps.to(pixel_uvs.device) texels = F.grid_sample(texture_maps, pixel_uvs, align_corners=False) @@ -107,9 +104,7 @@ def interpolate_vertex_colors(fragments, meshes) -> torch.Tensor: There will be one C dimensional value for each element in fragments.pix_to_face. """ - vertex_textures = meshes.textures.verts_rgb_padded().reshape( - -1, 3 - ) # (V, C) + vertex_textures = meshes.textures.verts_rgb_padded().reshape(-1, 3) # (V, C) vertex_textures = vertex_textures[meshes.verts_padded_to_packed_idx(), :] faces_packed = meshes.faces_packed() faces_textures = vertex_textures[faces_packed] # (F, 3, C) diff --git a/pytorch3d/renderer/mesh/utils.py b/pytorch3d/renderer/mesh/utils.py index c65c9f48..3fc41f67 100644 --- a/pytorch3d/renderer/mesh/utils.py +++ b/pytorch3d/renderer/mesh/utils.py @@ -92,8 +92,6 @@ def _interpolate_zbuf( verts = meshes.verts_packed() faces = meshes.faces_packed() faces_verts_z = verts[faces][..., 2][..., None] # (F, 3, 1) - return interpolate_face_attributes( - pix_to_face, barycentric_coords, faces_verts_z - )[ + return interpolate_face_attributes(pix_to_face, barycentric_coords, faces_verts_z)[ ..., 0 ] # (1, H, W, K) diff --git a/pytorch3d/renderer/points/__init__.py b/pytorch3d/renderer/points/__init__.py index 2e052fe2..b334f4c5 100644 --- a/pytorch3d/renderer/points/__init__.py +++ b/pytorch3d/renderer/points/__init__.py @@ -5,4 +5,5 @@ from .rasterize_points import rasterize_points from .rasterizer import PointsRasterizationSettings, PointsRasterizer from .renderer import PointsRenderer + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/renderer/points/compositor.py b/pytorch3d/renderer/points/compositor.py index fd45c32a..f2e24bc1 100644 --- a/pytorch3d/renderer/points/compositor.py +++ b/pytorch3d/renderer/points/compositor.py @@ -5,6 +5,7 @@ import torch.nn as nn from ..compositing import CompositeParams, alpha_composite, norm_weighted_sum + # A compositor should take as input 3D points and some corresponding information. 
# Given this information, the compositor can: # - blend colors across the top K vertices at a pixel @@ -19,15 +20,11 @@ class AlphaCompositor(nn.Module): super().__init__() self.composite_params = ( - composite_params - if composite_params is not None - else CompositeParams() + composite_params if composite_params is not None else CompositeParams() ) def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor: - images = alpha_composite( - fragments, alphas, ptclds, self.composite_params - ) + images = alpha_composite(fragments, alphas, ptclds, self.composite_params) return images @@ -39,13 +36,9 @@ class NormWeightedCompositor(nn.Module): def __init__(self, composite_params=None): super().__init__() self.composite_params = ( - composite_params - if composite_params is not None - else CompositeParams() + composite_params if composite_params is not None else CompositeParams() ) def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor: - images = norm_weighted_sum( - fragments, alphas, ptclds, self.composite_params - ) + images = norm_weighted_sum(fragments, alphas, ptclds, self.composite_params) return images diff --git a/pytorch3d/renderer/points/rasterize_points.py b/pytorch3d/renderer/points/rasterize_points.py index b35d83c1..4d26cbaf 100644 --- a/pytorch3d/renderer/points/rasterize_points.py +++ b/pytorch3d/renderer/points/rasterize_points.py @@ -1,8 +1,8 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import Optional -import torch +import torch from pytorch3d import _C from pytorch3d.renderer.mesh.rasterize_meshes import pix_to_ndc @@ -155,10 +155,7 @@ class _RasterizePoints(torch.autograd.Function): def rasterize_points_python( - pointclouds, - image_size: int = 256, - radius: float = 0.01, - points_per_pixel: int = 8, + pointclouds, image_size: int = 256, radius: float = 0.01, points_per_pixel: int = 8 ): """ Naive pure PyTorch implementation of pointcloud rasterization. @@ -177,9 +174,7 @@ def rasterize_points_python( point_idxs = torch.full( (N, S, S, K), fill_value=-1, dtype=torch.int32, device=device ) - zbuf = torch.full( - (N, S, S, K), fill_value=-1, dtype=torch.float32, device=device - ) + zbuf = torch.full((N, S, S, K), fill_value=-1, dtype=torch.float32, device=device) pix_dists = torch.full( (N, S, S, K), fill_value=-1, dtype=torch.float32, device=device ) diff --git a/pytorch3d/renderer/points/rasterizer.py b/pytorch3d/renderer/points/rasterizer.py index 7684ec87..2eb39c50 100644 --- a/pytorch3d/renderer/points/rasterizer.py +++ b/pytorch3d/renderer/points/rasterizer.py @@ -3,6 +3,7 @@ from typing import NamedTuple, Optional + import torch import torch.nn as nn diff --git a/pytorch3d/renderer/points/renderer.py b/pytorch3d/renderer/points/renderer.py index 57255658..4dc610a3 100644 --- a/pytorch3d/renderer/points/renderer.py +++ b/pytorch3d/renderer/points/renderer.py @@ -5,6 +5,7 @@ import torch import torch.nn as nn + # A renderer class should be initialized with a # function for rasterization and a function for compositing. # The rasterizer should: diff --git a/pytorch3d/renderer/utils.py b/pytorch3d/renderer/utils.py index 20d3bfd2..3afd6003 100644 --- a/pytorch3d/renderer/utils.py +++ b/pytorch3d/renderer/utils.py @@ -1,9 +1,10 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-import numpy as np import warnings from typing import Any, Union + +import numpy as np import torch @@ -45,10 +46,7 @@ class TensorAccessor(object): # Convert the attribute to a tensor if it is not a tensor. if not torch.is_tensor(value): value = torch.tensor( - value, - device=v.device, - dtype=v.dtype, - requires_grad=v.requires_grad, + value, device=v.device, dtype=v.dtype, requires_grad=v.requires_grad ) # Check the shapes match the existing shape and the shape of the index. @@ -253,9 +251,7 @@ class TensorProperties(object): return self -def format_tensor( - input, dtype=torch.float32, device: str = "cpu" -) -> torch.Tensor: +def format_tensor(input, dtype=torch.float32, device: str = "cpu") -> torch.Tensor: """ Helper function for converting a scalar value to a tensor. @@ -276,9 +272,7 @@ def format_tensor( return input -def convert_to_tensors_and_broadcast( - *args, dtype=torch.float32, device: str = "cpu" -): +def convert_to_tensors_and_broadcast(*args, dtype=torch.float32, device: str = "cpu"): """ Helper function to handle parsing an arbitrary number of inputs (*args) which all need to have the same batch dimension. diff --git a/pytorch3d/structures/__init__.py b/pytorch3d/structures/__init__.py index ab9cdff4..00c9cebb 100644 --- a/pytorch3d/structures/__init__.py +++ b/pytorch3d/structures/__init__.py @@ -3,11 +3,7 @@ from .meshes import Meshes, join_meshes from .pointclouds import Pointclouds from .textures import Textures -from .utils import ( - list_to_packed, - list_to_padded, - packed_to_list, - padded_to_list, -) +from .utils import list_to_packed, list_to_padded, packed_to_list, padded_to_list + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/structures/meshes.py b/pytorch3d/structures/meshes.py index 0060deb4..a9b271cd 100644 --- a/pytorch3d/structures/meshes.py +++ b/pytorch3d/structures/meshes.py @@ -1,6 +1,7 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import List + import torch from . import utils as struct_utils @@ -314,14 +315,11 @@ class Meshes(object): if isinstance(verts, list) and isinstance(faces, list): self._verts_list = verts self._faces_list = [ - f[f.gt(-1).all(1)].to(torch.int64) if len(f) > 0 else f - for f in faces + f[f.gt(-1).all(1)].to(torch.int64) if len(f) > 0 else f for f in faces ] self._N = len(self._verts_list) self.device = torch.device("cpu") - self.valid = torch.zeros( - (self._N,), dtype=torch.bool, device=self.device - ) + self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device) if self._N > 0: self.device = self._verts_list[0].device self._num_verts_per_mesh = torch.tensor( @@ -348,18 +346,14 @@ class Meshes(object): elif torch.is_tensor(verts) and torch.is_tensor(faces): if verts.size(2) != 3 and faces.size(2) != 3: - raise ValueError( - "Verts and Faces tensors have incorrect dimensions." - ) + raise ValueError("Verts and Faces tensors have incorrect dimensions.") self._verts_padded = verts self._faces_padded = faces.to(torch.int64) self._N = self._verts_padded.shape[0] self._V = self._verts_padded.shape[1] self.device = self._verts_padded.device - self.valid = torch.zeros( - (self._N,), dtype=torch.bool, device=self.device - ) + self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device) if self._N > 0: # Check that padded faces - which have value -1 - are at the # end of the tensors @@ -400,12 +394,8 @@ class Meshes(object): # Set the num verts/faces on the textures if present. 
if self.textures is not None: - self.textures._num_faces_per_mesh = ( - self._num_faces_per_mesh.tolist() - ) - self.textures._num_verts_per_mesh = ( - self._num_verts_per_mesh.tolist() - ) + self.textures._num_faces_per_mesh = self._num_faces_per_mesh.tolist() + self.textures._num_verts_per_mesh = self._num_verts_per_mesh.tolist() def __len__(self): return self._N @@ -665,8 +655,7 @@ class Meshes(object): self._verts_padded_to_packed_idx = torch.cat( [ - torch.arange(v, dtype=torch.int64, device=self.device) - + i * self._V + torch.arange(v, dtype=torch.int64, device=self.device) + i * self._V for (i, v) in enumerate(self._num_verts_per_mesh) ], dim=0, @@ -706,15 +695,10 @@ class Meshes(object): tensor of normals of shape (N, max(V_n), 3). """ if self.isempty(): - return torch.zeros( - (self._N, 0, 3), dtype=torch.float32, device=self.device - ) + return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device) verts_normals_list = self.verts_normals_list() return struct_utils.list_to_padded( - verts_normals_list, - (self._V, 3), - pad_value=0.0, - equisized=self.equisized, + verts_normals_list, (self._V, 3), pad_value=0.0, equisized=self.equisized ) def faces_normals_packed(self): @@ -750,15 +734,10 @@ class Meshes(object): tensor of normals of shape (N, max(F_n), 3). """ if self.isempty(): - return torch.zeros( - (self._N, 0, 3), dtype=torch.float32, device=self.device - ) + return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device) faces_normals_list = self.faces_normals_list() return struct_utils.list_to_padded( - faces_normals_list, - (self._F, 3), - pad_value=0.0, - equisized=self.equisized, + faces_normals_list, (self._F, 3), pad_value=0.0, equisized=self.equisized ) def faces_areas_packed(self): @@ -797,9 +776,7 @@ class Meshes(object): return faces_packed = self.faces_packed() verts_packed = self.verts_packed() - face_areas, face_normals = mesh_face_areas_normals( - verts_packed, faces_packed - ) + face_areas, face_normals = mesh_face_areas_normals(verts_packed, faces_packed) self._faces_areas_packed = face_areas self._faces_normals_packed = face_normals @@ -813,9 +790,7 @@ class Meshes(object): refresh: Set to True to force recomputation of vertex normals. Default: False. """ - if not ( - refresh or any(v is None for v in [self._verts_normals_packed]) - ): + if not (refresh or any(v is None for v in [self._verts_normals_packed])): return if self.isempty(): @@ -867,8 +842,7 @@ class Meshes(object): Computes the padded version of meshes from verts_list and faces_list. """ if not ( - refresh - or any(v is None for v in [self._verts_padded, self._faces_padded]) + refresh or any(v is None for v in [self._verts_padded, self._faces_padded]) ): return @@ -887,16 +861,10 @@ class Meshes(object): ) else: self._faces_padded = struct_utils.list_to_padded( - faces_list, - (self._F, 3), - pad_value=-1.0, - equisized=self.equisized, + faces_list, (self._F, 3), pad_value=-1.0, equisized=self.equisized ) self._verts_padded = struct_utils.list_to_padded( - verts_list, - (self._V, 3), - pad_value=0.0, - equisized=self.equisized, + verts_list, (self._V, 3), pad_value=0.0, equisized=self.equisized ) # TODO(nikhilar) Improve performance of _compute_packed. 
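Aside, not part of the patch: the meshes.py hunks above only re-wrap calls to list_to_padded and the arange-plus-offset index behind verts_padded_to_packed_idx. The minimal sketch below, assuming only that pytorch3d.structures is importable as shown in this diff, illustrates on a toy input what that padded/packed bookkeeping does.

import torch
from pytorch3d.structures import list_to_padded

# Two meshes with 4 and 2 vertices; pad both to (V, 3) rows with V = 4.
verts_list = [torch.rand(4, 3), torch.rand(2, 3)]
verts_padded = list_to_padded(verts_list, (4, 3), pad_value=0.0)
# verts_padded has shape (2, 4, 3); the second mesh is zero-padded to 4 rows.

# The arange(v) + i * V pattern reformatted above maps padded rows back to
# packed (concatenated) rows, skipping the padding:
num_verts_per_mesh = [4, 2]
V = 4
idx = torch.cat(
    [
        torch.arange(v, dtype=torch.int64) + i * V
        for i, v in enumerate(num_verts_per_mesh)
    ]
)
verts_packed = verts_padded.reshape(-1, 3)[idx]  # shape (6, 3), no pad rows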
@@ -1055,9 +1023,7 @@ class Meshes(object): face_to_edge = inverse_idxs[face_to_edge] self._faces_packed_to_edges_packed = face_to_edge - num_edges_per_mesh = torch.zeros( - self._N, dtype=torch.int32, device=self.device - ) + num_edges_per_mesh = torch.zeros(self._N, dtype=torch.int32, device=self.device) ones = torch.ones(1, dtype=torch.int32, device=self.device).expand( self._edges_packed_to_mesh_idx.shape ) diff --git a/pytorch3d/structures/pointclouds.py b/pytorch3d/structures/pointclouds.py index bf64c6c9..69566ac5 100644 --- a/pytorch3d/structures/pointclouds.py +++ b/pytorch3d/structures/pointclouds.py @@ -176,17 +176,13 @@ class Pointclouds(object): self._points_list = points self._N = len(self._points_list) self.device = torch.device("cpu") - self.valid = torch.zeros( - (self._N,), dtype=torch.bool, device=self.device - ) + self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device) self._num_points_per_cloud = [] if self._N > 0: for p in self._points_list: if len(p) > 0 and (p.dim() != 2 or p.shape[1] != 3): - raise ValueError( - "Clouds in list must be of shape Px3 or empty" - ) + raise ValueError("Clouds in list must be of shape Px3 or empty") self.device = self._points_list[0].device num_points_per_cloud = torch.tensor( @@ -210,9 +206,7 @@ class Pointclouds(object): self._N = self._points_padded.shape[0] self._P = self._points_padded.shape[1] self.device = self._points_padded.device - self.valid = torch.ones( - (self._N,), dtype=torch.bool, device=self.device - ) + self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device) self._num_points_per_cloud = torch.tensor( [self._P] * self._N, device=self.device ) @@ -260,9 +254,7 @@ class Pointclouds(object): if isinstance(aux_input, list): if len(aux_input) != self._N: - raise ValueError( - "Points and auxiliary input must be the same length." - ) + raise ValueError("Points and auxiliary input must be the same length.") for p, d in zip(self._num_points_per_cloud, aux_input): if p != d.shape[0]: raise ValueError( @@ -282,9 +274,7 @@ class Pointclouds(object): return aux_input, None, aux_input_C elif torch.is_tensor(aux_input): if aux_input.dim() != 3: - raise ValueError( - "Auxiliary input tensor has incorrect dimensions." 
- ) + raise ValueError("Auxiliary input tensor has incorrect dimensions.") if self._N != aux_input.shape[0]: raise ValueError("Points and inputs must be the same length.") if self._P != aux_input.shape[1]: @@ -531,8 +521,7 @@ class Pointclouds(object): else: self._padded_to_packed_idx = torch.cat( [ - torch.arange(v, dtype=torch.int64, device=self.device) - + i * self._P + torch.arange(v, dtype=torch.int64, device=self.device) + i * self._P for (i, v) in enumerate(self._num_points_per_cloud) ], dim=0, @@ -551,9 +540,7 @@ class Pointclouds(object): self._normals_padded, self._features_padded = None, None if self.isempty(): - self._points_padded = torch.zeros( - (self._N, 0, 3), device=self.device - ) + self._points_padded = torch.zeros((self._N, 0, 3), device=self.device) else: self._points_padded = struct_utils.list_to_padded( self.points_list(), @@ -621,9 +608,7 @@ class Pointclouds(object): points_list_to_packed = struct_utils.list_to_packed(points_list) self._points_packed = points_list_to_packed[0] - if not torch.allclose( - self._num_points_per_cloud, points_list_to_packed[1] - ): + if not torch.allclose(self._num_points_per_cloud, points_list_to_packed[1]): raise ValueError("Inconsistent list to packed conversion") self._cloud_to_packed_first_idx = points_list_to_packed[2] self._packed_to_cloud_idx = points_list_to_packed[3] @@ -696,13 +681,9 @@ class Pointclouds(object): if other._N > 0: other._points_list = [v.to(device) for v in other.points_list()] if other._normals_list is not None: - other._normals_list = [ - n.to(device) for n in other.normals_list() - ] + other._normals_list = [n.to(device) for n in other.normals_list()] if other._features_list is not None: - other._features_list = [ - f.to(device) for f in other.features_list() - ] + other._features_list = [f.to(device) for f in other.features_list()] for k in self._INTERNAL_TENSORS: v = getattr(self, k) if torch.is_tensor(v): @@ -892,16 +873,11 @@ class Pointclouds(object): for features in self.features_list(): new_features_list.extend(features.clone() for _ in range(N)) return Pointclouds( - points=new_points_list, - normals=new_normals_list, - features=new_features_list, + points=new_points_list, normals=new_normals_list, features=new_features_list ) def update_padded( - self, - new_points_padded, - new_normals_padded=None, - new_features_padded=None, + self, new_points_padded, new_normals_padded=None, new_features_padded=None ): """ Returns a Pointcloud structure with updated padded tensors and copies of @@ -920,13 +896,9 @@ class Pointclouds(object): def check_shapes(x, size): if x.shape[0] != size[0]: - raise ValueError( - "new values must have the same batch dimension." - ) + raise ValueError("new values must have the same batch dimension.") if x.shape[1] != size[1]: - raise ValueError( - "new values must have the same number of points." - ) + raise ValueError("new values must have the same number of points.") if size[2] is not None: if x.shape[2] != size[2]: raise ValueError( diff --git a/pytorch3d/structures/textures.py b/pytorch3d/structures/textures.py index 94102ce3..fecece4b 100644 --- a/pytorch3d/structures/textures.py +++ b/pytorch3d/structures/textures.py @@ -1,6 +1,7 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
from typing import List, Optional, Union + import torch import torchvision.transforms as T @@ -233,11 +234,7 @@ class Textures(object): if all( v is not None - for v in [ - self._faces_uvs_padded, - self._verts_uvs_padded, - self._maps_padded, - ] + for v in [self._faces_uvs_padded, self._verts_uvs_padded, self._maps_padded] ): new_verts_uvs = _extend_tensor(self._verts_uvs_padded, N) new_faces_uvs = _extend_tensor(self._faces_uvs_padded, N) diff --git a/pytorch3d/structures/utils.py b/pytorch3d/structures/utils.py index dcaadafe..c4682b02 100644 --- a/pytorch3d/structures/utils.py +++ b/pytorch3d/structures/utils.py @@ -1,6 +1,7 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import List, Union + import torch @@ -38,9 +39,7 @@ def list_to_padded( pad_dim1 = max(y.shape[1] for y in x if len(y) > 0) else: if len(pad_size) != 2: - raise ValueError( - "Pad size must contain target size for 1st and 2nd dim" - ) + raise ValueError("Pad size must contain target size for 1st and 2nd dim") pad_dim0, pad_dim1 = pad_size N = len(x) @@ -55,9 +54,7 @@ def list_to_padded( return x_padded -def padded_to_list( - x: torch.Tensor, split_size: Union[list, tuple, None] = None -): +def padded_to_list(x: torch.Tensor, split_size: Union[list, tuple, None] = None): r""" Transforms a padded tensor of shape (N, M, K) into a list of N tensors of shape (Mi, Ki) where (Mi, Ki) is specified in split_size(i), or of shape @@ -81,9 +78,7 @@ def padded_to_list( N = len(split_size) if x.shape[0] != N: - raise ValueError( - "Split size must be of same length as inputs first dimension" - ) + raise ValueError("Split size must be of same length as inputs first dimension") for i in range(N): if isinstance(split_size[i], int): @@ -119,9 +114,7 @@ def list_to_packed(x: List[torch.Tensor]): """ N = len(x) num_items = torch.zeros(N, dtype=torch.int64, device=x[0].device) - item_packed_first_idx = torch.zeros( - N, dtype=torch.int64, device=x[0].device - ) + item_packed_first_idx = torch.zeros(N, dtype=torch.int64, device=x[0].device) item_packed_to_list_idx = [] cur = 0 for i, y in enumerate(x): @@ -187,9 +180,7 @@ def padded_to_packed( N, M, D = x.shape if split_size is not None and pad_value is not None: - raise ValueError( - "Only one of split_size or pad_value should be provided." 
- ) + raise ValueError("Only one of split_size or pad_value should be provided.") x_packed = x.reshape(-1, D) # flatten padded @@ -205,9 +196,7 @@ def padded_to_packed( # Convert to packed using split sizes N = len(split_size) if x.shape[0] != N: - raise ValueError( - "Split size must be of same length as inputs first dimension" - ) + raise ValueError("Split size must be of same length as inputs first dimension") if not all(isinstance(i, int) for i in split_size): raise ValueError( diff --git a/pytorch3d/transforms/__init__.py b/pytorch3d/transforms/__init__.py index 3080a1bc..b30b7fa1 100644 --- a/pytorch3d/transforms/__init__.py +++ b/pytorch3d/transforms/__init__.py @@ -22,4 +22,5 @@ from .so3 import ( ) from .transform3d import Rotate, RotateAxisAngle, Scale, Transform3d, Translate + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/transforms/rotation_conversions.py b/pytorch3d/transforms/rotation_conversions.py index e1678638..a30e3759 100644 --- a/pytorch3d/transforms/rotation_conversions.py +++ b/pytorch3d/transforms/rotation_conversions.py @@ -2,6 +2,7 @@ import functools from typing import Optional + import torch @@ -155,9 +156,7 @@ def euler_angles_to_matrix(euler_angles, convention: str): for letter in convention: if letter not in ("X", "Y", "Z"): raise ValueError(f"Invalid letter {letter} in convention string.") - matrices = map( - _axis_angle_rotation, convention, torch.unbind(euler_angles, -1) - ) + matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1)) return functools.reduce(torch.matmul, matrices) @@ -246,10 +245,7 @@ def matrix_to_euler_angles(matrix, convention: str): def random_quaternions( - n: int, - dtype: Optional[torch.dtype] = None, - device=None, - requires_grad=False, + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False ): """ Generate random quaternions representing rotations, @@ -266,19 +262,14 @@ def random_quaternions( Returns: Quaternions as tensor of shape (N, 4). """ - o = torch.randn( - (n, 4), dtype=dtype, device=device, requires_grad=requires_grad - ) + o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad) s = (o * o).sum(1) o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None] return o def random_rotations( - n: int, - dtype: Optional[torch.dtype] = None, - device=None, - requires_grad=False, + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False ): """ Generate random rotations as 3x3 rotation matrices. diff --git a/pytorch3d/transforms/so3.py b/pytorch3d/transforms/so3.py index ec53cf9a..18c63b78 100644 --- a/pytorch3d/transforms/so3.py +++ b/pytorch3d/transforms/so3.py @@ -3,6 +3,7 @@ import torch + HAT_INV_SKEW_SYMMETRIC_TOL = 1e-5 @@ -65,9 +66,7 @@ def so3_rotation_angle(R, eps: float = 1e-4, cos_angle: bool = False): rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2] if ((rot_trace < -1.0 - eps) + (rot_trace > 3.0 + eps)).any(): - raise ValueError( - "A matrix has trace outside valid range [-1-eps,3+eps]." 
- ) + raise ValueError("A matrix has trace outside valid range [-1-eps,3+eps].") # clamp to valid range rot_trace = torch.clamp(rot_trace, -1.0, 3.0) diff --git a/pytorch3d/transforms/transform3d.py b/pytorch3d/transforms/transform3d.py index 53ff5e40..e3f58050 100644 --- a/pytorch3d/transforms/transform3d.py +++ b/pytorch3d/transforms/transform3d.py @@ -3,6 +3,7 @@ import math import warnings from typing import Optional + import torch from .rotation_conversions import _axis_angle_rotation @@ -230,9 +231,7 @@ class Transform3d: # the transformations with get_matrix(), this correctly # right-multiplies by the inverse of self._matrix # at the end of the composition. - tinv._transforms = [ - t.inverse() for t in reversed(self._transforms) - ] + tinv._transforms = [t.inverse() for t in reversed(self._transforms)] last = Transform3d(device=self.device) last._matrix = i_matrix tinv._transforms.append(last) @@ -334,9 +333,7 @@ class Transform3d: return self.compose(Scale(device=self.device, *args, **kwargs)) def rotate_axis_angle(self, *args, **kwargs): - return self.compose( - RotateAxisAngle(device=self.device, *args, **kwargs) - ) + return self.compose(RotateAxisAngle(device=self.device, *args, **kwargs)) def clone(self): """ @@ -388,9 +385,7 @@ class Transform3d: class Translate(Transform3d): - def __init__( - self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu" - ): + def __init__(self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"): """ Create a new Transform3d representing 3D translations. @@ -424,9 +419,7 @@ class Translate(Transform3d): class Scale(Transform3d): - def __init__( - self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu" - ): + def __init__(self, x, y=None, z=None, dtype=torch.float32, device: str = "cpu"): """ A Transform3d representing a scaling operation, with different scale factors along each coordinate axis. @@ -444,9 +437,7 @@ class Scale(Transform3d): - 1D torch tensor """ super().__init__(device=device) - xyz = _handle_input( - x, y, z, dtype, device, "scale", allow_singleton=True - ) + xyz = _handle_input(x, y, z, dtype, device, "scale", allow_singleton=True) N = xyz.shape[0] # TODO: Can we do this all in one go somehow? @@ -469,11 +460,7 @@ class Scale(Transform3d): class Rotate(Transform3d): def __init__( - self, - R, - dtype=torch.float32, - device: str = "cpu", - orthogonal_tol: float = 1e-5, + self, R, dtype=torch.float32, device: str = "cpu", orthogonal_tol: float = 1e-5 ): """ Create a new Transform3d representing 3D rotation using a rotation @@ -562,9 +549,7 @@ def _handle_coord(c, dtype, device): return c -def _handle_input( - x, y, z, dtype, device, name: str, allow_singleton: bool = False -): +def _handle_input(x, y, z, dtype, device, name: str, allow_singleton: bool = False): """ Helper function to handle parsing logic for building transforms. 
The output is always a tensor of shape (N, 3), but there are several types of allowed diff --git a/pytorch3d/utils/__init__.py b/pytorch3d/utils/__init__.py index bcf5f27f..a14edd62 100644 --- a/pytorch3d/utils/__init__.py +++ b/pytorch3d/utils/__init__.py @@ -3,4 +3,5 @@ from .ico_sphere import ico_sphere from .torus import torus + __all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/pytorch3d/utils/ico_sphere.py b/pytorch3d/utils/ico_sphere.py index 7e652f2e..64f108d3 100644 --- a/pytorch3d/utils/ico_sphere.py +++ b/pytorch3d/utils/ico_sphere.py @@ -2,10 +2,10 @@ import torch - from pytorch3d.ops.subdivide_meshes import SubdivideMeshes from pytorch3d.structures.meshes import Meshes + # Vertex coordinates for a level 0 ico-sphere. _ico_verts0 = [ [-0.5257, 0.8507, 0.0000], diff --git a/pytorch3d/utils/torus.py b/pytorch3d/utils/torus.py index 1675a75f..b1cbba36 100644 --- a/pytorch3d/utils/torus.py +++ b/pytorch3d/utils/torus.py @@ -3,8 +3,8 @@ from itertools import tee from math import cos, pi, sin from typing import Iterator, Optional, Tuple -import torch +import torch from pytorch3d.structures.meshes import Meshes @@ -16,11 +16,7 @@ def _make_pair_range(N: int) -> Iterator[Tuple[int, int]]: def torus( - r: float, - R: float, - sides: int, - rings: int, - device: Optional[torch.device] = None, + r: float, R: float, sides: int, rings: int, device: Optional[torch.device] = None ) -> Meshes: """ Create vertices and faces for a torus. diff --git a/scripts/parse_tutorials.py b/scripts/parse_tutorials.py index 638ba890..6b4a9c05 100755 --- a/scripts/parse_tutorials.py +++ b/scripts/parse_tutorials.py @@ -4,10 +4,12 @@ import argparse import json import os + import nbformat from bs4 import BeautifulSoup from nbconvert import HTMLExporter, ScriptExporter + TEMPLATE = """const CWD = process.cwd(); const React = require('react'); @@ -41,9 +43,7 @@ def gen_tutorials(repo_dir: str) -> None: Also create ipynb and py versions of tutorial in Docusaurus site for download. """ - with open( - os.path.join(repo_dir, "website", "tutorials.json"), "r" - ) as infile: + with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile: tutorial_config = json.loads(infile.read()) tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v} @@ -107,10 +107,7 @@ if __name__ == "__main__": description="Generate JS, HTML, ipynb, and py files for tutorials." ) parser.add_argument( - "--repo_dir", - metavar="path", - required=True, - help="PyTorch3D repo directory.", + "--repo_dir", metavar="path", required=True, help="PyTorch3D repo directory." 
) args = parser.parse_args() gen_tutorials(args.repo_dir) diff --git a/setup.py b/setup.py index 78a5cdc4..bf25bede 100755 --- a/setup.py +++ b/setup.py @@ -3,8 +3,9 @@ import glob import os -from setuptools import find_packages, setup + import torch +from setuptools import find_packages, setup from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension diff --git a/tests/bm_blending.py b/tests/bm_blending.py index 561de111..cdde1975 100644 --- a/tests/bm_blending.py +++ b/tests/bm_blending.py @@ -2,8 +2,8 @@ from itertools import product -from fvcore.common.benchmark import benchmark +from fvcore.common.benchmark import benchmark from test_blending import TestBlending @@ -18,12 +18,7 @@ def bm_blending() -> None: for case in test_cases: n, s, k, d = case kwargs_list.append( - { - "num_meshes": n, - "image_size": s, - "faces_per_pixel": k, - "device": d, - } + {"num_meshes": n, "image_size": s, "faces_per_pixel": k, "device": d} ) benchmark( diff --git a/tests/bm_chamfer.py b/tests/bm_chamfer.py index 9ea04e65..500019d9 100644 --- a/tests/bm_chamfer.py +++ b/tests/bm_chamfer.py @@ -3,7 +3,6 @@ import torch from fvcore.common.benchmark import benchmark - from test_chamfer import TestChamfer @@ -25,9 +24,4 @@ def bm_chamfer() -> None: {"batch_size": 1, "P1": 1000, "P2": 3000, "return_normals": False}, {"batch_size": 1, "P1": 1000, "P2": 30000, "return_normals": True}, ] - benchmark( - TestChamfer.chamfer_with_init, - "CHAMFER", - kwargs_list, - warmup_iters=1, - ) + benchmark(TestChamfer.chamfer_with_init, "CHAMFER", kwargs_list, warmup_iters=1) diff --git a/tests/bm_cubify.py b/tests/bm_cubify.py index beff3eb6..239b1e69 100644 --- a/tests/bm_cubify.py +++ b/tests/bm_cubify.py @@ -1,7 +1,6 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from fvcore.common.benchmark import benchmark - from test_cubify import TestCubify @@ -11,6 +10,4 @@ def bm_cubify() -> None: {"batch_size": 64, "V": 16}, {"batch_size": 16, "V": 32}, ] - benchmark( - TestCubify.cubify_with_init, "CUBIFY", kwargs_list, warmup_iters=1 - ) + benchmark(TestCubify.cubify_with_init, "CUBIFY", kwargs_list, warmup_iters=1) diff --git a/tests/bm_face_areas_normals.py b/tests/bm_face_areas_normals.py index f99ef7e4..0a01441f 100644 --- a/tests/bm_face_areas_normals.py +++ b/tests/bm_face_areas_normals.py @@ -2,9 +2,9 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_face_areas_normals import TestFaceAreasNormals diff --git a/tests/bm_graph_conv.py b/tests/bm_graph_conv.py index c19ff8e1..404c44a4 100644 --- a/tests/bm_graph_conv.py +++ b/tests/bm_graph_conv.py @@ -2,9 +2,9 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_graph_conv import TestGraphConv diff --git a/tests/bm_knn.py b/tests/bm_knn.py index 1bf935d2..d0041fcb 100644 --- a/tests/bm_knn.py +++ b/tests/bm_knn.py @@ -1,9 +1,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
from itertools import product + import torch from fvcore.common.benchmark import benchmark - from pytorch3d import _C from pytorch3d.ops.knn import _knn_points_idx_naive @@ -32,9 +32,7 @@ def benchmark_knn_cuda_versions() -> None: knn_kwargs.append({"N": N, "D": D, "P": P, "K": K, "v": version}) for N, P, D in product(Ns, Ps, Ds): nn_kwargs.append({"N": N, "D": D, "P": P}) - benchmark( - knn_cuda_with_init, "KNN_CUDA_VERSIONS", knn_kwargs, warmup_iters=1 - ) + benchmark(knn_cuda_with_init, "KNN_CUDA_VERSIONS", knn_kwargs, warmup_iters=1) benchmark(nn_cuda_with_init, "NN_CUDA", nn_kwargs, warmup_iters=1) @@ -50,10 +48,7 @@ def benchmark_knn_cuda_vs_naive() -> None: if P <= 4096: naive_kwargs.append({"N": N, "D": D, "P": P, "K": K}) benchmark( - knn_python_cuda_with_init, - "KNN_CUDA_PYTHON", - naive_kwargs, - warmup_iters=1, + knn_python_cuda_with_init, "KNN_CUDA_PYTHON", naive_kwargs, warmup_iters=1 ) benchmark(knn_cuda_with_init, "KNN_CUDA", knn_kwargs, warmup_iters=1) @@ -68,9 +63,7 @@ def benchmark_knn_cpu() -> None: knn_kwargs.append({"N": N, "D": D, "P": P, "K": K}) for N, P, D in product(Ns, Ps, Ds): nn_kwargs.append({"N": N, "D": D, "P": P}) - benchmark( - knn_python_cpu_with_init, "KNN_CPU_PYTHON", knn_kwargs, warmup_iters=1 - ) + benchmark(knn_python_cpu_with_init, "KNN_CPU_PYTHON", knn_kwargs, warmup_iters=1) benchmark(knn_cpu_with_init, "KNN_CPU_CPP", knn_kwargs, warmup_iters=1) benchmark(nn_cpu_with_init, "NN_CPU_CPP", nn_kwargs, warmup_iters=1) diff --git a/tests/bm_main.py b/tests/bm_main.py index 65c9bfae..f178ef98 100755 --- a/tests/bm_main.py +++ b/tests/bm_main.py @@ -5,6 +5,7 @@ import glob import importlib from os.path import basename, dirname, isfile, join, sys + if __name__ == "__main__": # pyre-ignore[16] if len(sys.argv) > 1: @@ -25,7 +26,5 @@ if __name__ == "__main__": for attr in dir(module): # Run all the functions with names "bm_*" in the module. if attr.startswith("bm_"): - print( - "Running benchmarks for " + module_name + "/" + attr + "..." - ) + print("Running benchmarks for " + module_name + "/" + attr + "...") getattr(module, attr)() diff --git a/tests/bm_mesh_edge_loss.py b/tests/bm_mesh_edge_loss.py index 487ef503..b7a9566b 100644 --- a/tests/bm_mesh_edge_loss.py +++ b/tests/bm_mesh_edge_loss.py @@ -2,8 +2,8 @@ from itertools import product -from fvcore.common.benchmark import benchmark +from fvcore.common.benchmark import benchmark from test_mesh_edge_loss import TestMeshEdgeLoss @@ -17,8 +17,5 @@ def bm_mesh_edge_loss() -> None: n, v, f = case kwargs_list.append({"num_meshes": n, "max_v": v, "max_f": f}) benchmark( - TestMeshEdgeLoss.mesh_edge_loss, - "MESH_EDGE_LOSS", - kwargs_list, - warmup_iters=1, + TestMeshEdgeLoss.mesh_edge_loss, "MESH_EDGE_LOSS", kwargs_list, warmup_iters=1 ) diff --git a/tests/bm_mesh_io.py b/tests/bm_mesh_io.py index 33b0861d..a4f9b5ab 100644 --- a/tests/bm_mesh_io.py +++ b/tests/bm_mesh_io.py @@ -1,7 +1,6 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
from fvcore.common.benchmark import benchmark - from test_obj_io import TestMeshObjIO from test_ply_io import TestMeshPlyIO diff --git a/tests/bm_mesh_laplacian_smoothing.py b/tests/bm_mesh_laplacian_smoothing.py index 4f332c75..44eeec2a 100644 --- a/tests/bm_mesh_laplacian_smoothing.py +++ b/tests/bm_mesh_laplacian_smoothing.py @@ -2,9 +2,9 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_mesh_laplacian_smoothing import TestLaplacianSmoothing diff --git a/tests/bm_mesh_normal_consistency.py b/tests/bm_mesh_normal_consistency.py index d77e1466..2d69c76d 100644 --- a/tests/bm_mesh_normal_consistency.py +++ b/tests/bm_mesh_normal_consistency.py @@ -2,9 +2,9 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_mesh_normal_consistency import TestMeshNormalConsistency diff --git a/tests/bm_meshes.py b/tests/bm_meshes.py index 5a0bc5c5..66c4178e 100644 --- a/tests/bm_meshes.py +++ b/tests/bm_meshes.py @@ -2,9 +2,9 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_meshes import TestMeshes @@ -20,9 +20,7 @@ def bm_compute_packed_padded_meshes() -> None: test_cases = product(num_meshes, max_v, max_f, devices) for case in test_cases: n, v, f, d = case - kwargs_list.append( - {"num_meshes": n, "max_v": v, "max_f": f, "device": d} - ) + kwargs_list.append({"num_meshes": n, "max_v": v, "max_f": f, "device": d}) benchmark( TestMeshes.compute_packed_with_init, "COMPUTE_PACKED", diff --git a/tests/bm_nearest_neighbor_points.py b/tests/bm_nearest_neighbor_points.py index 3b9cc50b..f98ae17e 100644 --- a/tests/bm_nearest_neighbor_points.py +++ b/tests/bm_nearest_neighbor_points.py @@ -1,9 +1,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_nearest_neighbor_points import TestNearestNeighborPoints diff --git a/tests/bm_packed_to_padded.py b/tests/bm_packed_to_padded.py index 787f0e63..ff597a21 100644 --- a/tests/bm_packed_to_padded.py +++ b/tests/bm_packed_to_padded.py @@ -2,9 +2,9 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_packed_to_padded import TestPackedToPadded @@ -23,13 +23,7 @@ def bm_packed_to_padded() -> None: for case in test_cases: n, v, f, d, b = case kwargs_list.append( - { - "num_meshes": n, - "num_verts": v, - "num_faces": f, - "num_d": d, - "device": b, - } + {"num_meshes": n, "num_verts": v, "num_faces": f, "num_d": d, "device": b} ) benchmark( TestPackedToPadded.packed_to_padded_with_init, diff --git a/tests/bm_pointclouds.py b/tests/bm_pointclouds.py index e3dbd83b..a214ce2f 100644 --- a/tests/bm_pointclouds.py +++ b/tests/bm_pointclouds.py @@ -2,8 +2,8 @@ from itertools import product -from fvcore.common.benchmark import benchmark +from fvcore.common.benchmark import benchmark from test_pointclouds import TestPointclouds diff --git a/tests/bm_rasterize_meshes.py b/tests/bm_rasterize_meshes.py index e033b116..22acae7e 100644 --- a/tests/bm_rasterize_meshes.py +++ b/tests/bm_rasterize_meshes.py @@ -2,11 +2,12 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_rasterize_meshes import TestRasterizeMeshes + # ico levels: # 0: (12 verts, 20 faces) # 1: (42 verts, 80 faces) @@ -39,12 +40,7 @@ def bm_rasterize_meshes() -> None: for case in test_cases: n, ic, im, b = case kwargs_list.append( - { - "num_meshes": n, - "ico_level": ic, - "image_size": im, - "blur_radius": b, - } + {"num_meshes": n, "ico_level": ic, "image_size": im, "blur_radius": b} ) benchmark( TestRasterizeMeshes.rasterize_meshes_cpu_with_init, @@ -63,9 +59,7 @@ def bm_rasterize_meshes() -> None: test_cases = product(num_meshes, ico_level, image_size, blur, bin_size) # only keep cases where bin_size == 0 or image_size / bin_size < 16 test_cases = [ - elem - for elem in test_cases - if (elem[-1] == 0 or elem[-3] / elem[-1] < 16) + elem for elem in test_cases if (elem[-1] == 0 or elem[-3] / elem[-1] < 16) ] for case in test_cases: n, ic, im, b, bn = case diff --git a/tests/bm_rasterize_points.py b/tests/bm_rasterize_points.py index 3aaf77c3..eb32b22c 100644 --- a/tests/bm_rasterize_points.py +++ b/tests/bm_rasterize_points.py @@ -3,7 +3,6 @@ import torch from fvcore.common.benchmark import benchmark - from pytorch3d.renderer.points.rasterize_points import ( rasterize_points, rasterize_points_python, @@ -40,9 +39,7 @@ def bm_python_vs_cpu() -> None: {"N": 1, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3}, {"N": 2, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3}, ] - benchmark( - _bm_python_with_init, "RASTERIZE_PYTHON", kwargs_list, warmup_iters=1 - ) + benchmark(_bm_python_with_init, "RASTERIZE_PYTHON", kwargs_list, warmup_iters=1) benchmark(_bm_cpu_with_init, "RASTERIZE_CPU", kwargs_list, warmup_iters=1) kwargs_list = [ {"N": 2, "P": 32, "img_size": 32, "radius": 0.1, "pts_per_pxl": 3}, diff --git a/tests/bm_sample_points_from_meshes.py b/tests/bm_sample_points_from_meshes.py index 7201beea..0b8dbadd 100644 --- a/tests/bm_sample_points_from_meshes.py +++ b/tests/bm_sample_points_from_meshes.py @@ -2,9 +2,9 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from 
test_sample_points_from_meshes import TestSamplePoints diff --git a/tests/bm_so3.py b/tests/bm_so3.py index d481ef06..9d7ebaa0 100644 --- a/tests/bm_so3.py +++ b/tests/bm_so3.py @@ -1,7 +1,6 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from fvcore.common.benchmark import benchmark - from test_so3 import TestSO3 diff --git a/tests/bm_subdivide_meshes.py b/tests/bm_subdivide_meshes.py index ee16a1f4..c4e5b2bc 100644 --- a/tests/bm_subdivide_meshes.py +++ b/tests/bm_subdivide_meshes.py @@ -2,8 +2,8 @@ from itertools import product -from fvcore.common.benchmark import benchmark +from fvcore.common.benchmark import benchmark from test_subdivide_meshes import TestSubdivideMeshes diff --git a/tests/bm_vert_align.py b/tests/bm_vert_align.py index 574a0273..9b695428 100644 --- a/tests/bm_vert_align.py +++ b/tests/bm_vert_align.py @@ -2,9 +2,9 @@ from itertools import product + import torch from fvcore.common.benchmark import benchmark - from test_vert_align import TestVertAlign @@ -25,8 +25,5 @@ def bm_vert_align() -> None: ) benchmark( - TestVertAlign.vert_align_with_init, - "VERT_ALIGN", - kwargs_list, - warmup_iters=1, + TestVertAlign.vert_align_with_init, "VERT_ALIGN", kwargs_list, warmup_iters=1 ) diff --git a/tests/common_testing.py b/tests/common_testing.py index 605ad314..4549aae1 100644 --- a/tests/common_testing.py +++ b/tests/common_testing.py @@ -1,8 +1,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import numpy as np import unittest + +import numpy as np import torch @@ -11,17 +12,13 @@ class TestCaseMixin(unittest.TestCase): """ Verify that tensor1 and tensor2 have their data in distinct locations. """ - self.assertNotEqual( - tensor1.storage().data_ptr(), tensor2.storage().data_ptr() - ) + self.assertNotEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr()) def assertNotSeparate(self, tensor1, tensor2) -> None: """ Verify that tensor1 and tensor2 have their data in the same locations. """ - self.assertEqual( - tensor1.storage().data_ptr(), tensor2.storage().data_ptr() - ) + self.assertEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr()) def assertAllSeparate(self, tensor_list) -> None: """ @@ -57,7 +54,5 @@ class TestCaseMixin(unittest.TestCase): input, other, rtol=rtol, atol=atol, equal_nan=equal_nan ) else: - close = np.allclose( - input, other, rtol=rtol, atol=atol, equal_nan=equal_nan - ) + close = np.allclose(input, other, rtol=rtol, atol=atol, equal_nan=equal_nan) self.assertTrue(close) diff --git a/tests/test_blending.py b/tests/test_blending.py index 16d9939c..6e5aa62c 100644 --- a/tests/test_blending.py +++ b/tests/test_blending.py @@ -1,9 +1,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-import numpy as np import unittest -import torch +import numpy as np +import torch from pytorch3d.renderer.blending import ( BlendParams, hard_rgb_blend, @@ -43,9 +43,7 @@ def sigmoid_blend_naive_loop(colors, fragments, blend_params): return pixel_colors -def sigmoid_blend_naive_loop_backward( - grad_images, images, fragments, blend_params -): +def sigmoid_blend_naive_loop_backward(grad_images, images, fragments, blend_params): pix_to_face = fragments.pix_to_face dists = fragments.dists sigma = blend_params.sigma @@ -135,14 +133,7 @@ class TestBlending(unittest.TestCase): torch.manual_seed(42) def _compare_impls( - self, - fn1, - fn2, - args1, - args2, - grad_var1=None, - grad_var2=None, - compare_grads=True, + self, fn1, fn2, args1, args2, grad_var1=None, grad_var2=None, compare_grads=True ): out1 = fn1(*args1) @@ -160,9 +151,7 @@ class TestBlending(unittest.TestCase): (out2 * grad_out).sum().backward() self.assertTrue(hasattr(grad_var2, "grad")) self.assertTrue( - torch.allclose( - grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5 - ) + torch.allclose(grad_var1.grad.cpu(), grad_var2.grad.cpu(), atol=2e-5) ) def test_hard_rgb_blend(self): @@ -199,9 +188,7 @@ class TestBlending(unittest.TestCase): # # (-) means inside triangle, (+) means outside triangle. random_sign_flip = torch.rand((N, S, S, K)) random_sign_flip[random_sign_flip > 0.5] *= -1.0 - dists = torch.randn( - size=(N, S, S, K), requires_grad=True, device=device - ) + dists = torch.randn(size=(N, S, S, K), requires_grad=True, device=device) fragments = Fragments( pix_to_face=pix_to_face, bary_coords=empty, # dummy @@ -238,9 +225,7 @@ class TestBlending(unittest.TestCase): # # (-) means inside triangle, (+) means outside triangle. random_sign_flip = torch.rand((N, S, S, K)) random_sign_flip[random_sign_flip > 0.5] *= -1.0 - dists1 = torch.randn( - size=(N, S, S, K), requires_grad=True, device=device - ) + dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device) dists2 = dists1.detach().clone() dists2.requires_grad = True @@ -276,9 +261,7 @@ class TestBlending(unittest.TestCase): # of the image with surrounding padded values. N, S, K = 1, 8, 2 device = torch.device("cuda") - pix_to_face = -torch.ones( - (N, S, S, K), dtype=torch.int64, device=device - ) + pix_to_face = -torch.ones((N, S, S, K), dtype=torch.int64, device=device) h = int(S / 2) pix_to_face_full = torch.randint( size=(N, h, h, K), low=0, high=100, device=device @@ -294,9 +277,7 @@ class TestBlending(unittest.TestCase): # randomly flip the sign of the distance # (-) means inside triangle, (+) means outside triangle. - dists1 = ( - torch.randn(size=(N, S, S, K), device=device) * random_sign_flip - ) + dists1 = torch.randn(size=(N, S, S, K), device=device) * random_sign_flip dists2 = dists1.clone() zbuf2 = zbuf1.clone() dists1.requires_grad = True @@ -353,9 +334,7 @@ class TestBlending(unittest.TestCase): # # (-) means inside triangle, (+) means outside triangle. random_sign_flip = torch.rand((N, S, S, K), device=device) random_sign_flip[random_sign_flip > 0.5] *= -1.0 - dists1 = torch.randn( - size=(N, S, S, K), requires_grad=True, device=device - ) + dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device) fragments = Fragments( pix_to_face=pix_to_face, bary_coords=empty, # dummy @@ -398,15 +377,10 @@ class TestBlending(unittest.TestCase): # # (-) means inside triangle, (+) means outside triangle. 
random_sign_flip = torch.rand((N, S, S, K), device=device) random_sign_flip[random_sign_flip > 0.5] *= -1.0 - dists1 = torch.randn( - size=(N, S, S, K), requires_grad=True, device=device - ) + dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device) zbuf = torch.randn(size=(N, S, S, K), requires_grad=True, device=device) fragments = Fragments( - pix_to_face=pix_to_face, - bary_coords=empty, # dummy - zbuf=zbuf, - dists=dists1, + pix_to_face=pix_to_face, bary_coords=empty, zbuf=zbuf, dists=dists1 # dummy ) blend_params = BlendParams(sigma=1e-3) diff --git a/tests/test_build.py b/tests/test_build.py index 865a7fd5..7cfd3011 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -3,6 +3,7 @@ import unittest from collections import Counter from pathlib import Path + # This file groups together tests which look at the code without running it. @@ -61,6 +62,5 @@ class TestBuild(unittest.TestCase): if firstline.startswith(("# -*-", "#!")): firstline = f.readline() self.assertTrue( - firstline.endswith(expect), - f"{i} missing copyright header.", + firstline.endswith(expect), f"{i} missing copyright header." ) diff --git a/tests/test_cameras.py b/tests/test_cameras.py index 085af8d0..6e22e702 100644 --- a/tests/test_cameras.py +++ b/tests/test_cameras.py @@ -26,10 +26,11 @@ # SOFTWARE. import math -import numpy as np import unittest -import torch +import numpy as np +import torch +from common_testing import TestCaseMixin from pytorch3d.renderer.cameras import ( OpenGLOrthographicCameras, OpenGLPerspectiveCameras, @@ -43,8 +44,6 @@ from pytorch3d.renderer.cameras import ( from pytorch3d.transforms import Transform3d from pytorch3d.transforms.so3 import so3_exponential_map -from common_testing import TestCaseMixin - # Naive function adapted from SoftRasterizer for test purposes. 
def perspective_project_naive(points, fov=60.0): @@ -58,9 +57,7 @@ def perspective_project_naive(points, fov=60.0): coordinate (no z renormalization) """ device = points.device - halfFov = torch.tensor( - (fov / 2) / 180 * np.pi, dtype=torch.float32, device=device - ) + halfFov = torch.tensor((fov / 2) / 180 * np.pi, dtype=torch.float32, device=device) scale = torch.tan(halfFov[None]) scale = scale[:, None] z = points[:, :, 2] @@ -150,9 +147,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase): dist = 2.7 elev = 90.0 azim = 0.0 - expected_position = torch.tensor( - [0.0, 2.7, 0.0], dtype=torch.float32 - ).view(1, 3) + expected_position = torch.tensor([0.0, 2.7, 0.0], dtype=torch.float32).view( + 1, 3 + ) position = camera_position_from_spherical_angles(dist, elev, azim) self.assertClose(position, expected_position, atol=2e-7) @@ -171,9 +168,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase): dist = torch.tensor(2.7) elev = torch.tensor(0.0) azim = torch.tensor(90.0) - expected_position = torch.tensor( - [2.7, 0.0, 0.0], dtype=torch.float32 - ).view(1, 3) + expected_position = torch.tensor([2.7, 0.0, 0.0], dtype=torch.float32).view( + 1, 3 + ) position = camera_position_from_spherical_angles(dist, elev, azim) self.assertClose(position, expected_position, atol=2e-7) @@ -181,9 +178,9 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase): dist = 2.7 elev = torch.tensor(0.0) azim = 90.0 - expected_position = torch.tensor( - [2.7, 0.0, 0.0], dtype=torch.float32 - ).view(1, 3) + expected_position = torch.tensor([2.7, 0.0, 0.0], dtype=torch.float32).view( + 1, 3 + ) position = camera_position_from_spherical_angles(dist, elev, azim) self.assertClose(position, expected_position, atol=2e-7) @@ -228,8 +225,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase): elev = torch.tensor([0.0]) azim = torch.tensor([90.0]) expected_position = torch.tensor( - [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], - dtype=torch.float32, + [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], dtype=torch.float32 ) position = camera_position_from_spherical_angles(dist, elev, azim) self.assertClose(position, expected_position, atol=3e-7) @@ -239,8 +235,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase): elev = 0.0 azim = torch.tensor(90.0) expected_position = torch.tensor( - [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], - dtype=torch.float32, + [[2.0, 0.0, 0.0], [3.0, 0.0, 0.0], [5.0, 0.0, 0.0]], dtype=torch.float32 ) position = camera_position_from_spherical_angles(dist, elev, azim) self.assertClose(position, expected_position, atol=3e-7) @@ -364,9 +359,7 @@ class TestCameraHelpers(TestCaseMixin, unittest.TestCase): ): cam = cam_type(R=R, T=T) RT_class = cam.get_world_to_view_transform() - self.assertTrue( - torch.allclose(RT.get_matrix(), RT_class.get_matrix()) - ) + self.assertTrue(torch.allclose(RT.get_matrix(), RT_class.get_matrix())) self.assertTrue(isinstance(RT, Transform3d)) @@ -539,9 +532,7 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase): # applying the scale puts the z coordinate at the far clipping plane # so the z is mapped to 1.0 projected_verts = torch.tensor([2, 1, 1], dtype=torch.float32) - cameras = OpenGLOrthographicCameras( - znear=1.0, zfar=10.0, scale_xyz=scale - ) + cameras = OpenGLOrthographicCameras(znear=1.0, zfar=10.0, scale_xyz=scale) P = cameras.get_projection_transform() v1 = P.transform_points(vertices) v2 = orthographic_project_naive(vertices, scale) @@ -578,9 +569,7 @@ class 
TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase): far = torch.tensor([10.0]) near = 1.0 scale = torch.tensor([[1.0, 1.0, 1.0]], requires_grad=True) - cameras = OpenGLOrthographicCameras( - znear=near, zfar=far, scale_xyz=scale - ) + cameras = OpenGLOrthographicCameras(znear=near, zfar=far, scale_xyz=scale) P = cameras.get_projection_transform() vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32) vertices_batch = vertices[None, None, :] @@ -683,15 +672,11 @@ class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase): self.assertClose(v3[..., :2], v2[..., :2]) def test_perspective_kwargs(self): - cameras = SfMPerspectiveCameras( - focal_length=5.0, principal_point=((2.5, 2.5),) - ) + cameras = SfMPerspectiveCameras(focal_length=5.0, principal_point=((2.5, 2.5),)) P = cameras.get_projection_transform( focal_length=2.0, principal_point=((2.5, 3.5),) ) vertices = torch.randn([3, 4, 3], dtype=torch.float32) v1 = P.transform_points(vertices) - v2 = sfm_perspective_project_naive( - vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5 - ) + v2 = sfm_perspective_project_naive(vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5) self.assertClose(v1, v2) diff --git a/tests/test_chamfer.py b/tests/test_chamfer.py index c0752e20..05bde5ef 100644 --- a/tests/test_chamfer.py +++ b/tests/test_chamfer.py @@ -1,12 +1,11 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import unittest + import torch import torch.nn.functional as F - -from pytorch3d.loss import chamfer_distance - from common_testing import TestCaseMixin +from pytorch3d.loss import chamfer_distance class TestChamfer(TestCaseMixin, unittest.TestCase): @@ -19,14 +18,10 @@ class TestChamfer(TestCaseMixin, unittest.TestCase): """ device = torch.device("cuda:0") p1 = torch.rand((batch_size, P1, 3), dtype=torch.float32, device=device) - p1_normals = torch.rand( - (batch_size, P1, 3), dtype=torch.float32, device=device - ) + p1_normals = torch.rand((batch_size, P1, 3), dtype=torch.float32, device=device) p1_normals = p1_normals / p1_normals.norm(dim=2, p=2, keepdim=True) p2 = torch.rand((batch_size, P2, 3), dtype=torch.float32, device=device) - p2_normals = torch.rand( - (batch_size, P2, 3), dtype=torch.float32, device=device - ) + p2_normals = torch.rand((batch_size, P2, 3), dtype=torch.float32, device=device) p2_normals = p2_normals / p2_normals.norm(dim=2, p=2, keepdim=True) weights = torch.rand((batch_size,), dtype=torch.float32, device=device) @@ -47,9 +42,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase): for n in range(N): for i1 in range(P1): for i2 in range(P2): - dist[n, i1, i2] = torch.sum( - (p1[n, i1, :] - p2[n, i2, :]) ** 2 - ) + dist[n, i1, i2] = torch.sum((p1[n, i1, :] - p2[n, i2, :]) ** 2) loss = [ torch.min(dist, dim=2)[0], # (N, P1) @@ -146,11 +139,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase): # Error when point_reduction = "none" and batch_reduction = "none". with self.assertRaises(ValueError): chamfer_distance( - p1, - p2, - weights=weights, - batch_reduction="none", - point_reduction="none", + p1, p2, weights=weights, batch_reduction="none", point_reduction="none" ) # Error when batch_reduction is not in ["none", "mean", "sum"]. 
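Aside, not part of the patch: the triple loop in the test_chamfer.py hunk above fills dist[n, i1, i2] with squared point-to-point distances. The short sketch below, assuming only torch, restates the same computation vectorized, with a mean point reduction as one of the reductions these tests exercise.

import torch

p1 = torch.rand(2, 5, 3)  # (N, P1, 3)
p2 = torch.rand(2, 7, 3)  # (N, P2, 3)

# dist[n, i, j] = ||p1[n, i] - p2[n, j]||^2, matching the test's naive loop.
dist = ((p1[:, :, None, :] - p2[:, None, :, :]) ** 2).sum(-1)  # (N, P1, P2)

# Nearest-neighbour terms in both directions, then a mean point reduction.
term_12 = dist.min(dim=2).values.mean(dim=1)  # (N,)
term_21 = dist.min(dim=1).values.mean(dim=1)  # (N,)
chamfer = term_12 + term_21  # one (unweighted) value per batch element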
@@ -339,9 +328,7 @@ class TestChamfer(TestCaseMixin, unittest.TestCase): loss, loss_norm = chamfer_distance(p1, p2, weights=weights) @staticmethod - def chamfer_with_init( - batch_size: int, P1: int, P2: int, return_normals: bool - ): + def chamfer_with_init(batch_size: int, P1: int, P2: int, return_normals: bool): p1, p2, p1_normals, p2_normals, weights = TestChamfer.init_pointclouds( batch_size, P1, P2 ) diff --git a/tests/test_compositing.py b/tests/test_compositing.py index 7f4a5753..0e396b2a 100644 --- a/tests/test_compositing.py +++ b/tests/test_compositing.py @@ -1,8 +1,8 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import unittest -import torch +import torch from pytorch3d.renderer.compositing import ( alpha_composite, norm_weighted_sum, @@ -37,9 +37,7 @@ class TestAccumulatePoints(unittest.TestCase): continue alpha = alphas[b, k, j, i] - output[b, c, j, i] += ( - features[c, n_idx] * alpha * t_alpha - ) + output[b, c, j, i] += features[c, n_idx] * alpha * t_alpha t_alpha = (1 - alpha) * t_alpha return output @@ -105,17 +103,13 @@ class TestAccumulatePoints(unittest.TestCase): continue alpha = alphas[b, k, j, i] - output[b, c, j, i] += ( - features[c, n_idx] * alpha / t_alpha - ) + output[b, c, j, i] += features[c, n_idx] * alpha / t_alpha return output def test_python(self): device = torch.device("cpu") - self._simple_alphacomposite( - self.accumulate_alphacomposite_python, device - ) + self._simple_alphacomposite(self.accumulate_alphacomposite_python, device) self._simple_wsum(self.accumulate_weightedsum_python, device) self._simple_wsumnorm(self.accumulate_weightedsumnorm_python, device) @@ -138,9 +132,7 @@ class TestAccumulatePoints(unittest.TestCase): self._python_vs_cpu_vs_cuda( self.accumulate_weightedsumnorm_python, norm_weighted_sum ) - self._python_vs_cpu_vs_cuda( - self.accumulate_weightedsum_python, weighted_sum - ) + self._python_vs_cpu_vs_cuda(self.accumulate_weightedsum_python, weighted_sum) def _python_vs_cpu_vs_cuda(self, accumulate_func_python, accumulate_func): torch.manual_seed(231) @@ -208,15 +200,11 @@ class TestAccumulatePoints(unittest.TestCase): grads2 = [gradsi.grad.data.clone().cpu() for gradsi in grads2] for i in range(0, len(grads1)): - self.assertTrue( - torch.allclose(grads1[i].cpu(), grads2[i].cpu(), atol=1e-6) - ) + self.assertTrue(torch.allclose(grads1[i].cpu(), grads2[i].cpu(), atol=1e-6)) def _simple_wsum(self, accum_func, device): # Initialise variables - features = torch.Tensor( - [[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]] - ).to(device) + features = torch.Tensor([[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]).to(device) alphas = torch.Tensor( [ @@ -285,15 +273,11 @@ class TestAccumulatePoints(unittest.TestCase): ] ).to(device) - self.assertTrue( - torch.allclose(result.cpu(), true_result.cpu(), rtol=1e-3) - ) + self.assertTrue(torch.allclose(result.cpu(), true_result.cpu(), rtol=1e-3)) def _simple_wsumnorm(self, accum_func, device): # Initialise variables - features = torch.Tensor( - [[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]] - ).to(device) + features = torch.Tensor([[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]).to(device) alphas = torch.Tensor( [ @@ -362,15 +346,11 @@ class TestAccumulatePoints(unittest.TestCase): ] ).to(device) - self.assertTrue( - torch.allclose(result.cpu(), true_result.cpu(), rtol=1e-3) - ) + self.assertTrue(torch.allclose(result.cpu(), true_result.cpu(), rtol=1e-3)) def _simple_alphacomposite(self, accum_func, device): # Initialise variables - features = torch.Tensor( - [[0.1, 0.4, 0.6, 
-            0.9], [0.1, 0.4, 0.6, 0.9]]
-        ).to(device)
+        features = torch.Tensor([[0.1, 0.4, 0.6, 0.9], [0.1, 0.4, 0.6, 0.9]]).to(device)
         alphas = torch.Tensor(
             [
diff --git a/tests/test_cubify.py b/tests/test_cubify.py
index f5b19390..5e3c0da4 100644
--- a/tests/test_cubify.py
+++ b/tests/test_cubify.py
@@ -1,8 +1,8 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 import unittest
-import torch
 
+import torch
 from pytorch3d.ops import cubify
@@ -33,9 +33,7 @@ class TestCubify(unittest.TestCase):
         # 1st-check
         verts, faces = meshes.get_mesh_verts_faces(0)
-        self.assertTrue(
-            torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1]))
-        )
+        self.assertTrue(torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1])))
         self.assertTrue(
             torch.allclose(
                 verts,
@@ -80,9 +78,7 @@ class TestCubify(unittest.TestCase):
         )
         # 2nd-check
         verts, faces = meshes.get_mesh_verts_faces(1)
-        self.assertTrue(
-            torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1]))
-        )
+        self.assertTrue(torch.allclose(faces.max(), torch.tensor([verts.size(0) - 1])))
         self.assertTrue(
             torch.allclose(
                 verts,
@@ -275,9 +271,7 @@ class TestCubify(unittest.TestCase):
     @staticmethod
     def cubify_with_init(batch_size: int, V: int):
         device = torch.device("cuda:0")
-        voxels = torch.rand(
-            (batch_size, V, V, V), dtype=torch.float32, device=device
-        )
+        voxels = torch.rand((batch_size, V, V, V), dtype=torch.float32, device=device)
         torch.cuda.synchronize()
 
         def convert():
diff --git a/tests/test_face_areas_normals.py b/tests/test_face_areas_normals.py
index a34d7678..4b9cb974 100644
--- a/tests/test_face_areas_normals.py
+++ b/tests/test_face_areas_normals.py
@@ -2,13 +2,12 @@
 import unittest
-import torch
 
+import torch
+from common_testing import TestCaseMixin
 from pytorch3d.ops import mesh_face_areas_normals
 from pytorch3d.structures.meshes import Meshes
 
-from common_testing import TestCaseMixin
-
 
 class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
     def setUp(self) -> None:
@@ -27,10 +26,7 @@ class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
         faces_list = []
         for _ in range(num_meshes):
             verts = torch.rand(
-                (num_verts, 3),
-                dtype=torch.float32,
-                device=device,
-                requires_grad=True,
+                (num_verts, 3), dtype=torch.float32, device=device, requires_grad=True
             )
             faces = torch.randint(
                 num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
@@ -55,9 +51,7 @@ class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
         v02 = vertices_faces[:, 2] - vertices_faces[:, 0]
         normals = torch.cross(v01, v02, dim=1)  # (F, 3)
         face_areas = normals.norm(dim=-1) / 2
-        face_normals = torch.nn.functional.normalize(
-            normals, p=2, dim=1, eps=1e-6
-        )
+        face_normals = torch.nn.functional.normalize(normals, p=2, dim=1, eps=1e-6)
         return face_areas, face_normals
 
     def _test_face_areas_normals_helper(self, device, dtype=torch.float32):
@@ -76,10 +70,7 @@ class TestFaceAreasNormals(TestCaseMixin, unittest.TestCase):
         verts_torch = verts.detach().clone().to(dtype)
         verts_torch.requires_grad = True
         faces_torch = faces.detach().clone()
-        (
-            areas_torch,
-            normals_torch,
-        ) = TestFaceAreasNormals.face_areas_normals_python(
+        (areas_torch, normals_torch) = TestFaceAreasNormals.face_areas_normals_python(
             verts_torch, faces_torch
         )
         self.assertClose(areas_torch, areas, atol=1e-7)
diff --git a/tests/test_graph_conv.py b/tests/test_graph_conv.py
index 993e80cb..8462cec7 100644
--- a/tests/test_graph_conv.py
+++ b/tests/test_graph_conv.py
@@ -1,20 +1,15 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 import unittest
+
 import torch
 import torch.nn as nn
-
+from common_testing import TestCaseMixin
 from pytorch3d import _C
-from pytorch3d.ops.graph_conv import (
-    GraphConv,
-    gather_scatter,
-    gather_scatter_python,
-)
+from pytorch3d.ops.graph_conv import GraphConv, gather_scatter, gather_scatter_python
 from pytorch3d.structures.meshes import Meshes
 from pytorch3d.utils import ico_sphere
 
-from common_testing import TestCaseMixin
-
 
 class TestGraphConv(TestCaseMixin, unittest.TestCase):
     def test_undirected(self):
@@ -89,8 +84,7 @@ class TestGraphConv(TestCaseMixin, unittest.TestCase):
         w1 = torch.tensor([[-1, -1, -1]], dtype=dtype)
 
         expected_y = torch.tensor(
-            [[1 + 2 + 3 - 4 - 5 - 6 - 7 - 8 - 9], [4 + 5 + 6], [7 + 8 + 9]],
-            dtype=dtype,
+            [[1 + 2 + 3 - 4 - 5 - 6 - 7 - 8 - 9], [4 + 5 + 6], [7 + 8 + 9]], dtype=dtype
         )
 
         conv = GraphConv(3, 1, directed=True).to(dtype)
@@ -126,17 +120,13 @@ class TestGraphConv(TestCaseMixin, unittest.TestCase):
     def test_cpu_cuda_tensor_error(self):
         device = torch.device("cuda:0")
         verts = torch.tensor(
-            [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
-            dtype=torch.float32,
-            device=device,
+            [[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float32, device=device
         )
         edges = torch.tensor([[0, 1], [0, 2]])
         conv = GraphConv(3, 1, directed=True).to(torch.float32)
         with self.assertRaises(Exception) as err:
             conv(verts, edges)
-        self.assertTrue(
-            "tensors must be on the same device." in str(err.exception)
-        )
+        self.assertTrue("tensors must be on the same device." in str(err.exception))
 
     def test_gather_scatter(self):
         """
@@ -178,12 +168,10 @@ class TestGraphConv(TestCaseMixin, unittest.TestCase):
         backend: str = "cuda",
     ):
         device = torch.device("cuda") if backend == "cuda" else "cpu"
-        verts_list = torch.tensor(
-            num_verts * [[0.11, 0.22, 0.33]], device=device
-        ).view(-1, 3)
-        faces_list = torch.tensor(num_faces * [[1, 2, 3]], device=device).view(
+        verts_list = torch.tensor(num_verts * [[0.11, 0.22, 0.33]], device=device).view(
             -1, 3
         )
+        faces_list = torch.tensor(num_faces * [[1, 2, 3]], device=device).view(-1, 3)
         meshes = Meshes(num_meshes * [verts_list], num_meshes * [faces_list])
         gconv = GraphConv(gconv_dim, gconv_dim, directed=directed)
         gconv.to(device)
@@ -191,9 +179,7 @@ class TestGraphConv(TestCaseMixin, unittest.TestCase):
         total_verts = meshes.verts_packed().shape[0]
 
         # Features.
-        x = torch.randn(
-            total_verts, gconv_dim, device=device, requires_grad=True
-        )
+        x = torch.randn(total_verts, gconv_dim, device=device, requires_grad=True)
         torch.cuda.synchronize()
 
         def run_graph_conv():
diff --git a/tests/test_knn.py b/tests/test_knn.py
index 1a090e3a..d7fda868 100644
--- a/tests/test_knn.py
+++ b/tests/test_knn.py
@@ -2,8 +2,8 @@
 import unittest
 from itertools import product
-import torch
 
+import torch
 from pytorch3d.ops.knn import _knn_points_idx_naive, knn_points_idx
diff --git a/tests/test_lighting.py b/tests/test_lighting.py
index 138e4d87..37d8653a 100644
--- a/tests/test_lighting.py
+++ b/tests/test_lighting.py
@@ -1,14 +1,13 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
-import numpy as np
 import unittest
-import torch
 
+import numpy as np
+import torch
+from common_testing import TestCaseMixin
 from pytorch3d.renderer.lighting import DirectionalLights, PointLights
 from pytorch3d.transforms import RotateAxisAngle
 
-from common_testing import TestCaseMixin
-
 
 class TestLights(TestCaseMixin, unittest.TestCase):
     def test_init_lights(self):
@@ -56,9 +55,7 @@ class TestLights(TestCaseMixin, unittest.TestCase):
             self.assertSeparate(new_prop, prop)
 
     def test_lights_accessor(self):
-        d_light = DirectionalLights(
-            ambient_color=((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))
-        )
+        d_light = DirectionalLights(ambient_color=((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)))
         p_light = PointLights(ambient_color=((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)))
         for light in [d_light, p_light]:
             # Update element
@@ -96,14 +93,12 @@ class TestLights(TestCaseMixin, unittest.TestCase):
         """
         with self.assertRaises(ValueError):
             DirectionalLights(
-                ambient_color=torch.randn(10, 3),
-                diffuse_color=torch.randn(15, 3),
+                ambient_color=torch.randn(10, 3), diffuse_color=torch.randn(15, 3)
             )
 
         with self.assertRaises(ValueError):
             PointLights(
-                ambient_color=torch.randn(10, 3),
-                diffuse_color=torch.randn(15, 3),
+                ambient_color=torch.randn(10, 3), diffuse_color=torch.randn(15, 3)
             )
 
     def test_initialize_lights_dimensions_fail(self):
@@ -138,8 +133,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
         normals = torch.tensor([0, 0, 1], dtype=torch.float32)
         normals = normals[None, None, :]
         expected_output = torch.tensor(
-            [1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
-            dtype=torch.float32,
+            [1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
         )
         expected_output = expected_output.view(1, 1, 3).repeat(3, 1, 1)
         light = DirectionalLights(diffuse_color=color, direction=direction)
@@ -169,13 +163,10 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
         points = torch.tensor([0, 0, 0], dtype=torch.float32)
         normals = torch.tensor([0, 0, 1], dtype=torch.float32)
         expected_output = torch.tensor(
-            [1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
-            dtype=torch.float32,
+            [1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
         )
         expected_output = expected_output.view(-1, 1, 3)
-        light = PointLights(
-            diffuse_color=color[None, :], location=location[None, :]
-        )
+        light = PointLights(diffuse_color=color[None, :], location=location[None, :])
         output_light = light.diffuse(
             points=points[None, None, :], normals=normals[None, None, :]
         )
@@ -184,9 +175,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
         # Change light direction to be 90 degrees apart from normal direction.
         location = torch.tensor([0, 1, 0], dtype=torch.float32)
         expected_output = torch.zeros_like(expected_output)
-        light = PointLights(
-            diffuse_color=color[None, :], location=location[None, :]
-        )
+        light = PointLights(diffuse_color=color[None, :], location=location[None, :])
         output_light = light.diffuse(
             points=points[None, None, :], normals=normals[None, None, :]
         )
@@ -204,8 +193,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
         )
         normals = torch.tensor([0, 0, 1], dtype=torch.float32)
         expected_out = torch.tensor(
-            [1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
-            dtype=torch.float32,
+            [1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
         )
 
         # Reshape
@@ -231,8 +219,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
         )
         normals = torch.tensor([0, 0, 1], dtype=torch.float32)
         expected_out = torch.tensor(
-            [1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)],
-            dtype=torch.float32,
+            [1 / np.sqrt(2), 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32
         )
 
         # Reshape
@@ -258,9 +245,7 @@ class TestDiffuseLighting(TestCaseMixin, unittest.TestCase):
         device = torch.device("cuda:0")
         color = torch.tensor([1, 1, 1], dtype=torch.float32, device=device)
         direction = torch.tensor(
-            [0, 1 / np.sqrt(2), 1 / np.sqrt(2)],
-            dtype=torch.float32,
-            device=device,
+            [0, 1 / np.sqrt(2), 1 / np.sqrt(2)], dtype=torch.float32, device=device
         )
         normals = torch.tensor([0, 0, 1], dtype=torch.float32, device=device)
         normals = normals.view(1, 1, 1, 1, 3).expand(N, H, W, K, -1)
@@ -373,9 +358,7 @@ class TestSpecularLighting(TestCaseMixin, unittest.TestCase):
         normals = torch.tensor([0, 1, 0], dtype=torch.float32)
         expected_output = torch.tensor([1.0, 0.0, 1.0], dtype=torch.float32)
         expected_output = expected_output.view(-1, 1, 3)
-        lights = PointLights(
-            specular_color=color[None, :], location=location[None, :]
-        )
+        lights = PointLights(specular_color=color[None, :], location=location[None, :])
         output_light = lights.specular(
             points=points[None, None, :],
             normals=normals[None, None, :],
@@ -528,8 +511,7 @@ class TestSpecularLighting(TestCaseMixin, unittest.TestCase):
         mesh_to_vert_idx = torch.tensor(mesh_to_vert_idx, dtype=torch.int64)
         color = torch.tensor([[1, 1, 1], [1, 0, 1]], dtype=torch.float32)
         direction = torch.tensor(
-            [[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], [-1, 1, 0]],
-            dtype=torch.float32,
+            [[-1 / np.sqrt(2), 1 / np.sqrt(2), 0], [-1, 1, 0]], dtype=torch.float32
         )
         camera_position = torch.tensor(
             [
diff --git a/tests/test_materials.py b/tests/test_materials.py
index b5cbc35e..ae1826d8 100644
--- a/tests/test_materials.py
+++ b/tests/test_materials.py
@@ -1,11 +1,10 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 import unittest
+
 import torch
-
-from pytorch3d.renderer.materials import Materials
-
 from common_testing import TestCaseMixin
+from pytorch3d.renderer.materials import Materials
 
 
 class TestMaterials(TestCaseMixin, unittest.TestCase):
@@ -64,8 +63,7 @@ class TestMaterials(TestCaseMixin, unittest.TestCase):
         """
         with self.assertRaises(ValueError):
             Materials(
-                ambient_color=torch.randn(10, 3),
-                diffuse_color=torch.randn(15, 3),
+                ambient_color=torch.randn(10, 3), diffuse_color=torch.randn(15, 3)
             )
 
     def test_initialize_materials_dimensions_fail(self):
@@ -80,16 +78,12 @@ class TestMaterials(TestCaseMixin, unittest.TestCase):
             Materials(shininess=torch.randn(10, 2))
 
     def test_initialize_materials_mixed_inputs(self):
-        mat = Materials(
-            ambient_color=torch.randn(1, 3), diffuse_color=((1, 1, 1),)
-        )
+        mat = Materials(ambient_color=torch.randn(1, 3), diffuse_color=((1, 1, 1),))
         self.assertTrue(mat.ambient_color.shape == (1, 3))
         self.assertTrue(mat.diffuse_color.shape == (1, 3))
 
     def test_initialize_materials_mixed_inputs_broadcast(self):
-        mat = Materials(
-            ambient_color=torch.randn(10, 3), diffuse_color=((1, 1, 1),)
-        )
+        mat = Materials(ambient_color=torch.randn(10, 3), diffuse_color=((1, 1, 1),))
         self.assertTrue(mat.ambient_color.shape == (10, 3))
         self.assertTrue(mat.diffuse_color.shape == (10, 3))
         self.assertTrue(mat.specular_color.shape == (10, 3))
diff --git a/tests/test_mesh_edge_loss.py b/tests/test_mesh_edge_loss.py
index 16248dbd..4577bf4a 100644
--- a/tests/test_mesh_edge_loss.py
+++ b/tests/test_mesh_edge_loss.py
@@ -1,12 +1,11 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 import unittest
-import torch
 
+import torch
+from common_testing import TestCaseMixin
 from pytorch3d.loss import mesh_edge_loss
 from pytorch3d.structures import Meshes
-
-from common_testing import TestCaseMixin
 from test_sample_points_from_meshes import TestSamplePoints
 
 
@@ -27,9 +26,7 @@ class TestMeshEdgeLoss(TestCaseMixin, unittest.TestCase):
         mesh = Meshes(verts=verts_list, faces=faces_list)
         loss = mesh_edge_loss(mesh, target_length=target_length)
 
-        self.assertClose(
-            loss, torch.tensor([0.0], dtype=torch.float32, device=device)
-        )
+        self.assertClose(loss, torch.tensor([0.0], dtype=torch.float32, device=device))
         self.assertTrue(loss.requires_grad)
 
     @staticmethod
@@ -53,9 +50,7 @@ class TestMeshEdgeLoss(TestCaseMixin, unittest.TestCase):
             num_edges = mesh_edges.size(0)
             for e in range(num_edges):
                 v0, v1 = verts_edges[e, 0], verts_edges[e, 1]
-                predlosses[b] += (
-                    (v0 - v1).norm(dim=0, p=2) - target_length
-                ) ** 2.0
+                predlosses[b] += ((v0 - v1).norm(dim=0, p=2) - target_length) ** 2.0
 
             if num_edges > 0:
                 predlosses[b] = predlosses[b] / num_edges
@@ -96,12 +91,8 @@ class TestMeshEdgeLoss(TestCaseMixin, unittest.TestCase):
         self.assertClose(loss, predloss)
 
     @staticmethod
-    def mesh_edge_loss(
-        num_meshes: int = 10, max_v: int = 100, max_f: int = 300
-    ):
-        meshes = TestSamplePoints.init_meshes(
-            num_meshes, max_v, max_f, device="cuda:0"
-        )
+    def mesh_edge_loss(num_meshes: int = 10, max_v: int = 100, max_f: int = 300):
+        meshes = TestSamplePoints.init_meshes(num_meshes, max_v, max_f, device="cuda:0")
         torch.cuda.synchronize()
 
         def compute_loss():
diff --git a/tests/test_mesh_laplacian_smoothing.py b/tests/test_mesh_laplacian_smoothing.py
index ea6ce49b..1e9139c6 100644
--- a/tests/test_mesh_laplacian_smoothing.py
+++ b/tests/test_mesh_laplacian_smoothing.py
@@ -2,8 +2,8 @@
 import unittest
-import torch
 
+import torch
 from pytorch3d.loss.mesh_laplacian_smoothing import mesh_laplacian_smoothing
 from pytorch3d.structures.meshes import Meshes
@@ -56,9 +56,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
         V = verts_packed.shape[0]
 
         L = torch.zeros((V, V), dtype=torch.float32, device=meshes.device)
-        inv_areas = torch.zeros(
-            (V, 1), dtype=torch.float32, device=meshes.device
-        )
+        inv_areas = torch.zeros((V, 1), dtype=torch.float32, device=meshes.device)
 
         for f in faces_packed:
             v0 = verts_packed[f[0], :]
@@ -69,9 +67,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
             C = (v0 - v1).norm()
 
             s = 0.5 * (A + B + C)
-            face_area = (
-                (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
-            )
+            face_area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
             inv_areas[f[0]] += face_area
             inv_areas[f[1]] += face_area
             inv_areas[f[2]] += face_area
@@ -114,16 +110,13 @@ class TestLaplacianSmoothing(unittest.TestCase):
         return loss.sum() / len(meshes)
 
     @staticmethod
-    def init_meshes(
-        num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
-    ):
+    def init_meshes(num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000):
         device = torch.device("cuda:0")
         verts_list = []
         faces_list = []
         for _ in range(num_meshes):
             verts = (
-                torch.rand((num_verts, 3), dtype=torch.float32, device=device)
-                * 2.0
+                torch.rand((num_verts, 3), dtype=torch.float32, device=device) * 2.0
                 - 1.0
             )  # verts in the space of [-1, 1]
             faces = torch.stack(
@@ -148,9 +141,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
 
         # feats in list
         out = mesh_laplacian_smoothing(meshes, method="uniform")
-        naive_out = TestLaplacianSmoothing.laplacian_smoothing_naive_uniform(
-            meshes
-        )
+        naive_out = TestLaplacianSmoothing.laplacian_smoothing_naive_uniform(meshes)
 
         self.assertTrue(torch.allclose(out, naive_out))
 
@@ -190,9 +181,7 @@ class TestLaplacianSmoothing(unittest.TestCase):
         verts_list = []
         faces_list = []
         for _ in range(num_meshes):
-            verts = torch.rand(
-                (num_verts, 3), dtype=torch.float32, device=device
-            )
+            verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
             faces = torch.randint(
                 num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
             )
diff --git a/tests/test_mesh_normal_consistency.py b/tests/test_mesh_normal_consistency.py
index 63fff18e..352bdc6f 100644
--- a/tests/test_mesh_normal_consistency.py
+++ b/tests/test_mesh_normal_consistency.py
@@ -2,8 +2,8 @@
 import unittest
-import torch
 
+import torch
 from pytorch3d.loss.mesh_normal_consistency import mesh_normal_consistency
 from pytorch3d.structures.meshes import Meshes
 from pytorch3d.utils.ico_sphere import ico_sphere
@@ -33,17 +33,14 @@ class TestMeshNormalConsistency(unittest.TestCase):
         return faces
 
     @staticmethod
-    def init_meshes(
-        num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
-    ):
+    def init_meshes(num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000):
         device = torch.device("cuda:0")
         valid_faces = TestMeshNormalConsistency.init_faces(num_verts).to(device)
         verts_list = []
         faces_list = []
         for _ in range(num_meshes):
             verts = (
-                torch.rand((num_verts, 3), dtype=torch.float32, device=device)
-                * 2.0
+                torch.rand((num_verts, 3), dtype=torch.float32, device=device) * 2.0
                 - 1.0
             )  # verts in the space of [-1, 1]
             """
@@ -105,8 +102,7 @@ class TestMeshNormalConsistency(unittest.TestCase):
                     (
                         1
                         - torch.cosine_similarity(
-                            normals[i].view(1, 3),
-                            -normals[j].view(1, 3),
+                            normals[i].view(1, 3), -normals[j].view(1, 3)
                         )
                     )
                 )
@@ -137,9 +133,7 @@ class TestMeshNormalConsistency(unittest.TestCase):
         device = torch.device("cuda:0")
 
         # mesh1 shown above
        verts1 = torch.rand((4, 3), dtype=torch.float32, device=device)
-        faces1 = torch.tensor(
-            [[0, 1, 2], [2, 1, 3]], dtype=torch.int64, device=device
-        )
+        faces1 = torch.tensor([[0, 1, 2], [2, 1, 3]], dtype=torch.int64, device=device)
 
         # mesh2 is a cuboid with 8 verts, 12 faces and 18 edges
         verts2 = torch.tensor(
@@ -181,9 +175,7 @@ class TestMeshNormalConsistency(unittest.TestCase):
             [[0, 1, 2], [2, 1, 3], [2, 1, 4]], dtype=torch.int64, device=device
         )
 
-        meshes = Meshes(
-            verts=[verts1, verts2, verts3], faces=[faces1, faces2, faces3]
-        )
+        meshes = Meshes(verts=[verts1, verts2, verts3], faces=[faces1, faces2, faces3])
 
         # mesh1: normal consistency computation
         n0 = (verts1[1] - verts1[2]).cross(verts1[3] - verts1[2])
diff --git a/tests/test_mesh_rendering_utils.py b/tests/test_mesh_rendering_utils.py
index 47e4af14..92347097 100644
--- a/tests/test_mesh_rendering_utils.py
+++ b/tests/test_mesh_rendering_utils.py
@@ -2,8 +2,8 @@
 import unittest
-import torch
 
+import torch
 from pytorch3d.renderer.mesh.utils import _clip_barycentric_coordinates
diff --git a/tests/test_meshes.py b/tests/test_meshes.py
index 479cb700..d00efcc4 100644
--- a/tests/test_meshes.py
+++ b/tests/test_meshes.py
@@ -1,12 +1,11 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
-import numpy as np
 import unittest
+
+import numpy as np
 import torch
-
-from pytorch3d.structures.meshes import Meshes
-
 from common_testing import TestCaseMixin
+from pytorch3d.structures.meshes import Meshes
 
 
 class TestMeshes(TestCaseMixin, unittest.TestCase):
@@ -54,9 +53,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             # For lists of faces and vertices, we can sample different v/f
             # per mesh.
             f = torch.randint(max_f, size=(num_meshes,), dtype=torch.int32)
-            v = torch.randint(
-                3, high=max_v, size=(num_meshes,), dtype=torch.int32
-            )
+            v = torch.randint(3, high=max_v, size=(num_meshes,), dtype=torch.int32)
 
         # Generate the actual vertices and faces.
         for i in range(num_meshes):
@@ -90,12 +87,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
                 device=device,
             ),
             torch.tensor(
-                [
-                    [0.1, 0.3, 0.3],
-                    [0.6, 0.7, 0.8],
-                    [0.2, 0.3, 0.4],
-                    [0.1, 0.5, 0.3],
-                ],
+                [[0.1, 0.3, 0.3], [0.6, 0.7, 0.8], [0.2, 0.3, 0.4], [0.1, 0.5, 0.3]],
                 dtype=torch.float32,
                 device=device,
             ),
@@ -113,9 +105,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         ]
         faces = [
             torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device),
-            torch.tensor(
-                [[0, 1, 2], [1, 2, 3]], dtype=torch.int64, device=device
-            ),
+            torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64, device=device),
             torch.tensor(
                 [
                     [1, 2, 0],
@@ -136,12 +126,8 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         mesh = TestMeshes.init_simple_mesh("cuda:0")
 
         # Check that faces/verts per mesh are set in init:
-        self.assertClose(
-            mesh._num_faces_per_mesh.cpu(), torch.tensor([1, 2, 7])
-        )
-        self.assertClose(
-            mesh._num_verts_per_mesh.cpu(), torch.tensor([3, 4, 5])
-        )
+        self.assertClose(mesh._num_faces_per_mesh.cpu(), torch.tensor([1, 2, 7]))
+        self.assertClose(mesh._num_verts_per_mesh.cpu(), torch.tensor([3, 4, 5]))
 
         # Check computed tensors
         self.assertClose(
@@ -163,8 +149,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             mesh.mesh_to_faces_packed_first_idx().cpu(), torch.tensor([0, 1, 3])
         )
         self.assertClose(
-            mesh.num_edges_per_mesh().cpu(),
-            torch.tensor([3, 5, 10], dtype=torch.int32),
+            mesh.num_edges_per_mesh().cpu(), torch.tensor([3, 5, 10], dtype=torch.int32)
         )
 
     def test_simple_random_meshes(self):
@@ -172,9 +157,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         # Define the test mesh object either as a list or tensor of faces/verts.
         for lists_to_tensors in (False, True):
             N = 10
-            mesh = TestMeshes.init_mesh(
-                N, 100, 300, lists_to_tensors=lists_to_tensors
-            )
+            mesh = TestMeshes.init_mesh(N, 100, 300, lists_to_tensors=lists_to_tensors)
             verts_list = mesh.verts_list()
             faces_list = mesh.faces_list()
@@ -207,12 +190,8 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             for n in range(N):
                 v = verts_list[n].shape[0]
                 f = faces_list[n].shape[0]
-                self.assertClose(
-                    verts_packed[curv : curv + v, :], verts_list[n]
-                )
-                self.assertClose(
-                    faces_packed[curf : curf + f, :] - curv, faces_list[n]
-                )
+                self.assertClose(verts_packed[curv : curv + v, :], verts_list[n])
+                self.assertClose(faces_packed[curf : curf + f, :] - curv, faces_list[n])
                 self.assertTrue(vert_to_mesh[curv : curv + v].eq(n).all())
                 self.assertTrue(face_to_mesh[curf : curf + f].eq(n).all())
                 self.assertTrue(mesh_to_vert[n] == curv)
@@ -232,9 +211,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             npedges = np.concatenate((e12, e20, e01), axis=0)
             npedges = np.sort(npedges, axis=1)
 
-            unique_edges, unique_idx = np.unique(
-                npedges, return_index=True, axis=0
-            )
+            unique_edges, unique_idx = np.unique(npedges, return_index=True, axis=0)
             self.assertTrue(np.allclose(edges, unique_edges))
             temp = face_to_mesh.cpu().numpy()
             temp = np.concatenate((temp, temp, temp), axis=0)
@@ -266,13 +243,9 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
                 v = torch.randint(
                     3, high=V, size=(1,), dtype=torch.int32, device=device
                 )[0]
-                f = torch.randint(
-                    F, size=(1,), dtype=torch.int32, device=device
-                )[0]
+                f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]
                 verts = torch.rand((v, 3), dtype=torch.float32, device=device)
-                faces = torch.randint(
-                    v, size=(f, 3), dtype=torch.int64, device=device
-                )
+                faces = torch.randint(v, size=(f, 3), dtype=torch.int64, device=device)
             else:
                 verts = torch.tensor([], dtype=torch.float32, device=device)
                 faces = torch.tensor([], dtype=torch.int64, device=device)
@@ -309,16 +282,12 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         )
         for n in range(N):
             verts.append(torch.rand((V, 3), dtype=torch.float32, device=device))
-            this_faces = torch.full(
-                (F, 3), -1, dtype=torch.int64, device=device
-            )
+            this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
             if valid[n]:
                 v = torch.randint(
                     3, high=V, size=(1,), dtype=torch.int32, device=device
                 )[0]
-                f = torch.randint(
-                    F, size=(1,), dtype=torch.int32, device=device
-                )[0]
+                f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]
                 this_faces[:f, :] = torch.randint(
                     v, size=(f, 3), dtype=torch.int64, device=device
                 )
@@ -329,9 +298,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         mesh = Meshes(verts=torch.stack(verts), faces=torch.stack(faces))
 
         # Check verts/faces per mesh are set correctly in init.
-        self.assertListEqual(
-            mesh._num_faces_per_mesh.tolist(), num_faces.tolist()
-        )
+        self.assertListEqual(mesh._num_faces_per_mesh.tolist(), num_faces.tolist())
         self.assertListEqual(mesh._num_verts_per_mesh.tolist(), [V] * N)
 
         for n, (vv, ff) in enumerate(zip(mesh.verts_list(), mesh.faces_list())):
@@ -339,12 +306,8 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             self.assertClose(vv, verts[n])
 
         new_faces = [ff.clone() for ff in faces]
-        v = torch.randint(
-            3, high=V, size=(1,), dtype=torch.int32, device=device
-        )[0]
-        f = torch.randint(F - 10, size=(1,), dtype=torch.int32, device=device)[
-            0
-        ]
+        v = torch.randint(3, high=V, size=(1,), dtype=torch.int32, device=device)[0]
+        f = torch.randint(F - 10, size=(1,), dtype=torch.int32, device=device)[0]
         this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
         this_faces[10 : f + 10, :] = torch.randint(
             v, size=(f, 3), dtype=torch.int64, device=device
@@ -376,9 +339,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             torch.allclose(new_mesh._verts_list[0], mesh._verts_list[0])
         )
         self.assertFalse(
-            torch.allclose(
-                mesh.num_verts_per_mesh(), new_mesh.num_verts_per_mesh()
-            )
+            torch.allclose(mesh.num_verts_per_mesh(), new_mesh.num_verts_per_mesh())
         )
         self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())
         self.assertSeparate(new_mesh.verts_padded(), mesh.verts_padded())
@@ -438,9 +399,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         mesh._compute_face_areas_normals(refresh=True)
         mesh._compute_vertex_normals(refresh=True)
 
-        deform = torch.rand(
-            (all_v, 3), dtype=torch.float32, device=mesh.device
-        )
+        deform = torch.rand((all_v, 3), dtype=torch.float32, device=mesh.device)
         # new meshes class to hold the deformed mesh
         new_mesh_naive = naive_offset_verts(mesh, deform)
@@ -458,9 +417,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             self.assertClose(
                 new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
             )
-            self.assertClose(
-                mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
-            )
+            self.assertClose(mesh.faces_list()[i], new_mesh_naive.faces_list()[i])
             self.assertClose(
                 new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
             )
@@ -475,21 +432,11 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         )
 
         # check padded & packed
-        self.assertClose(
-            new_mesh.faces_padded(), new_mesh_naive.faces_padded()
-        )
-        self.assertClose(
-            new_mesh.verts_padded(), new_mesh_naive.verts_padded()
-        )
-        self.assertClose(
-            new_mesh.faces_packed(), new_mesh_naive.faces_packed()
-        )
-        self.assertClose(
-            new_mesh.verts_packed(), new_mesh_naive.verts_packed()
-        )
-        self.assertClose(
-            new_mesh.edges_packed(), new_mesh_naive.edges_packed()
-        )
+        self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())
+        self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())
+        self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())
+        self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())
+        self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())
         self.assertClose(
             new_mesh.verts_packed_to_mesh_idx(),
             new_mesh_naive.verts_packed_to_mesh_idx(),
@@ -499,8 +446,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             new_mesh_naive.mesh_to_verts_packed_first_idx(),
         )
         self.assertClose(
-            new_mesh.num_verts_per_mesh(),
-            new_mesh_naive.num_verts_per_mesh(),
+            new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()
        )
         self.assertClose(
             new_mesh.faces_packed_to_mesh_idx(),
@@ -511,8 +457,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             new_mesh_naive.mesh_to_faces_packed_first_idx(),
         )
         self.assertClose(
-            new_mesh.num_faces_per_mesh(),
-            new_mesh_naive.num_faces_per_mesh(),
+            new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()
         )
         self.assertClose(
             new_mesh.edges_packed_to_mesh_idx(),
@@ -527,24 +472,19 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
 
         # check face areas, normals and vertex normals
         self.assertClose(
-            new_mesh.verts_normals_packed(),
-            new_mesh_naive.verts_normals_packed(),
+            new_mesh.verts_normals_packed(), new_mesh_naive.verts_normals_packed()
         )
         self.assertClose(
-            new_mesh.verts_normals_padded(),
-            new_mesh_naive.verts_normals_padded(),
+            new_mesh.verts_normals_padded(), new_mesh_naive.verts_normals_padded()
         )
         self.assertClose(
-            new_mesh.faces_normals_packed(),
-            new_mesh_naive.faces_normals_packed(),
+            new_mesh.faces_normals_packed(), new_mesh_naive.faces_normals_packed()
        )
         self.assertClose(
-            new_mesh.faces_normals_padded(),
-            new_mesh_naive.faces_normals_padded(),
+            new_mesh.faces_normals_padded(), new_mesh_naive.faces_normals_padded()
         )
         self.assertClose(
-            new_mesh.faces_areas_packed(),
-            new_mesh_naive.faces_areas_packed(),
+            new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()
         )
 
     def test_scale_verts(self):
@@ -579,13 +519,11 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         for i in range(N):
             if test == "tensor":
                 self.assertClose(
-                    scales[i] * mesh.verts_list()[i],
-                    new_mesh.verts_list()[i],
+                    scales[i] * mesh.verts_list()[i], new_mesh.verts_list()[i]
                 )
             else:
                 self.assertClose(
-                    scales * mesh.verts_list()[i],
-                    new_mesh.verts_list()[i],
+                    scales * mesh.verts_list()[i], new_mesh.verts_list()[i]
                 )
             self.assertClose(
                 new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
@@ -607,21 +545,11 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         )
 
         # check padded & packed
-        self.assertClose(
-            new_mesh.faces_padded(), new_mesh_naive.faces_padded()
-        )
-        self.assertClose(
-            new_mesh.verts_padded(), new_mesh_naive.verts_padded()
-        )
-        self.assertClose(
-            new_mesh.faces_packed(), new_mesh_naive.faces_packed()
-        )
-        self.assertClose(
-            new_mesh.verts_packed(), new_mesh_naive.verts_packed()
-        )
-        self.assertClose(
-            new_mesh.edges_packed(), new_mesh_naive.edges_packed()
-        )
+        self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())
+        self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())
+        self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())
+        self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())
+        self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())
         self.assertClose(
             new_mesh.verts_packed_to_mesh_idx(),
             new_mesh_naive.verts_packed_to_mesh_idx(),
@@ -631,8 +559,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             new_mesh_naive.mesh_to_verts_packed_first_idx(),
         )
         self.assertClose(
-            new_mesh.num_verts_per_mesh(),
-            new_mesh_naive.num_verts_per_mesh(),
+            new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()
         )
         self.assertClose(
             new_mesh.faces_packed_to_mesh_idx(),
@@ -643,8 +570,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             new_mesh_naive.mesh_to_faces_packed_first_idx(),
         )
         self.assertClose(
-            new_mesh.num_faces_per_mesh(),
-            new_mesh_naive.num_faces_per_mesh(),
+            new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()
         )
         self.assertClose(
             new_mesh.edges_packed_to_mesh_idx(),
@@ -675,8 +601,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
             new_mesh_naive.faces_normals_padded(),
         )
         self.assertClose(
-            new_mesh.faces_areas_packed(),
-            new_mesh_naive.faces_areas_packed(),
+            new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()
         )
 
     def test_extend_list(self):
@@ -730,10 +655,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         self.assertTrue(len(split_meshes[0]) == 2)
         self.assertTrue(
             split_meshes[0].verts_list()
-            == [
-                mesh.get_mesh_verts_faces(0)[0],
-                mesh.get_mesh_verts_faces(1)[0],
-            ]
+            == [mesh.get_mesh_verts_faces(0)[0], mesh.get_mesh_verts_faces(1)[0]]
         )
         self.assertTrue(len(split_meshes[1]) == 3)
         self.assertTrue(
@@ -756,9 +678,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         verts_faces = [(10, 100), (20, 200)]
         for (V, F) in verts_faces:
             verts = torch.rand((V, 3), dtype=torch.float32, device=device)
-            faces = torch.randint(
-                V, size=(F, 3), dtype=torch.int64, device=device
-            )
+            faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
             verts_list.append(verts)
             faces_list.append(faces)
 
@@ -782,9 +702,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         faces_list = []
         for (V, F) in [(10, 100)]:
             verts = torch.rand((V, 3), dtype=torch.float32, device=device)
-            faces = torch.randint(
-                V, size=(F, 3), dtype=torch.int64, device=device
-            )
+            faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
             verts_list.append(verts)
             faces_list.append(faces)
 
@@ -802,9 +720,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         verts_faces = [(10, 100), (20, 200), (30, 300)]
         for (V, F) in verts_faces:
             verts = torch.rand((V, 3), dtype=torch.float32, device=device)
-            faces = torch.randint(
-                V, size=(F, 3), dtype=torch.int64, device=device
-            )
+            faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
             verts_list.append(verts)
             faces_list.append(faces)
 
@@ -814,9 +730,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         verts_padded = mesh.verts_padded()
         verts_padded_flat = verts_padded.view(-1, 3)
 
-        self.assertClose(
-            verts_padded_flat[verts_padded_to_packed_idx], verts_packed
-        )
+        self.assertClose(verts_padded_flat[verts_padded_to_packed_idx], verts_packed)
 
         idx = verts_padded_to_packed_idx.view(-1, 1).expand(-1, 3)
         self.assertClose(verts_padded_flat.gather(0, idx), verts_packed)
@@ -828,9 +742,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         verts_faces = [(10, 100), (20, 200), (30, 300)]
         for (V, F) in verts_faces:
             verts = torch.rand((V, 3), dtype=torch.float32, device=device)
-            faces = torch.randint(
-                V, size=(F, 3), dtype=torch.int64, device=device
-            )
+            faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
             verts_list.append(verts)
             faces_list.append(faces)
 
@@ -1006,12 +918,10 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         verts_normals_packed = meshes.verts_normals_packed()
         faces_normals_packed = meshes.faces_normals_packed()
         self.assertTrue(
-            list(verts_normals_packed.shape)
-            == [verts.shape[0] + verts2.shape[0], 3]
+            list(verts_normals_packed.shape) == [verts.shape[0] + verts2.shape[0], 3]
         )
         self.assertTrue(
-            list(faces_normals_packed.shape)
-            == [faces.shape[0] + faces2.shape[0], 3]
+            list(faces_normals_packed.shape) == [faces.shape[0] + faces2.shape[0], 3]
         )
 
         # Single mesh where two faces share one vertex so the normal is
@@ -1079,17 +989,12 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         # with areas > eps=1e-6
         nonzero = face_areas_cpu > 1e-6
         self.assertClose(
-            face_normals_cpu[nonzero],
-            face_normals_cuda.cpu()[nonzero],
-            atol=1e-6,
+            face_normals_cpu[nonzero], face_normals_cuda.cpu()[nonzero], atol=1e-6
         )
 
     @staticmethod
     def compute_packed_with_init(
-        num_meshes: int = 10,
-        max_v: int = 100,
-        max_f: int = 300,
-        device: str = "cpu",
+        num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = "cpu"
     ):
         mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)
         torch.cuda.synchronize()
@@ -1102,10 +1007,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
 
     @staticmethod
     def compute_padded_with_init(
-        num_meshes: int = 10,
-        max_v: int = 100,
-        max_f: int = 300,
-        device: str = "cpu",
+        num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = "cpu"
     ):
         mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)
         torch.cuda.synchronize()
diff --git a/tests/test_nearest_neighbor_points.py b/tests/test_nearest_neighbor_points.py
index 4332defb..7537b65a 100644
--- a/tests/test_nearest_neighbor_points.py
+++ b/tests/test_nearest_neighbor_points.py
@@ -2,8 +2,8 @@
 import unittest
 from itertools import product
-import torch
 
+import torch
 from pytorch3d import _C
@@ -33,9 +33,7 @@ class TestNearestNeighborPoints(unittest.TestCase):
                     # to the cpp or cuda versions of the function
                     # depending on the input type.
                     idx1 = _C.nn_points_idx(x, y)
-                    idx2 = TestNearestNeighborPoints.nn_points_idx_naive(
-                        x, y
-                    )
+                    idx2 = TestNearestNeighborPoints.nn_points_idx_naive(x, y)
                     self.assertTrue(idx1.size(1) == P1)
                     self.assertTrue(torch.all(idx1 == idx2))
diff --git a/tests/test_obj_io.py b/tests/test_obj_io.py
index 3e6d7ef7..23bfa94b 100644
--- a/tests/test_obj_io.py
+++ b/tests/test_obj_io.py
@@ -4,14 +4,13 @@ import os
 import unittest
 from io import StringIO
 from pathlib import Path
-import torch
 
+import torch
+from common_testing import TestCaseMixin
 from pytorch3d.io import load_obj, load_objs_as_meshes, save_obj
 from pytorch3d.structures import Meshes, Textures, join_meshes
 from pytorch3d.utils import torus
 
-from common_testing import TestCaseMixin
-
 
 class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
     def test_load_obj_simple(self):
@@ -34,12 +33,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         tex_maps = aux.texture_images
 
         expected_verts = torch.tensor(
-            [
-                [0.1, 0.2, 0.3],
-                [0.2, 0.3, 0.4],
-                [0.3, 0.4, 0.5],
-                [0.4, 0.5, 0.6],
-            ],
+            [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
             dtype=torch.float32,
         )
         expected_faces = torch.tensor(
@@ -124,12 +118,8 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
             [[0.749279, 0.501284], [0.999110, 0.501077], [0.999455, 0.750380]],
             dtype=torch.float32,
         )
-        expected_faces_normals_idx = torch.tensor(
-            [[1, 1, 1]], dtype=torch.int64
-        )
-        expected_faces_textures_idx = torch.tensor(
-            [[0, 0, 1]], dtype=torch.int64
-        )
+        expected_faces_normals_idx = torch.tensor([[1, 1, 1]], dtype=torch.int64)
+        expected_faces_textures_idx = torch.tensor([[0, 0, 1]], dtype=torch.int64)
 
         self.assertTrue(torch.all(verts == expected_verts))
         self.assertTrue(torch.all(faces.verts_idx == expected_faces))
@@ -153,23 +143,13 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
             ]
         )
         obj_file = StringIO(obj_file)
-        expected_faces_normals_idx = torch.tensor(
-            [[0, 0, 1]], dtype=torch.int64
-        )
+        expected_faces_normals_idx = torch.tensor([[0, 0, 1]], dtype=torch.int64)
         expected_normals = torch.tensor(
-            [
-                [0.000000, 0.000000, -1.000000],
-                [-1.000000, -0.000000, -0.000000],
-            ],
+            [[0.000000, 0.000000, -1.000000], [-1.000000, -0.000000, -0.000000]],
             dtype=torch.float32,
         )
         expected_verts = torch.tensor(
-            [
-                [0.1, 0.2, 0.3],
-                [0.2, 0.3, 0.4],
-                [0.3, 0.4, 0.5],
-                [0.4, 0.5, 0.6],
-            ],
+            [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
             dtype=torch.float32,
         )
         verts, faces, aux = load_obj(obj_file)
@@ -198,19 +178,12 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
             ]
         )
         obj_file = StringIO(obj_file)
-        expected_faces_textures_idx = torch.tensor(
-            [[0, 0, 1]], dtype=torch.int64
-        )
+        expected_faces_textures_idx = torch.tensor([[0, 0, 1]], dtype=torch.int64)
         expected_textures = torch.tensor(
             [[0.999110, 0.501077], [0.999455, 0.750380]], dtype=torch.float32
         )
         expected_verts = torch.tensor(
-            [
-                [0.1, 0.2, 0.3],
-                [0.2, 0.3, 0.4],
-                [0.3, 0.4, 0.5],
-                [0.4, 0.5, 0.6],
-            ],
+            [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
             dtype=torch.float32,
         )
         verts, faces, aux = load_obj(obj_file)
@@ -257,9 +230,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
 
         with self.assertRaises(ValueError) as err:
             load_obj(obj_file)
-        self.assertTrue(
-            "Vertex properties are inconsistent" in str(err.exception)
-        )
+        self.assertTrue("Vertex properties are inconsistent" in str(err.exception))
 
     def test_load_obj_error_too_many_vertex_properties(self):
         obj_file = "\n".join(["f 2/1/1/3"])
@@ -267,9 +238,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
 
         with self.assertRaises(ValueError) as err:
             load_obj(obj_file)
-        self.assertTrue(
-            "Face vertices can ony have 3 properties" in str(err.exception)
-        )
+        self.assertTrue("Face vertices can ony have 3 properties" in str(err.exception))
 
     def test_load_obj_error_invalid_vertex_indices(self):
         obj_file = "\n".join(
@@ -320,7 +289,9 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
             verts = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4]])  # (V, 4)
             faces = torch.LongTensor([[0, 1, 2]])
             save_obj(StringIO(), verts, faces)
-        expected_message = "Argument 'verts' should either be empty or of shape (num_verts, 3)."
+        expected_message = (
+            "Argument 'verts' should either be empty or of shape (num_verts, 3)."
+        )
         self.assertTrue(expected_message, error.exception)
 
         # Invalid faces shape
@@ -328,7 +299,9 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
             verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
             faces = torch.LongTensor([[0, 1, 2, 3]])  # (F, 4)
             save_obj(StringIO(), verts, faces)
-        expected_message = "Argument 'faces' should either be empty or of shape (num_faces, 3)."
+        expected_message = (
+            "Argument 'faces' should either be empty or of shape (num_faces, 3)."
+        )
         self.assertTrue(expected_message, error.exception)
 
     def test_save_obj_invalid_indices(self):
@@ -395,12 +368,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
 
     def test_save_obj(self):
         verts = torch.tensor(
-            [
-                [0.01, 0.2, 0.301],
-                [0.2, 0.03, 0.408],
-                [0.3, 0.4, 0.05],
-                [0.6, 0.7, 0.8],
-            ],
+            [[0.01, 0.2, 0.301], [0.2, 0.03, 0.408], [0.3, 0.4, 0.05], [0.6, 0.7, 0.8]],
             dtype=torch.float32,
         )
         faces = torch.tensor(
@@ -424,9 +392,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         self.assertEqual(actual_file, expected_file)
 
     def test_load_mtl(self):
-        DATA_DIR = (
-            Path(__file__).resolve().parent.parent / "docs/tutorials/data"
-        )
+        DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
         obj_filename = "cow_mesh/cow.obj"
         filename = os.path.join(DATA_DIR, obj_filename)
         verts, faces, aux = load_obj(filename)
@@ -452,19 +418,13 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         # Check all keys and values in dictionary are the same.
         for n1, n2 in zip(materials.keys(), expected_materials.keys()):
             self.assertTrue(n1 == n2)
-            for k1, k2 in zip(
-                materials[n1].keys(), expected_materials[n2].keys()
-            ):
+            for k1, k2 in zip(materials[n1].keys(), expected_materials[n2].keys()):
                 self.assertTrue(
-                    torch.allclose(
-                        materials[n1][k1], expected_materials[n2][k2]
-                    )
+                    torch.allclose(materials[n1][k1], expected_materials[n2][k2])
                 )
 
     def test_load_mtl_noload(self):
-        DATA_DIR = (
-            Path(__file__).resolve().parent.parent / "docs/tutorials/data"
-        )
+        DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
         obj_filename = "cow_mesh/cow.obj"
         filename = os.path.join(DATA_DIR, obj_filename)
         verts, faces, aux = load_obj(filename, load_textures=False)
@@ -490,12 +450,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         verts, faces, aux = load_obj(obj_file)
 
         expected_verts = torch.tensor(
-            [
-                [0.1, 0.2, 0.3],
-                [0.2, 0.3, 0.4],
-                [0.3, 0.4, 0.5],
-                [0.4, 0.5, 0.6],
-            ],
+            [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
             dtype=torch.float32,
         )
         expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@@ -514,12 +469,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         verts, faces, aux = load_obj(filename)
 
         expected_verts = torch.tensor(
-            [
-                [0.1, 0.2, 0.3],
-                [0.2, 0.3, 0.4],
-                [0.3, 0.4, 0.5],
-                [0.4, 0.5, 0.6],
-            ],
+            [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
             dtype=torch.float32,
         )
         expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@@ -533,12 +483,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         verts, faces, aux = load_obj(filename, load_textures=False)
 
         expected_verts = torch.tensor(
-            [
-                [0.1, 0.2, 0.3],
-                [0.2, 0.3, 0.4],
-                [0.3, 0.4, 0.5],
-                [0.4, 0.5, 0.6],
-            ],
+            [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
             dtype=torch.float32,
         )
         expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@@ -555,12 +500,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         verts, faces, aux = load_obj(filename)
 
         expected_verts = torch.tensor(
-            [
-                [0.1, 0.2, 0.3],
-                [0.2, 0.3, 0.4],
-                [0.3, 0.4, 0.5],
-                [0.4, 0.5, 0.6],
-            ],
+            [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
             dtype=torch.float32,
         )
         expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@@ -574,12 +514,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         verts, faces, aux = load_obj(filename, load_textures=False)
 
         expected_verts = torch.tensor(
-            [
-                [0.1, 0.2, 0.3],
-                [0.2, 0.3, 0.4],
-                [0.3, 0.4, 0.5],
-                [0.4, 0.5, 0.6],
-            ],
+            [[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.4, 0.5, 0.6]],
             dtype=torch.float32,
         )
         expected_faces = torch.tensor([[0, 1, 2], [0, 1, 3]], dtype=torch.int64)
@@ -607,33 +542,24 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
             check_item(mesh.verts_padded(), mesh3.verts_padded())
             check_item(mesh.faces_padded(), mesh3.faces_padded())
 
             if mesh.textures is not None:
+                check_item(mesh.textures.maps_padded(), mesh3.textures.maps_padded())
                 check_item(
-                    mesh.textures.maps_padded(), mesh3.textures.maps_padded()
+                    mesh.textures.faces_uvs_padded(), mesh3.textures.faces_uvs_padded()
                 )
                 check_item(
-                    mesh.textures.faces_uvs_padded(),
-                    mesh3.textures.faces_uvs_padded(),
+                    mesh.textures.verts_uvs_padded(), mesh3.textures.verts_uvs_padded()
                 )
                 check_item(
-                    mesh.textures.verts_uvs_padded(),
-                    mesh3.textures.verts_uvs_padded(),
-                )
-                check_item(
-                    mesh.textures.verts_rgb_padded(),
-                    mesh3.textures.verts_rgb_padded(),
+                    mesh.textures.verts_rgb_padded(), mesh3.textures.verts_rgb_padded()
                 )
 
-        DATA_DIR = (
-            Path(__file__).resolve().parent.parent / "docs/tutorials/data"
-        )
+        DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
         obj_filename = DATA_DIR / "cow_mesh/cow.obj"
 
         mesh = load_objs_as_meshes([obj_filename])
         mesh3 = load_objs_as_meshes([obj_filename, obj_filename, obj_filename])
         check_triple(mesh, mesh3)
-        self.assertTupleEqual(
-            mesh.textures.maps_padded().shape, (1, 1024, 1024, 3)
-        )
+        self.assertTupleEqual(mesh.textures.maps_padded().shape, (1, 1024, 1024, 3))
 
         mesh_notex = load_objs_as_meshes([obj_filename], load_textures=False)
         mesh3_notex = load_objs_as_meshes(
@@ -655,9 +581,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         teapot_obj = DATA_DIR / "teapot.obj"
         mesh_teapot = load_objs_as_meshes([teapot_obj])
         teapot_verts, teapot_faces = mesh_teapot.get_mesh_verts_faces(0)
-        mix_mesh = load_objs_as_meshes(
-            [obj_filename, teapot_obj], load_textures=False
-        )
+        mix_mesh = load_objs_as_meshes([obj_filename, teapot_obj], load_textures=False)
         self.assertEqual(len(mix_mesh), 2)
         self.assertClose(mix_mesh.verts_list()[0], mesh.verts_list()[0])
         self.assertClose(mix_mesh.faces_list()[0], mesh.faces_list()[0])
@@ -671,15 +595,11 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
         self.assertClose(cow3_tea.faces_list()[3], mesh_teapot.faces_list()[0])
 
     @staticmethod
-    def _bm_save_obj(
-        verts: torch.Tensor, faces: torch.Tensor, decimal_places: int
-    ):
+    def _bm_save_obj(verts: torch.Tensor, faces: torch.Tensor, decimal_places: int):
         return lambda: save_obj(StringIO(), verts, faces, decimal_places)
 
     @staticmethod
-    def _bm_load_obj(
-        verts: torch.Tensor, faces: torch.Tensor, decimal_places: int
-    ):
+    def _bm_load_obj(verts: torch.Tensor, faces: torch.Tensor, decimal_places: int):
         f = StringIO()
         save_obj(f, verts, faces, decimal_places)
         s = f.getvalue()
diff --git a/tests/test_packed_to_padded.py b/tests/test_packed_to_padded.py
index 5f32fb52..28ce5d43 100644
--- a/tests/test_packed_to_padded.py
+++ b/tests/test_packed_to_padded.py
@@ -1,13 +1,12 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 import unittest
-import torch
 
+import torch
+from common_testing import TestCaseMixin
 from pytorch3d.ops import packed_to_padded, padded_to_packed
 from pytorch3d.structures.meshes import Meshes
 
-from common_testing import TestCaseMixin
-
 
 class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
     def setUp(self) -> None:
@@ -25,9 +24,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
         verts_list = []
         faces_list = []
         for _ in range(num_meshes):
-            verts = torch.rand(
-                (num_verts, 3), dtype=torch.float32, device=device
-            )
+            verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
             faces = torch.randint(
                 num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
             )
@@ -47,9 +44,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
         if D == 0:
             inputs_padded = torch.zeros((num_meshes, max_size), device=device)
         else:
-            inputs_padded = torch.zeros(
-                (num_meshes, max_size, D), device=device
-            )
+            inputs_padded = torch.zeros((num_meshes, max_size, D), device=device)
 
         for m in range(num_meshes):
             s = first_idxs[m]
             if m == num_meshes - 1:
@@ -92,13 +87,9 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
         max_faces = meshes.num_faces_per_mesh().max().item()
 
         if D == 0:
-            values = torch.rand(
-                (faces.shape[0],), device=device, requires_grad=True
-            )
+            values = torch.rand((faces.shape[0],), device=device, requires_grad=True)
         else:
-            values = torch.rand(
-                (faces.shape[0], D), device=device, requires_grad=True
-            )
+            values = torch.rand((faces.shape[0], D), device=device, requires_grad=True)
         values_torch = values.detach().clone()
         values_torch.requires_grad = True
         values_padded = packed_to_padded(
@@ -120,10 +111,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
         values_padded_torch.backward(grad_inputs)
         grad_outputs_torch1 = values_torch.grad
         grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python(
-            grad_inputs,
-            mesh_to_faces_packed_first_idx,
-            values.size(0),
-            device=device,
+            grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device
         )
         self.assertClose(grad_outputs, grad_outputs_torch1)
         self.assertClose(grad_outputs, grad_outputs_torch2)
@@ -165,9 +153,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
         values_torch = values.detach().clone()
         values_torch.requires_grad = True
         values_packed = padded_to_packed(
-            values,
-            mesh_to_faces_packed_first_idx,
-            num_faces_per_mesh.sum().item(),
+            values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item()
         )
         values_packed_torch = TestPackedToPadded.padded_to_packed_python(
             values_torch,
@@ -180,9 +166,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
 
         # check backward
         if D == 0:
-            grad_inputs = torch.rand(
-                (num_faces_per_mesh.sum().item()), device=device
-            )
+            grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device)
         else:
             grad_inputs = torch.rand(
                 (num_faces_per_mesh.sum().item(), D), device=device
@@ -192,10 +176,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
         values_packed_torch.backward(grad_inputs)
         grad_outputs_torch1 = values_torch.grad
         grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python(
-            grad_inputs,
-            mesh_to_faces_packed_first_idx,
-            values.size(1),
-            device=device,
+            grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device
        )
         self.assertClose(grad_outputs, grad_outputs_torch1)
         self.assertClose(grad_outputs, grad_outputs_torch2)
@@ -219,34 +200,24 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
         self._test_padded_to_packed_helper(16, "cuda:0")
 
     def test_invalid_inputs_shapes(self, device="cuda:0"):
-        with self.assertRaisesRegex(
-            ValueError, "input can only be 2-dimensional."
-        ):
+        with self.assertRaisesRegex(ValueError, "input can only be 2-dimensional."):
            values = torch.rand((100, 50, 2), device=device)
            first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
            packed_to_padded(values, first_idxs, 100)
 
-        with self.assertRaisesRegex(
-            ValueError, "input can only be 3-dimensional."
-        ):
+        with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
             values = torch.rand((100,), device=device)
             first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
             padded_to_packed(values, first_idxs, 20)
 
-        with self.assertRaisesRegex(
-            ValueError, "input can only be 3-dimensional."
-        ):
+        with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
             values = torch.rand((100, 50, 2, 2), device=device)
             first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
             padded_to_packed(values, first_idxs, 20)
 
     @staticmethod
     def packed_to_padded_with_init(
-        num_meshes: int,
-        num_verts: int,
-        num_faces: int,
-        num_d: int,
-        device: str = "cpu",
+        num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu"
     ):
         meshes = TestPackedToPadded.init_meshes(
             num_meshes, num_verts, num_faces, device
@@ -268,11 +239,7 @@ class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
 
     @staticmethod
     def packed_to_padded_with_init_torch(
-        num_meshes: int,
-        num_verts: int,
-        num_faces: int,
-        num_d: int,
-        device: str = "cpu",
+        num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu"
     ):
         meshes = TestPackedToPadded.init_meshes(
             num_meshes, num_verts, num_faces, device
diff --git a/tests/test_ply_io.py b/tests/test_ply_io.py
index 3568ccca..34a48bde 100644
--- a/tests/test_ply_io.py
+++ b/tests/test_ply_io.py
@@ -3,13 +3,12 @@ import struct
 import unittest
 from io import BytesIO, StringIO
-import torch
 
+import torch
+from common_testing import TestCaseMixin
 from pytorch3d.io.ply_io import _load_ply_raw, load_ply, save_ply
 from pytorch3d.utils import torus
 
-from common_testing import TestCaseMixin
-
 
 class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
     def test_raw_load_simple_ascii(self):
@@ -155,14 +154,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
     def test_load_simple_binary(self):
         for big_endian in [True, False]:
             verts = (
-                "0 0 0 "
-                "0 0 1 "
-                "0 1 1 "
-                "0 1 0 "
-                "1 0 0 "
-                "1 0 1 "
-                "1 1 1 "
-                "1 1 0"
+                "0 0 0 " "0 0 1 " "0 1 1 " "0 1 0 " "1 0 0 " "1 0 1 " "1 1 1 " "1 1 0"
             ).split()
             faces = (
                 "4 0 1 2 3 "
@@ -176,9 +168,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
                 "3 4 5 1"
             ).split()
             short_one = b"\00\01" if big_endian else b"\01\00"
-            mixed_data = b"\00\00" b"\03\03" + (
-                short_one + b"\00\01\01\01" b"\00\02"
-            )
+            mixed_data = b"\00\00" b"\03\03" + (short_one + b"\00\01\01\01" b"\00\02")
             minus_one_data = b"\xff" * 14
             endian_char = ">" if big_endian else "<"
             format = (
@@ -306,9 +296,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
 
         lines2 = lines.copy()
         lines2[8] = "1 2"
-        with self.assertRaisesRegex(
-            ValueError, "Inconsistent data for vertex."
-        ):
+        with self.assertRaisesRegex(ValueError, "Inconsistent data for vertex."):
             _load_ply_raw(StringIO("\n".join(lines2)))
 
         lines2 = lines[:-1]
@@ -344,9 +332,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
 
         lines2 = lines.copy()
         lines2.insert(4, "element bad 1")
-        with self.assertRaisesRegex(
-            ValueError, "Found an element with no properties."
-        ):
+        with self.assertRaisesRegex(ValueError, "Found an element with no properties."):
             _load_ply_raw(StringIO("\n".join(lines2)))
 
         lines2 = lines.copy()
@@ -369,25 +355,19 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
 
         lines2 = lines.copy()
         lines2.insert(4, "property double y")
-        with self.assertRaisesRegex(
-            ValueError, "Too little data for an element."
-        ):
+        with self.assertRaisesRegex(ValueError, "Too little data for an element."):
             _load_ply_raw(StringIO("\n".join(lines2)))
 
         lines2[-2] = "3.3 4.2"
         _load_ply_raw(StringIO("\n".join(lines2)))
 
         lines2[-2] = "3.3 4.3 2"
-        with self.assertRaisesRegex(
-            ValueError, "Too much data for an element."
-        ):
+        with self.assertRaisesRegex(ValueError, "Too much data for an element."):
             _load_ply_raw(StringIO("\n".join(lines2)))
 
         # Now make the ply file actually be readable as a Mesh
 
-        with self.assertRaisesRegex(
-            ValueError, "The ply file has no face element."
-        ):
+        with self.assertRaisesRegex(ValueError, "The ply file has no face element."):
             load_ply(StringIO("\n".join(lines)))
 
         lines2 = lines.copy()
@@ -398,9 +378,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
         lines2.insert(5, "property float z")
         lines2.insert(5, "property float y")
         lines2[-2] = "0 0 0"
-        with self.assertRaisesRegex(
-            ValueError, "Faces must have at least 3 vertices."
-        ):
+        with self.assertRaisesRegex(ValueError, "Faces must have at least 3 vertices."):
             load_ply(StringIO("\n".join(lines2)))
 
         # Good one
@@ -408,17 +386,11 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
         load_ply(StringIO("\n".join(lines2)))
 
     @staticmethod
-    def _bm_save_ply(
-        verts: torch.Tensor, faces: torch.Tensor, decimal_places: int
-    ):
-        return lambda: save_ply(
-            StringIO(), verts, faces, decimal_places=decimal_places
-        )
+    def _bm_save_ply(verts: torch.Tensor, faces: torch.Tensor, decimal_places: int):
+        return lambda: save_ply(StringIO(), verts, faces, decimal_places=decimal_places)
 
     @staticmethod
-    def _bm_load_ply(
-        verts: torch.Tensor, faces: torch.Tensor, decimal_places: int
-    ):
+    def _bm_load_ply(verts: torch.Tensor, faces: torch.Tensor, decimal_places: int):
         f = StringIO()
         save_ply(f, verts, faces, decimal_places)
         s = f.getvalue()
diff --git a/tests/test_pointclouds.py b/tests/test_pointclouds.py
index d1deea2e..aae7924a 100644
--- a/tests/test_pointclouds.py
+++ b/tests/test_pointclouds.py
@@ -1,13 +1,12 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
-import numpy as np
 import unittest
+
+import numpy as np
 import torch
-
-from pytorch3d.structures.pointclouds import Pointclouds
-
 from common_testing import TestCaseMixin
+from pytorch3d.structures.pointclouds import Pointclouds
 
 
 class TestPointclouds(TestCaseMixin, unittest.TestCase):
@@ -52,13 +51,11 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         normals_list, features_list = None, None
         if with_normals:
             normals_list = [
-                torch.rand((i, 3), device=device, dtype=torch.float32)
-                for i in p
+                torch.rand((i, 3), device=device, dtype=torch.float32) for i in p
             ]
         if with_features:
             features_list = [
-                torch.rand((i, channels), device=device, dtype=torch.float32)
-                for i in p
+                torch.rand((i, channels), device=device, dtype=torch.float32) for i in p
             ]
 
         if lists_to_tensors:
@@ -68,9 +65,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
             if with_features:
                 features_list = torch.stack(features_list)
 
-        return Pointclouds(
-            points_list, normals=normals_list, features=features_list
-        )
+        return Pointclouds(points_list, normals=normals_list, features=features_list)
 
     def test_simple(self):
         device = torch.device("cuda:0")
@@ -81,12 +76,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                 device=device,
             ),
             torch.tensor(
-                [
-                    [0.1, 0.3, 0.3],
-                    [0.6, 0.7, 0.8],
-                    [0.2, 0.3, 0.4],
-                    [0.1, 0.5, 0.3],
-                ],
+                [[0.1, 0.3, 0.3], [0.6, 0.7, 0.8], [0.2, 0.3, 0.4], [0.1, 0.5, 0.3]],
                 dtype=torch.float32,
                 device=device,
             ),
@@ -111,9 +101,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         self.assertClose(
             clouds.cloud_to_packed_first_idx().cpu(), torch.tensor([0, 3, 7])
         )
-        self.assertClose(
-            clouds.num_points_per_cloud().cpu(), torch.tensor([3, 4, 5])
-        )
+        self.assertClose(clouds.num_points_per_cloud().cpu(), torch.tensor([3, 4, 5]))
         self.assertClose(
             clouds.padded_to_packed_idx().cpu(),
             torch.tensor([0, 1, 2, 5, 6, 7, 8, 10, 11, 12, 13, 14]),
@@ -129,11 +117,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
             "points_padded",
             "padded_to_packed_idx",
         ]
-        public_normals_getters = [
-            "normals_list",
-            "normals_packed",
-            "normals_padded",
-        ]
+        public_normals_getters = ["normals_list", "normals_packed", "normals_padded"]
         public_features_getters = [
             "features_list",
             "features_packed",
@@ -147,17 +131,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         points_data = [torch.zeros((max_len, 3)).uniform_() for i in lengths]
         normals_data = [torch.zeros((max_len, 3)).uniform_() for i in lengths]
         features_data = [torch.zeros((max_len, C)).uniform_() for i in lengths]
-        for length, p, n, f in zip(
-            lengths, points_data, normals_data, features_data
-        ):
+        for length, p, n, f in zip(lengths, points_data, normals_data, features_data):
             p[length:] = 0.0
             n[length:] = 0.0
             f[length:] = 0.0
         points_list = [d[:length] for length, d in zip(lengths, points_data)]
         normals_list = [d[:length] for length, d in zip(lengths, normals_data)]
-        features_list = [
-            d[:length] for length, d in zip(lengths, features_data)
-        ]
+        features_list = [d[:length] for length, d in zip(lengths, features_data)]
         points_packed = torch.cat(points_data)
         normals_packed = torch.cat(normals_data)
         features_packed = torch.cat(features_data)
@@ -173,13 +153,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
             ("emptylist_emptylist_emptylist", [], [], []),
         ]
         false_cases_inputs = [
-            (
-                "list_packed",
-                points_list,
-                normals_packed,
-                features_packed,
-                ValueError,
-            ),
+            ("list_packed", points_list, normals_packed, features_packed, ValueError),
             ("packed_0", points_packed, None, None, ValueError),
         ]
 
@@ -230,15 +204,11 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                     self.assertIsNone(features_padded)
                 for n in range(N):
                     p = points_list[n].shape[0]
-                    self.assertClose(
-                        points_padded[n, :p, :], points_list[n]
-                    )
+                    self.assertClose(points_padded[n, :p, :], points_list[n])
                     if with_normals:
                         norms = normals_list[n].shape[0]
                         self.assertEqual(p, norms)
-                        self.assertClose(
-                            normals_padded[n, :p, :], normals_list[n]
-                        )
+                        self.assertClose(normals_padded[n, :p, :], normals_list[n])
                     if with_features:
                         f = features_list[n].shape[0]
                         self.assertEqual(p, f)
@@ -248,9 +218,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                     if points_padded.shape[1] > p:
                         self.assertTrue(points_padded[n, p:, :].eq(0).all())
                         if with_features:
-                            self.assertTrue(
-                                features_padded[n, p:, :].eq(0).all()
-                            )
+                            self.assertTrue(features_padded[n, p:, :].eq(0).all())
                     self.assertEqual(points_per_cloud[n], p)
 
                 # Check compute packed.
@@ -272,17 +240,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                     )
                     if with_normals:
                         self.assertClose(
-                            normals_packed[cur : cur + p, :],
-                            normals_list[n],
+                            normals_packed[cur : cur + p, :], normals_list[n]
                         )
                     if with_features:
                         self.assertClose(
-                            features_packed[cur : cur + p, :],
-                            features_list[n],
+                            features_packed[cur : cur + p, :], features_list[n]
                         )
-                    self.assertTrue(
-                        packed_to_cloud[cur : cur + p].eq(n).all()
-                    )
+                    self.assertTrue(packed_to_cloud[cur : cur + p].eq(n).all())
                     self.assertTrue(cloud_to_packed[n] == cur)
                     cur += p
 
@@ -312,9 +276,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                 )[0]
                 points = torch.rand((p, 3), dtype=torch.float32, device=device)
                 normals = torch.rand((p, 3), dtype=torch.float32, device=device)
-                features = torch.rand(
-                    (p, C), dtype=torch.float32, device=device
-                )
+                features = torch.rand((p, C), dtype=torch.float32, device=device)
             else:
                 points = torch.tensor([], dtype=torch.float32, device=device)
                 normals = torch.tensor([], dtype=torch.float32, device=device)
@@ -331,9 +293,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         if with_features:
             this_features = features_list
         clouds = Pointclouds(
-            points=points_list,
-            normals=this_normals,
-            features=this_features,
+            points=points_list, normals=this_normals, features=this_features
         )
         points_padded = clouds.points_padded()
         normals_padded = clouds.normals_padded()
@@ -346,13 +306,9 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         for n in range(N):
             p = len(points_list[n])
             if p > 0:
-                self.assertClose(
-                    points_padded[n, :p, :], points_list[n]
-                )
+                self.assertClose(points_padded[n, :p, :], points_list[n])
                 if with_normals:
-                    self.assertClose(
-                        normals_padded[n, :p, :], normals_list[n]
-                    )
+                    self.assertClose(normals_padded[n, :p, :], normals_list[n])
                 if with_features:
                     self.assertClose(
                         features_padded[n, :p, :], features_list[n]
@@ -360,13 +316,9 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                 if points_padded.shape[1] > p:
                     self.assertTrue(points_padded[n, p:, :].eq(0).all())
                     if with_normals:
-                        self.assertTrue(
-                            normals_padded[n, p:, :].eq(0).all()
-                        )
+                        self.assertTrue(normals_padded[n, p:, :].eq(0).all())
                    if with_features:
-                        self.assertTrue(
-                            features_padded[n, p:, :].eq(0).all()
-                        )
+                        self.assertTrue(features_padded[n, p:, :].eq(0).all())
             self.assertTrue(points_per_cloud[n] == p)
 
     def test_clone_list(self):
@@ -379,12 +331,8 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         new_clouds = clouds.clone()
 
         # Check cloned and original objects do not share tensors.
-        self.assertSeparate(
-            new_clouds.points_list()[0], clouds.points_list()[0]
-        )
-        self.assertSeparate(
-            new_clouds.normals_list()[0], clouds.normals_list()[0]
-        )
+        self.assertSeparate(new_clouds.points_list()[0], clouds.points_list()[0])
+        self.assertSeparate(new_clouds.normals_list()[0], clouds.normals_list()[0])
         self.assertSeparate(
             new_clouds.features_list()[0], clouds.features_list()[0]
         )
@@ -412,12 +360,8 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         new_clouds = clouds.clone()
 
         # Check cloned and original objects do not share tensors.
-        self.assertSeparate(
-            new_clouds.points_list()[0], clouds.points_list()[0]
-        )
-        self.assertSeparate(
-            new_clouds.normals_list()[0], clouds.normals_list()[0]
-        )
+        self.assertSeparate(new_clouds.points_list()[0], clouds.points_list()[0])
+        self.assertSeparate(new_clouds.normals_list()[0], clouds.normals_list()[0])
         self.assertSeparate(
             new_clouds.features_list()[0], clouds.features_list()[0]
         )
@@ -442,9 +386,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         for i in range(N):
             self.assertClose(cloud1.points_list()[i], cloud2.points_list()[i])
             self.assertClose(cloud1.normals_list()[i], cloud2.normals_list()[i])
-            self.assertClose(
-                cloud1.features_list()[i], cloud2.features_list()[i]
-            )
+            self.assertClose(cloud1.features_list()[i], cloud2.features_list()[i])
         has_normals = cloud1.normals_list() is not None
         self.assertTrue(has_normals == (cloud2.normals_list() is not None))
         has_features = cloud1.features_list() is not None
@@ -459,22 +401,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         if has_features:
             self.assertClose(cloud1.features_padded(), cloud2.features_padded())
             self.assertClose(cloud1.features_packed(), cloud2.features_packed())
+        self.assertClose(cloud1.packed_to_cloud_idx(), cloud2.packed_to_cloud_idx())
         self.assertClose(
-            cloud1.packed_to_cloud_idx(), cloud2.packed_to_cloud_idx()
-        )
-        self.assertClose(
-            cloud1.cloud_to_packed_first_idx(),
-            cloud2.cloud_to_packed_first_idx(),
-        )
-        self.assertClose(
-            cloud1.num_points_per_cloud(), cloud2.num_points_per_cloud()
-        )
-        self.assertClose(
-            cloud1.packed_to_cloud_idx(), cloud2.packed_to_cloud_idx()
-        )
-        self.assertClose(
-            cloud1.padded_to_packed_idx(), cloud2.padded_to_packed_idx()
+            cloud1.cloud_to_packed_first_idx(), cloud2.cloud_to_packed_first_idx()
         )
+        self.assertClose(cloud1.num_points_per_cloud(), cloud2.num_points_per_cloud())
+        self.assertClose(cloud1.packed_to_cloud_idx(), cloud2.packed_to_cloud_idx())
+        self.assertClose(cloud1.padded_to_packed_idx(), cloud2.padded_to_packed_idx())
         self.assertTrue(all(cloud1.valid == cloud2.valid))
         self.assertTrue(cloud1.equisized == cloud2.equisized)
@@ -482,9 +415,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
     def naive_offset(clouds, offsets_packed):
         new_points_packed = clouds.points_packed() + offsets_packed
         new_points_list = list(
-            new_points_packed.split(
-                clouds.num_points_per_cloud().tolist(), 0
-            )
+            new_points_packed.split(clouds.num_points_per_cloud().tolist(), 0)
         )
         return Pointclouds(
             points=new_points_list,
@@ -502,9 +433,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         clouds._compute_padded()
         clouds.padded_to_packed_idx()
 
-        deform = torch.rand(
-            (all_p, 3), dtype=torch.float32, device=clouds.device
-        )
+        deform = torch.rand((all_p, 3), dtype=torch.float32, device=clouds.device)
 
         new_clouds_naive = naive_offset(clouds, deform)
         new_clouds = clouds.offset(deform)
@@ -521,8 +450,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                 clouds.normals_list()[i], new_clouds_naive.normals_list()[i]
             )
             self.assertClose(
-                clouds.features_list()[i],
-                new_clouds_naive.features_list()[i],
+                clouds.features_list()[i], new_clouds_naive.features_list()[i]
             )
         self.assertCloudsEqual(new_clouds, new_clouds_naive)
 
@@ -550,15 +478,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         new_clouds = clouds.scale(scales)
         for i in range(N):
             self.assertClose(
-                scales[i] * clouds.points_list()[i],
-                new_clouds.points_list()[i],
+                scales[i] * clouds.points_list()[i], new_clouds.points_list()[i]
             )
             self.assertClose(
                 clouds.normals_list()[i], new_clouds_naive.normals_list()[i]
             )
             self.assertClose(
-                clouds.features_list()[i],
-                new_clouds_naive.features_list()[i],
+                clouds.features_list()[i], new_clouds_naive.features_list()[i]
             )
         self.assertCloudsEqual(new_clouds, new_clouds_naive)
 
@@ -576,20 +502,15 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         for i in range(len(clouds)):
             for n in range(N):
                 self.assertClose(
-                    clouds.points_list()[i],
-                    new_clouds.points_list()[i * N + n],
+                    clouds.points_list()[i], new_clouds.points_list()[i * N + n]
                 )
                 self.assertClose(
-                    clouds.normals_list()[i],
-                    new_clouds.normals_list()[i * N + n],
+                    clouds.normals_list()[i], new_clouds.normals_list()[i * N + n]
                 )
                 self.assertClose(
-                    clouds.features_list()[i],
-                    new_clouds.features_list()[i * N + n],
-                )
-                self.assertTrue(
-                    clouds.valid[i] == new_clouds.valid[i * N + n]
+                    clouds.features_list()[i], new_clouds.features_list()[i * N + n]
                 )
+                self.assertTrue(clouds.valid[i] == new_clouds.valid[i * N + n])
 
         self.assertAllSeparate(
             clouds.points_list() + new_clouds.points_list()
@@ -627,8 +548,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
             "padded_to_packed_idx",
         ]:
             self.assertClose(
-                getattr(new_cloud, attrib)().cpu(),
-                getattr(cloud, attrib)().cpu(),
+                getattr(new_cloud, attrib)().cpu(), getattr(cloud, attrib)().cpu()
             )
         for i in range(len(cloud)):
             self.assertClose(
@@ -638,8 +558,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                 cloud.normals_list()[i].cpu(), new_cloud.normals_list()[i].cpu()
             )
             self.assertClose(
-                cloud.features_list()[i].cpu(),
-                new_cloud.features_list()[i].cpu(),
+                cloud.features_list()[i].cpu(), new_cloud.features_list()[i].cpu()
            )
         self.assertTrue(all(cloud.valid.cpu() == new_cloud.valid.cpu()))
         self.assertTrue(cloud.equisized == new_cloud.equisized)
@@ -666,8 +585,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
             "padded_to_packed_idx",
         ]:
             self.assertClose(
-                getattr(new_cloud, attrib)().cpu(),
-                getattr(cloud, attrib)().cpu(),
+                getattr(new_cloud, attrib)().cpu(), getattr(cloud, attrib)().cpu()
             )
         for i in range(len(cloud)):
             self.assertClose(
@@ -677,8 +595,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
                 cloud.normals_list()[i].cpu(), new_cloud.normals_list()[i].cpu()
             )
             self.assertClose(
-                cloud.features_list()[i].cpu(),
-                new_cloud.features_list()[i].cpu(),
+                cloud.features_list()[i].cpu(), new_cloud.features_list()[i].cpu()
             )
         self.assertTrue(all(cloud.valid.cpu() == new_cloud.valid.cpu()))
         self.assertTrue(cloud.equisized == new_cloud.equisized)
@@ -698,11 +615,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
         self.assertEqual(len(split_clouds[1]), 3)
         self.assertTrue(
             split_clouds[1].points_list()
-            == [
-                clouds.get_cloud(2)[0],
-                clouds.get_cloud(3)[0],
-                clouds.get_cloud(4)[0],
-            ]
+            == [clouds.get_cloud(2)[0], clouds.get_cloud(3)[0], clouds.get_cloud(4)[0]]
         )
 
         split_sizes = [2, 0.3]
@@ -751,9 +664,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
points_padded = clouds.points_padded() points_padded_flat = points_padded.view(-1, 3) - self.assertClose( - points_padded_flat[padded_to_packed_idx], points_packed - ) + self.assertClose(points_padded_flat[padded_to_packed_idx], points_packed) idx = padded_to_packed_idx.view(-1, 1).expand(-1, 3) self.assertClose(points_padded_flat.gather(0, idx), points_packed) @@ -765,16 +676,13 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase): def check_equal(selected, indices): for selectedIdx, index in indices: self.assertClose( - selected.points_list()[selectedIdx], - clouds.points_list()[index], + selected.points_list()[selectedIdx], clouds.points_list()[index] ) self.assertClose( - selected.normals_list()[selectedIdx], - clouds.normals_list()[index], + selected.normals_list()[selectedIdx], clouds.normals_list()[index] ) self.assertClose( - selected.features_list()[selectedIdx], - clouds.features_list()[index], + selected.features_list()[selectedIdx], clouds.features_list()[index] ) # int index @@ -820,11 +728,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase): for with_normfeat in (True, False): for with_new_normfeat in (True, False): clouds = self.init_cloud( - N, - P, - C, - with_normals=with_normfeat, - with_features=with_normfeat, + N, P, C, with_normals=with_normfeat, with_features=with_normfeat ) num_points_per_cloud = clouds.num_points_per_cloud() @@ -843,8 +747,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase): clouds.points_padded().shape, device=clouds.device ) new_normals_list = [ - new_normals[i, : num_points_per_cloud[i]] - for i in range(N) + new_normals[i, : num_points_per_cloud[i]] for i in range(N) ] feat_shape = [ clouds.points_padded().shape[0], @@ -853,14 +756,11 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase): ] new_features = torch.rand(feat_shape, device=clouds.device) new_features_list = [ - new_features[i, : num_points_per_cloud[i]] - for i in range(N) + new_features[i, : num_points_per_cloud[i]] for i in range(N) ] # update - new_clouds = clouds.update_padded( - new_points, new_normals, new_features - ) + new_clouds = clouds.update_padded(new_points, new_normals, new_features) self.assertIsNone(new_clouds._points_list) self.assertIsNone(new_clouds._points_packed) @@ -868,13 +768,9 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase): self.assertTrue(all(new_clouds.valid == clouds.valid)) self.assertClose(new_clouds.points_padded(), new_points) - self.assertClose( - new_clouds.points_packed(), torch.cat(new_points_list) - ) + self.assertClose(new_clouds.points_packed(), torch.cat(new_points_list)) for i in range(N): - self.assertClose( - new_clouds.points_list()[i], new_points_list[i] - ) + self.assertClose(new_clouds.points_list()[i], new_points_list[i]) if with_new_normfeat: for i in range(N): @@ -890,27 +786,22 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase): ) self.assertClose(new_clouds.features_padded(), new_features) self.assertClose( - new_clouds.features_packed(), - torch.cat(new_features_list), + new_clouds.features_packed(), torch.cat(new_features_list) ) else: if with_normfeat: for i in range(N): self.assertClose( - new_clouds.normals_list()[i], - clouds.normals_list()[i], + new_clouds.normals_list()[i], clouds.normals_list()[i] ) self.assertClose( - new_clouds.features_list()[i], - clouds.features_list()[i], + new_clouds.features_list()[i], clouds.features_list()[i] ) self.assertNotSeparate( - new_clouds.normals_list()[i], - clouds.normals_list()[i], + new_clouds.normals_list()[i], 
clouds.normals_list()[i] ) self.assertNotSeparate( - new_clouds.features_list()[i], - clouds.features_list()[i], + new_clouds.features_list()[i], clouds.features_list()[i] ) self.assertClose( @@ -920,19 +811,16 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase): new_clouds.normals_packed(), clouds.normals_packed() ) self.assertClose( - new_clouds.features_padded(), - clouds.features_padded(), + new_clouds.features_padded(), clouds.features_padded() ) self.assertClose( - new_clouds.features_packed(), - clouds.features_packed(), + new_clouds.features_packed(), clouds.features_packed() ) self.assertNotSeparate( new_clouds.normals_padded(), clouds.normals_padded() ) self.assertNotSeparate( - new_clouds.features_padded(), - clouds.features_padded(), + new_clouds.features_padded(), clouds.features_padded() ) else: self.assertIsNone(new_clouds.normals_list()) diff --git a/tests/test_rasterize_meshes.py b/tests/test_rasterize_meshes.py index 014fb5be..64090058 100644 --- a/tests/test_rasterize_meshes.py +++ b/tests/test_rasterize_meshes.py @@ -2,8 +2,9 @@ import functools import unittest -import torch +import torch +from common_testing import TestCaseMixin from pytorch3d import _C from pytorch3d.renderer.mesh.rasterize_meshes import ( rasterize_meshes, @@ -12,20 +13,14 @@ from pytorch3d.renderer.mesh.rasterize_meshes import ( from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere -from common_testing import TestCaseMixin - class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): def test_simple_python(self): device = torch.device("cpu") - self._simple_triangle_raster( - rasterize_meshes_python, device, bin_size=-1 - ) + self._simple_triangle_raster(rasterize_meshes_python, device, bin_size=-1) self._simple_blurry_raster(rasterize_meshes_python, device, bin_size=-1) self._test_behind_camera(rasterize_meshes_python, device, bin_size=-1) - self._test_perspective_correct( - rasterize_meshes_python, device, bin_size=-1 - ) + self._test_perspective_correct(rasterize_meshes_python, device, bin_size=-1) def test_simple_cpu_naive(self): device = torch.device("cpu") @@ -350,9 +345,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs) fn2 = functools.partial(rasterize_meshes_python, meshes2, **kwargs) args = () - self._compare_impls( - fn1, fn2, args, args, verts1, verts2, compare_grads=True - ) + self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True) def test_cpp_vs_cuda_perspective_correct(self): meshes = ico_sphere(2, device=torch.device("cpu")) @@ -367,9 +360,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs) fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=0, **kwargs) args = () - self._compare_impls( - fn1, fn2, args, args, verts1, verts2, compare_grads=True - ) + self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True) def test_cuda_naive_vs_binned_perspective_correct(self): meshes = ico_sphere(2, device=torch.device("cuda")) @@ -384,9 +375,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): fn1 = functools.partial(rasterize_meshes, meshes1, bin_size=0, **kwargs) fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=8, **kwargs) args = () - self._compare_impls( - fn1, fn2, args, args, verts1, verts2, compare_grads=True - ) + self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True) def _compare_impls( self, @@ -433,9 +422,7 @@ 
class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): grad_verts2 = grad_var2.grad.data.clone().cpu() self.assertClose(grad_verts1, grad_verts2, rtol=1e-3) - def _test_perspective_correct( - self, rasterize_meshes_fn, device, bin_size=None - ): + def _test_perspective_correct(self, rasterize_meshes_fn, device, bin_size=None): # fmt: off verts = torch.tensor([ [-0.4, -0.4, 10], # noqa: E241, E201 @@ -542,12 +529,8 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): zbuf_f_bary = w0_f * z0 + w1_f * z1 + w2_f * z2 zbuf_t_bary = w0_t * z0 + w1_t * z1 + w2_t * z2 mask = idx_expected != -1 - zbuf_f_bary_diff = ( - (zbuf_f_bary[mask] - zbuf_f_expected[mask]).abs().max() - ) - zbuf_t_bary_diff = ( - (zbuf_t_bary[mask] - zbuf_t_expected[mask]).abs().max() - ) + zbuf_f_bary_diff = (zbuf_f_bary[mask] - zbuf_f_expected[mask]).abs().max() + zbuf_t_bary_diff = (zbuf_t_bary[mask] - zbuf_t_expected[mask]).abs().max() self.assertLess(zbuf_f_bary_diff, 1e-4) self.assertLess(zbuf_t_bary_diff, 1e-4) @@ -719,9 +702,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): # k = 1, second closest point. expected_p2face_k1 = expected_p2face_k0.clone() - expected_p2face_k1[0, :] = ( - torch.ones_like(expected_p2face_k1[0, :]) * -1 - ) + expected_p2face_k1[0, :] = torch.ones_like(expected_p2face_k1[0, :]) * -1 # fmt: off expected_p2face_k1[1, :] = torch.tensor( @@ -763,9 +744,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): # Coordinate conventions +Y up, +Z in, +X left if bin_size == -1: # simple python, no bin_size - p2face, zbuf, bary, pix_dists = raster_fn( - meshes, image_size, 0.0, 2 - ) + p2face, zbuf, bary, pix_dists = raster_fn(meshes, image_size, 0.0, 2) else: p2face, zbuf, bary, pix_dists = raster_fn( meshes, image_size, 0.0, 2, bin_size @@ -914,9 +893,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): # Expected faces using axes convention +Y down, + X right, + Z in bin_faces_expected = ( - torch.ones( - (1, 2, 2, max_faces_per_bin), dtype=torch.int32, device=device - ) + torch.ones((1, 2, 2, max_faces_per_bin), dtype=torch.int32, device=device) * -1 ) bin_faces_expected[0, 0, 0, 0] = torch.tensor([1]) @@ -979,12 +956,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase): def rasterize(): rasterize_meshes( - meshes_batch, - image_size, - blur_radius, - 8, - bin_size, - max_faces_per_bin, + meshes_batch, image_size, blur_radius, 8, bin_size, max_faces_per_bin ) torch.cuda.synchronize() diff --git a/tests/test_rasterize_points.py b/tests/test_rasterize_points.py index 070e6f58..f10bca40 100644 --- a/tests/test_rasterize_points.py +++ b/tests/test_rasterize_points.py @@ -1,10 +1,11 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
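All of the implementation comparisons in this file funnel through one entry point. A minimal sketch of a rasterize_meshes call, with a toy image size assumed; bin_size=0 selects the naive path, as several tests here do, and placing the sphere at positive z keeps it in front of the camera:

import torch
from pytorch3d.renderer.mesh.rasterize_meshes import rasterize_meshes
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere

sphere = ico_sphere(2)
# Shift the unit sphere along +Z so it sits in front of the camera.
verts = sphere.verts_packed() + torch.tensor([0.0, 0.0, 2.0])
meshes = Meshes(verts=[verts], faces=[sphere.faces_packed()])

# Per-pixel face indices, depths, barycentric coords and blending distances.
pix_to_face, zbuf, bary, dists = rasterize_meshes(
    meshes, image_size=64, blur_radius=0.0, faces_per_pixel=1, bin_size=0
)
assert pix_to_face.shape == (1, 64, 64, 1)  # -1 marks background pixels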
-import numpy as np import unittest -import torch +import numpy as np +import torch +from common_testing import TestCaseMixin from pytorch3d import _C from pytorch3d.renderer.points.rasterize_points import ( rasterize_points, @@ -12,8 +13,6 @@ from pytorch3d.renderer.points.rasterize_points import ( ) from pytorch3d.structures.pointclouds import Pointclouds -from common_testing import TestCaseMixin - class TestRasterizePoints(TestCaseMixin, unittest.TestCase): def test_python_simple_cpu(self): @@ -38,9 +37,7 @@ class TestRasterizePoints(TestCaseMixin, unittest.TestCase): self._test_behind_camera(rasterize_points, torch.device("cpu")) def test_cuda_behind_camera(self): - self._test_behind_camera( - rasterize_points, torch.device("cuda"), bin_size=0 - ) + self._test_behind_camera(rasterize_points, torch.device("cuda"), bin_size=0) def test_cpp_vs_naive_vs_binned(self): # Make sure that the backward pass runs for all pathways @@ -167,20 +164,8 @@ class TestRasterizePoints(TestCaseMixin, unittest.TestCase): points_cuda = points_cpu.cuda().detach().requires_grad_(True) pointclouds_cpu = Pointclouds(points=points_cpu) pointclouds_cuda = Pointclouds(points=points_cuda) - args_cpu = ( - pointclouds_cpu, - image_size, - radius, - points_per_pixel, - bin_size, - ) - args_cuda = ( - pointclouds_cuda, - image_size, - radius, - points_per_pixel, - bin_size, - ) + args_cpu = (pointclouds_cpu, image_size, radius, points_per_pixel, bin_size) + args_cuda = (pointclouds_cuda, image_size, radius, points_per_pixel, bin_size) self._compare_impls( rasterize_points, rasterize_points, @@ -332,9 +317,7 @@ class TestRasterizePoints(TestCaseMixin, unittest.TestCase): ], device=device) # fmt: on - dists1_expected = torch.zeros( - (5, 5, 2), dtype=torch.float32, device=device - ) + dists1_expected = torch.zeros((5, 5, 2), dtype=torch.float32, device=device) # fmt: off dists1_expected[:, :, 0] = torch.tensor([ [-1.00, -1.00, 0.16, -1.00, -1.00], # noqa: E241 diff --git a/tests/test_rasterizer.py b/tests/test_rasterizer.py index cea2d0cf..9d82e0c9 100644 --- a/tests/test_rasterizer.py +++ b/tests/test_rasterizer.py @@ -1,22 +1,17 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import numpy as np import unittest from pathlib import Path + +import numpy as np import torch from PIL import Image - -from pytorch3d.renderer.cameras import ( - OpenGLPerspectiveCameras, - look_at_view_transform, -) -from pytorch3d.renderer.mesh.rasterizer import ( - MeshRasterizer, - RasterizationSettings, -) +from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras, look_at_view_transform +from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings from pytorch3d.utils.ico_sphere import ico_sphere + DATA_DIR = Path(__file__).resolve().parent / "data" DEBUG = False # Set DEBUG to true to save outputs from the tests. @@ -52,9 +47,7 @@ class TestMeshRasterizer(unittest.TestCase): ) # Init rasterizer - rasterizer = MeshRasterizer( - cameras=cameras, raster_settings=raster_settings - ) + rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings) #################################### # 1. Test rasterizing a single mesh diff --git a/tests/test_rendering_meshes.py b/tests/test_rendering_meshes.py index 853fc17e..e7cb0b46 100644 --- a/tests/test_rendering_meshes.py +++ b/tests/test_rendering_meshes.py @@ -4,23 +4,17 @@ """ Sanity checks for output images from the renderer. 
""" -import numpy as np import unittest from pathlib import Path + +import numpy as np import torch from PIL import Image - from pytorch3d.io import load_objs_as_meshes -from pytorch3d.renderer.cameras import ( - OpenGLPerspectiveCameras, - look_at_view_transform, -) +from pytorch3d.renderer.cameras import OpenGLPerspectiveCameras, look_at_view_transform from pytorch3d.renderer.lighting import PointLights from pytorch3d.renderer.materials import Materials -from pytorch3d.renderer.mesh.rasterizer import ( - MeshRasterizer, - RasterizationSettings, -) +from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings from pytorch3d.renderer.mesh.renderer import MeshRenderer from pytorch3d.renderer.mesh.shader import ( BlendParams, @@ -34,6 +28,7 @@ from pytorch3d.renderer.mesh.texturing import Textures from pytorch3d.structures.meshes import Meshes from pytorch3d.utils.ico_sphere import ico_sphere + # If DEBUG=True, save out images generated in the tests for debugging. # All saved images have prefix DEBUG_ DEBUG = False @@ -65,9 +60,7 @@ class TestRenderingMeshes(unittest.TestCase): verts_padded = sphere_mesh.verts_padded() faces_padded = sphere_mesh.faces_padded() textures = Textures(verts_rgb=torch.ones_like(verts_padded)) - sphere_mesh = Meshes( - verts=verts_padded, faces=faces_padded, textures=textures - ) + sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures) # Init rasterizer settings if elevated_camera: @@ -90,9 +83,7 @@ class TestRenderingMeshes(unittest.TestCase): raster_settings = RasterizationSettings( image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0 ) - rasterizer = MeshRasterizer( - cameras=cameras, raster_settings=raster_settings - ) + rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings) # Test several shaders shaders = { @@ -101,9 +92,7 @@ class TestRenderingMeshes(unittest.TestCase): "flat": HardFlatShader, } for (name, shader_init) in shaders.items(): - shader = shader_init( - lights=lights, cameras=cameras, materials=materials - ) + shader = shader_init(lights=lights, cameras=cameras, materials=materials) renderer = MeshRenderer(rasterizer=rasterizer, shader=shader) images = renderer(sphere_mesh) filename = "simple_sphere_light_%s%s.png" % (name, postfix) @@ -125,9 +114,7 @@ class TestRenderingMeshes(unittest.TestCase): phong_shader = HardPhongShader( lights=lights, cameras=cameras, materials=materials ) - phong_renderer = MeshRenderer( - rasterizer=rasterizer, shader=phong_shader - ) + phong_renderer = MeshRenderer(rasterizer=rasterizer, shader=phong_shader) images = phong_renderer(sphere_mesh, lights=lights) rgb = images[0, ..., :3].squeeze().cpu() if DEBUG: @@ -137,9 +124,7 @@ class TestRenderingMeshes(unittest.TestCase): ) # Load reference image - image_ref_phong_dark = load_rgb_image( - "test_simple_sphere_dark%s.png" % postfix - ) + image_ref_phong_dark = load_rgb_image("test_simple_sphere_dark%s.png" % postfix) self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05)) def test_simple_sphere_elevated_camera(self): @@ -184,18 +169,14 @@ class TestRenderingMeshes(unittest.TestCase): lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None] # Init renderer - rasterizer = MeshRasterizer( - cameras=cameras, raster_settings=raster_settings - ) + rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings) shaders = { "phong": HardGouraudShader, "gouraud": HardGouraudShader, "flat": HardFlatShader, } for (name, shader_init) in shaders.items(): - 
shader = shader_init( - lights=lights, cameras=cameras, materials=materials - ) + shader = shader_init(lights=lights, cameras=cameras, materials=materials) renderer = MeshRenderer(rasterizer=rasterizer, shader=shader) images = renderer(sphere_meshes) image_ref = load_rgb_image("test_simple_sphere_light_%s.png" % name) @@ -228,9 +209,7 @@ class TestRenderingMeshes(unittest.TestCase): # Init renderer renderer = MeshRenderer( - rasterizer=MeshRasterizer( - cameras=cameras, raster_settings=raster_settings - ), + rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings), shader=SoftSilhouetteShader(blend_params=blend_params), ) images = renderer(sphere_mesh) @@ -258,9 +237,7 @@ class TestRenderingMeshes(unittest.TestCase): The pupils in the eyes of the cow should always be looking to the left. """ device = torch.device("cuda:0") - DATA_DIR = ( - Path(__file__).resolve().parent.parent / "docs/tutorials/data" - ) + DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data" obj_filename = DATA_DIR / "cow_mesh/cow.obj" # Load mesh + texture @@ -283,9 +260,7 @@ class TestRenderingMeshes(unittest.TestCase): # Init renderer renderer = MeshRenderer( - rasterizer=MeshRasterizer( - cameras=cameras, raster_settings=raster_settings - ), + rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings), shader=TexturedSoftPhongShader( lights=lights, cameras=cameras, materials=materials ), @@ -306,9 +281,7 @@ class TestRenderingMeshes(unittest.TestCase): # Check grad exists [verts] = mesh.verts_list() verts.requires_grad = True - mesh2 = Meshes( - verts=[verts], faces=mesh.faces_list(), textures=mesh.textures - ) + mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures) images = renderer(mesh2) images[0, ...].sum().backward() self.assertIsNotNone(verts.grad) diff --git a/tests/test_rendering_utils.py b/tests/test_rendering_utils.py index 917d173d..118f3845 100644 --- a/tests/test_rendering_utils.py +++ b/tests/test_rendering_utils.py @@ -1,13 +1,12 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import numpy as np import unittest + +import numpy as np import torch - -from pytorch3d.renderer.utils import TensorProperties - from common_testing import TestCaseMixin +from pytorch3d.renderer.utils import TensorProperties # Example class for testing @@ -81,9 +80,5 @@ class TestTensorProperties(TestCaseMixin, unittest.TestCase): if inds.sum() > 0: # Check the gathered points in the output have the same value from # the input. - self.assertClose( - test_class_gathered.x[inds].mean(dim=0), x[i, ...] - ) - self.assertClose( - test_class_gathered.y[inds].mean(dim=0), y[i, ...] - ) + self.assertClose(test_class_gathered.x[inds].mean(dim=0), x[i, ...]) + self.assertClose(test_class_gathered.y[inds].mean(dim=0), y[i, ...]) diff --git a/tests/test_rotation_conversions.py b/tests/test_rotation_conversions.py index 0e62b709..a65b00cf 100644 --- a/tests/test_rotation_conversions.py +++ b/tests/test_rotation_conversions.py @@ -4,8 +4,8 @@ import itertools import math import unittest -import torch +import torch from pytorch3d.transforms.rotation_conversions import ( euler_angles_to_matrix, matrix_to_euler_angles, @@ -45,9 +45,7 @@ class TestRandomRotation(unittest.TestCase): ) # The 0.1 significance level for chisquare(8-1) is # scipy.stats.chi2(7).ppf(0.9) == 12.017. 
- self.assertLess( - chisquare_statistic, 12, (counts, chisquare_statistic, k) - ) + self.assertLess(chisquare_statistic, 12, (counts, chisquare_statistic, k)) class TestRotationConversion(unittest.TestCase): diff --git a/tests/test_sample_points_from_meshes.py b/tests/test_sample_points_from_meshes.py index 9bd0bc62..6343aa72 100644 --- a/tests/test_sample_points_from_meshes.py +++ b/tests/test_sample_points_from_meshes.py @@ -3,14 +3,13 @@ import unittest from pathlib import Path -import torch +import torch +from common_testing import TestCaseMixin from pytorch3d.ops import sample_points_from_meshes from pytorch3d.structures.meshes import Meshes from pytorch3d.utils.ico_sphere import ico_sphere -from common_testing import TestCaseMixin - class TestSamplePoints(TestCaseMixin, unittest.TestCase): def setUp(self) -> None: @@ -28,9 +27,7 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): verts_list = [] faces_list = [] for _ in range(num_meshes): - verts = torch.rand( - (num_verts, 3), dtype=torch.float32, device=device - ) + verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces = torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64, device=device ) @@ -48,13 +45,9 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): device = torch.device("cuda:0") verts1 = torch.tensor([], dtype=torch.float32, device=device) faces1 = torch.tensor([], dtype=torch.int64, device=device) - meshes = Meshes( - verts=[verts1, verts1, verts1], faces=[faces1, faces1, faces1] - ) + meshes = Meshes(verts=[verts1, verts1, verts1], faces=[faces1, faces1, faces1]) with self.assertRaises(ValueError) as err: - sample_points_from_meshes( - meshes, num_samples=100, return_normals=True - ) + sample_points_from_meshes(meshes, num_samples=100, return_normals=True) self.assertTrue("Meshes are empty." in str(err.exception)) def test_sampling_output(self): @@ -67,12 +60,7 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): # Unit simplex. verts_pyramid = torch.tensor( - [ - [0.0, 0.0, 0.0], - [1.0, 0.0, 0.0], - [0.0, 1.0, 0.0], - [0.0, 0.0, 1.0], - ], + [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=torch.float32, device=device, ) @@ -113,12 +101,8 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): pyramid_verts = samples[2, :] pyramid_normals = normals[2, :] - self.assertClose( - pyramid_verts.lt(1).float(), torch.ones_like(pyramid_verts) - ) - self.assertClose( - (pyramid_verts >= 0).float(), torch.ones_like(pyramid_verts) - ) + self.assertClose(pyramid_verts.lt(1).float(), torch.ones_like(pyramid_verts)) + self.assertClose((pyramid_verts >= 0).float(), torch.ones_like(pyramid_verts)) # Face 1: z = 0, x + y <= 1, normals = (0, 0, 1). face_1_idxs = pyramid_verts[:, 2] == 0 @@ -126,14 +110,10 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): pyramid_verts[face_1_idxs, :], pyramid_normals[face_1_idxs, :], ) - self.assertTrue( - torch.all((face_1_verts[:, 0] + face_1_verts[:, 1]) <= 1) - ) + self.assertTrue(torch.all((face_1_verts[:, 0] + face_1_verts[:, 1]) <= 1)) self.assertClose( face_1_normals, - torch.tensor([0, 0, 1], dtype=torch.float32).expand( - face_1_normals.size() - ), + torch.tensor([0, 0, 1], dtype=torch.float32).expand(face_1_normals.size()), ) # Face 2: x = 0, z + y <= 1, normals = (1, 0, 0). 
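These per-face checks all rest on area-weighted barycentric sampling. For orientation, a minimal sketch of the API under test, with a toy sample count assumed:

import torch
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.utils import ico_sphere

meshes = ico_sphere(3)
samples, normals = sample_points_from_meshes(
    meshes, num_samples=500, return_normals=True
)
assert samples.shape == (1, 500, 3) and normals.shape == (1, 500, 3)
# Faces are drawn proportionally to area, so points sampled from a unit
# ico-sphere lie approximately on the unit shell.
assert ((samples.norm(dim=-1) - 1.0).abs() < 0.05).all()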
@@ -142,14 +122,10 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): pyramid_verts[face_2_idxs, :], pyramid_normals[face_2_idxs, :], ) - self.assertTrue( - torch.all((face_2_verts[:, 1] + face_2_verts[:, 2]) <= 1) - ) + self.assertTrue(torch.all((face_2_verts[:, 1] + face_2_verts[:, 2]) <= 1)) self.assertClose( face_2_normals, - torch.tensor([1, 0, 0], dtype=torch.float32).expand( - face_2_normals.size() - ), + torch.tensor([1, 0, 0], dtype=torch.float32).expand(face_2_normals.size()), ) # Face 3: y = 0, x + z <= 1, normals = (0, -1, 0). @@ -158,14 +134,10 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): pyramid_verts[face_3_idxs, :], pyramid_normals[face_3_idxs, :], ) - self.assertTrue( - torch.all((face_3_verts[:, 0] + face_3_verts[:, 2]) <= 1) - ) + self.assertTrue(torch.all((face_3_verts[:, 0] + face_3_verts[:, 2]) <= 1)) self.assertClose( face_3_normals, - torch.tensor([0, -1, 0], dtype=torch.float32).expand( - face_3_normals.size() - ), + torch.tensor([0, -1, 0], dtype=torch.float32).expand(face_3_normals.size()), ) # Face 4: x + y + z = 1, normals = (1, 1, 1)/sqrt(3). @@ -279,22 +251,15 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): num_faces = 50 for device in ["cpu", "cuda:0"]: for invalid in ["nan", "inf"]: - verts = torch.rand( - (num_verts, 3), dtype=torch.float32, device=device - ) + verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device) # randomly assign an invalid type verts[torch.randperm(num_verts)[:10]] = float(invalid) faces = torch.randint( - num_verts, - size=(num_faces, 3), - dtype=torch.int64, - device=device, + num_verts, size=(num_faces, 3), dtype=torch.int64, device=device ) meshes = Meshes(verts=[verts], faces=[faces]) - with self.assertRaisesRegex( - ValueError, "Meshes contain nan or inf." - ): + with self.assertRaisesRegex(ValueError, "Meshes contain nan or inf."): sample_points_from_meshes( meshes, num_samples=100, return_normals=True ) @@ -310,9 +275,7 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): verts_list = [] faces_list = [] for _ in range(num_meshes): - verts = torch.rand( - (num_verts, 3), dtype=torch.float32, device=device - ) + verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces = torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64, device=device ) diff --git a/tests/test_so3.py b/tests/test_so3.py index 8c1735ed..8e261529 100644 --- a/tests/test_so3.py +++ b/tests/test_so3.py @@ -1,10 +1,10 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import numpy as np import unittest -import torch +import numpy as np +import torch from pytorch3d.transforms.so3 import ( hat, so3_exponential_map, @@ -26,9 +26,7 @@ class TestSO3(unittest.TestCase): randomly generated logarithms of rotation matrices. """ device = torch.device("cuda:0") - log_rot = torch.randn( - (batch_size, 3), dtype=torch.float32, device=device - ) + log_rot = torch.randn((batch_size, 3), dtype=torch.float32, device=device) return log_rot @staticmethod @@ -85,16 +83,12 @@ class TestSO3(unittest.TestCase): log_rot = torch.randn(size=[5, 4], device=device) with self.assertRaises(ValueError) as err: so3_exponential_map(log_rot) - self.assertTrue( - "Input tensor shape has to be Nx3." in str(err.exception) - ) + self.assertTrue("Input tensor shape has to be Nx3." in str(err.exception)) rot = torch.randn(size=[5, 3, 5], device=device) with self.assertRaises(ValueError) as err: so3_log_map(rot) - self.assertTrue( - "Input has to be a batch of 3x3 Tensors." 
in str(err.exception) - ) + self.assertTrue("Input has to be a batch of 3x3 Tensors." in str(err.exception)) # trace of rot definitely bigger than 3 or smaller than -1 rot = torch.cat( diff --git a/tests/test_struct_utils.py b/tests/test_struct_utils.py index 6f2b5f7f..4d555c70 100644 --- a/tests/test_struct_utils.py +++ b/tests/test_struct_utils.py @@ -2,11 +2,10 @@ import unittest + import torch - -from pytorch3d.structures import utils as struct_utils - from common_testing import TestCaseMixin +from pytorch3d.structures import utils as struct_utils class TestStructUtils(TestCaseMixin, unittest.TestCase): @@ -27,22 +26,16 @@ class TestStructUtils(TestCaseMixin, unittest.TestCase): self.assertEqual(x_padded.shape[1], K) self.assertEqual(x_padded.shape[2], K) for i in range(N): - self.assertClose( - x_padded[i, : x[i].shape[0], : x[i].shape[1]], x[i] - ) + self.assertClose(x_padded[i, : x[i].shape[0], : x[i].shape[1]], x[i]) # check for no pad size (defaults to max dimension) - x_padded = struct_utils.list_to_padded( - x, pad_value=0.0, equisized=False - ) + x_padded = struct_utils.list_to_padded(x, pad_value=0.0, equisized=False) max_size0 = max(y.shape[0] for y in x) max_size1 = max(y.shape[1] for y in x) self.assertEqual(x_padded.shape[1], max_size0) self.assertEqual(x_padded.shape[2], max_size1) for i in range(N): - self.assertClose( - x_padded[i, : x[i].shape[0], : x[i].shape[1]], x[i] - ) + self.assertClose(x_padded[i, : x[i].shape[0], : x[i].shape[1]], x[i]) # check for equisized x = [torch.rand((K, 10), device=device) for _ in range(N)] @@ -88,9 +81,7 @@ class TestStructUtils(TestCaseMixin, unittest.TestCase): split_size = torch.randint(1, K, size=(2 * N,)).view(N, 2).unbind(0) x_list = struct_utils.padded_to_list(x, split_size) for i in range(N): - self.assertClose( - x_list[i], x[i, : split_size[i][0], : split_size[i][1]] - ) + self.assertClose(x_list[i], x[i, : split_size[i][0], : split_size[i][1]]) with self.assertRaisesRegex(ValueError, "Supports only"): x = torch.rand((N, K, K, K, K), device=device) @@ -124,32 +115,24 @@ class TestStructUtils(TestCaseMixin, unittest.TestCase): # Add some random values in the input which are the same as the pad_value. # These should not be filtered out. x_list.append( - torch.randint( - low=pad_value, high=10, size=(dim, K), device=device - ) + torch.randint(low=pad_value, high=10, size=(dim, K), device=device) ) split_size.append(dim) x_padded = struct_utils.list_to_padded(x_list, pad_value=pad_value) x_packed = struct_utils.padded_to_packed(x_padded, pad_value=pad_value) curr = 0 for i in range(N): - self.assertClose( - x_packed[curr : curr + split_size[i], ...], x_list[i] - ) + self.assertClose(x_packed[curr : curr + split_size[i], ...], x_list[i]) self.assertClose(torch.cat(x_list), x_packed) curr += split_size[i] # Case 3: split_size is provided. # Check each section of the packed tensor matches the corresponding # unpadded elements. - x_packed = struct_utils.padded_to_packed( - x_padded, split_size=split_size - ) + x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size) curr = 0 for i in range(N): - self.assertClose( - x_packed[curr : curr + split_size[i], ...], x_list[i] - ) + self.assertClose(x_packed[curr : curr + split_size[i], ...], x_list[i]) self.assertClose(torch.cat(x_list), x_packed) curr += split_size[i] @@ -157,17 +140,13 @@ class TestStructUtils(TestCaseMixin, unittest.TestCase): # Raise an error. 
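Before the remaining error cases, a minimal sketch of the list_to_padded / padded_to_packed round trip being exercised, with toy shapes assumed:

import torch
from pytorch3d.structures import utils as struct_utils

x = [torch.ones(2, 3), 2 * torch.ones(4, 3)]  # ragged input list
padded = struct_utils.list_to_padded(x, pad_value=0.0)  # (2, 4, 3)

# Giving the per-element sizes recovers the packed concatenation exactly.
packed = struct_utils.padded_to_packed(padded, split_size=[2, 4])
assert torch.equal(packed, torch.cat(x))  # (6, 3)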
split_size = torch.randint(1, K, size=(2 * N,)).view(N, 2).unbind(0) with self.assertRaisesRegex(ValueError, "1-dimensional"): - x_packed = struct_utils.padded_to_packed( - x_padded, split_size=split_size - ) + x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size) split_size = torch.randint(1, K, size=(2 * N,)).view(N * 2).tolist() with self.assertRaisesRegex( ValueError, "same length as inputs first dimension" ): - x_packed = struct_utils.padded_to_packed( - x_padded, split_size=split_size - ) + x_packed = struct_utils.padded_to_packed(x_padded, split_size=split_size) # Case 5: both pad_value and split_size are provided. # Raise an error. @@ -204,8 +183,6 @@ class TestStructUtils(TestCaseMixin, unittest.TestCase): for i in range(N): self.assertTrue(num_items[i] == x_dims[i]) self.assertTrue(item_packed_first_idx[i] == cur) - self.assertTrue( - item_packed_to_list_idx[cur : cur + x_dims[i]].eq(i).all() - ) + self.assertTrue(item_packed_to_list_idx[cur : cur + x_dims[i]].eq(i).all()) self.assertClose(x_packed[cur : cur + x_dims[i]], x[i]) cur += x_dims[i] diff --git a/tests/test_subdivide_meshes.py b/tests/test_subdivide_meshes.py index 19d8d37b..ba6da2a1 100644 --- a/tests/test_subdivide_meshes.py +++ b/tests/test_subdivide_meshes.py @@ -2,14 +2,13 @@ import unittest -import torch +import torch +from common_testing import TestCaseMixin from pytorch3d.ops.subdivide_meshes import SubdivideMeshes from pytorch3d.structures.meshes import Meshes from pytorch3d.utils.ico_sphere import ico_sphere -from common_testing import TestCaseMixin - class TestSubdivideMeshes(TestCaseMixin, unittest.TestCase): def test_simple_subdivide(self): @@ -72,25 +71,14 @@ class TestSubdivideMeshes(TestCaseMixin, unittest.TestCase): ) faces1 = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device) verts2 = torch.tensor( - [ - [0.5, 1.0, 0.0], - [1.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - [1.5, 1.0, 0.0], - ], + [[0.5, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.5, 1.0, 0.0]], dtype=torch.float32, device=device, requires_grad=True, ) - faces2 = torch.tensor( - [[0, 1, 2], [0, 3, 1]], dtype=torch.int64, device=device - ) - faces3 = torch.tensor( - [[0, 1, 2], [0, 2, 3]], dtype=torch.int64, device=device - ) - mesh = Meshes( - verts=[verts1, verts2, verts2], faces=[faces1, faces2, faces3] - ) + faces2 = torch.tensor([[0, 1, 2], [0, 3, 1]], dtype=torch.int64, device=device) + faces3 = torch.tensor([[0, 1, 2], [0, 2, 3]], dtype=torch.int64, device=device) + mesh = Meshes(verts=[verts1, verts2, verts2], faces=[faces1, faces2, faces3]) subdivide = SubdivideMeshes() new_mesh = subdivide(mesh.clone()) @@ -218,9 +206,7 @@ class TestSubdivideMeshes(TestCaseMixin, unittest.TestCase): self.assertTrue(new_feats.requires_grad == gt_feats.requires_grad) @staticmethod - def subdivide_meshes_with_init( - num_meshes: int = 10, same_topo: bool = False - ): + def subdivide_meshes_with_init(num_meshes: int = 10, same_topo: bool = False): device = torch.device("cuda:0") meshes = ico_sphere(0, device=device) if num_meshes > 1: diff --git a/tests/test_texturing.py b/tests/test_texturing.py index 5e235c01..f5b0ddc3 100644 --- a/tests/test_texturing.py +++ b/tests/test_texturing.py @@ -2,9 +2,10 @@ import unittest + import torch import torch.nn.functional as F - +from common_testing import TestCaseMixin from pytorch3d.renderer.mesh.rasterizer import Fragments from pytorch3d.renderer.mesh.texturing import ( interpolate_face_attributes, @@ -13,8 +14,6 @@ from pytorch3d.renderer.mesh.texturing import ( ) from 
pytorch3d.structures import Meshes, Textures from pytorch3d.structures.utils import list_to_padded - -from common_testing import TestCaseMixin from test_meshes import TestMeshes @@ -68,12 +67,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): dists=torch.ones_like(pix_to_face), ) grad_vert_tex = torch.tensor( - [ - [0.3, 0.3, 0.3], - [0.9, 0.9, 0.9], - [0.5, 0.5, 0.5], - [0.3, 0.3, 0.3], - ], + [[0.3, 0.3, 0.3], [0.9, 0.9, 0.9], [0.5, 0.5, 0.5], [0.3, 0.3, 0.3]], dtype=torch.float32, ) texels = interpolate_vertex_colors(fragments, mesh) @@ -115,9 +109,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32 ).view(1, 1, 1, 2, -1) dummy_verts = torch.zeros(4, 3) - vert_uvs = torch.tensor( - [[1, 0], [0, 1], [1, 1], [0, 0]], dtype=torch.float32 - ) + vert_uvs = torch.tensor([[1, 0], [0, 1], [1, 1], [0, 0]], dtype=torch.float32) face_uvs = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64) interpolated_uvs = torch.tensor( [[0.5 + 0.2, 0.3 + 0.2], [0.6, 0.3 + 0.6]], dtype=torch.float32 @@ -137,9 +129,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): dists=pix_to_face, ) tex = Textures( - maps=tex_map, - faces_uvs=face_uvs[None, ...], - verts_uvs=vert_uvs[None, ...], + maps=tex_map, faces_uvs=face_uvs[None, ...], verts_uvs=vert_uvs[None, ...] ) meshes = Meshes(verts=[dummy_verts], faces=[face_uvs], textures=tex) texels = interpolate_texture_map(fragments, meshes) @@ -151,9 +141,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): tex_map = tex_map.permute(0, 3, 1, 2) tex_map = torch.cat([tex_map, tex_map], dim=0) expected_out = F.grid_sample(tex_map, pixel_uvs, align_corners=False) - self.assertTrue( - torch.allclose(texels.squeeze(), expected_out.squeeze()) - ) + self.assertTrue(torch.allclose(texels.squeeze(), expected_out.squeeze())) def test_init_rgb_uv_fail(self): V = 20 @@ -183,9 +171,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): Textures(verts_rgb=torch.ones((5, 16, 16, 3))) # maps provided without verts/faces uvs - with self.assertRaisesRegex( - ValueError, "faces_uvs and verts_uvs are required" - ): + with self.assertRaisesRegex(ValueError, "faces_uvs and verts_uvs are required"): Textures(maps=torch.ones((5, 16, 16, 3))) def test_padded_to_packed(self): @@ -209,9 +195,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): # This is set inside Meshes when textures is passed as an input. # Here we set _num_faces_per_mesh and _num_verts_per_mesh explicity. 
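For context on the packed getters being reformatted here, a minimal sketch of the two Textures flavours these tests build, with toy shapes assumed:

import torch
from pytorch3d.structures import Textures

# Per-vertex RGB colours: one (V, 3) colour tensor per mesh in the batch.
tex_rgb = Textures(verts_rgb=torch.rand(2, 10, 3))

# UV-mapped textures: an image per mesh plus face and vertex UV data.
tex_uv = Textures(
    maps=torch.rand(2, 16, 16, 3),
    faces_uvs=torch.zeros(2, 4, 3, dtype=torch.int64),
    verts_uvs=torch.rand(2, 10, 2),
)
assert tex_rgb.verts_rgb_padded().shape == (2, 10, 3)
assert tex_uv.verts_uvs_padded().shape == (2, 10, 2)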
tex1 = tex.clone() - tex1._num_faces_per_mesh = ( - faces_uvs_padded.gt(-1).all(-1).sum(-1).tolist() - ) + tex1._num_faces_per_mesh = faces_uvs_padded.gt(-1).all(-1).sum(-1).tolist() tex1._num_verts_per_mesh = torch.tensor([5, 4]) faces_packed = tex1.faces_uvs_packed() verts_packed = tex1.verts_uvs_packed() @@ -245,16 +229,12 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): for i in range(N): self.assertTrue( - (faces_list[i] == faces_uvs_padded[i, ...].squeeze()) - .all() - .item() + (faces_list[i] == faces_uvs_padded[i, ...].squeeze()).all().item() ) for i in range(N): self.assertTrue( - (verts_list[i] == verts_uvs_padded[i, ...].squeeze()) - .all() - .item() + (verts_list[i] == verts_uvs_padded[i, ...].squeeze()).all().item() ) def test_clone(self): @@ -344,9 +324,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): verts_uvs=torch.randn((B, V, 2)), ) tex_mesh = Meshes( - verts=mesh.verts_padded(), - faces=mesh.faces_padded(), - textures=tex_uv, + verts=mesh.verts_padded(), faces=mesh.faces_padded(), textures=tex_uv ) N = 20 new_mesh = tex_mesh.extend(N) @@ -359,12 +337,10 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): for i in range(len(tex_mesh)): for n in range(N): self.assertClose( - tex_init.faces_uvs_list()[i], - new_tex.faces_uvs_list()[i * N + n], + tex_init.faces_uvs_list()[i], new_tex.faces_uvs_list()[i * N + n] ) self.assertClose( - tex_init.verts_uvs_list()[i], - new_tex.verts_uvs_list()[i * N + n], + tex_init.verts_uvs_list()[i], new_tex.verts_uvs_list()[i * N + n] ) self.assertAllSeparate( [ @@ -384,9 +360,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): # 2. Texture vertex RGB tex_rgb = Textures(verts_rgb=torch.randn((B, V, 3))) tex_mesh_rgb = Meshes( - verts=mesh.verts_padded(), - faces=mesh.faces_padded(), - textures=tex_rgb, + verts=mesh.verts_padded(), faces=mesh.faces_padded(), textures=tex_rgb ) N = 20 new_mesh_rgb = tex_mesh_rgb.extend(N) @@ -399,8 +373,7 @@ class TestTexturing(TestCaseMixin, unittest.TestCase): for i in range(len(tex_mesh_rgb)): for n in range(N): self.assertClose( - tex_init.verts_rgb_list()[i], - new_tex.verts_rgb_list()[i * N + n], + tex_init.verts_rgb_list()[i], new_tex.verts_rgb_list()[i * N + n] ) self.assertAllSeparate( [tex_init.verts_rgb_padded(), new_tex.verts_rgb_padded()] diff --git a/tests/test_transforms.py b/tests/test_transforms.py index e8502121..d466937b 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -3,8 +3,8 @@ import math import unittest -import torch +import torch from pytorch3d.transforms.so3 import so3_exponential_map from pytorch3d.transforms.transform3d import ( Rotate, @@ -18,9 +18,7 @@ from pytorch3d.transforms.transform3d import ( class TestTransform(unittest.TestCase): def test_to(self): tr = Translate(torch.FloatTensor([[1.0, 2.0, 3.0]])) - R = torch.FloatTensor( - [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]] - ) + R = torch.FloatTensor([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]) R = Rotate(R) t = Transform3d().compose(R, tr) for _ in range(3): @@ -36,9 +34,7 @@ class TestTransform(unittest.TestCase): the same as composition of clones of translation and rotation. 
""" tr = Translate(torch.FloatTensor([[1.0, 2.0, 3.0]])) - R = torch.FloatTensor( - [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]] - ) + R = torch.FloatTensor([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]) R = Rotate(R) # check that the _matrix property of clones of @@ -63,9 +59,9 @@ class TestTransform(unittest.TestCase): def test_translate(self): t = Transform3d().translate(1, 2, 3) - points = torch.tensor( - [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]] - ).view(1, 3, 3) + points = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]]).view( + 1, 3, 3 + ) normals = torch.tensor( [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]] ).view(1, 3, 3) @@ -82,9 +78,9 @@ class TestTransform(unittest.TestCase): def test_scale(self): t = Transform3d().scale(2.0).scale(0.5, 0.25, 1.0) - points = torch.tensor( - [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]] - ).view(1, 3, 3) + points = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]]).view( + 1, 3, 3 + ) normals = torch.tensor( [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]] ).view(1, 3, 3) @@ -101,9 +97,9 @@ class TestTransform(unittest.TestCase): def test_scale_translate(self): t = Transform3d().scale(2, 1, 3).translate(1, 2, 3) - points = torch.tensor( - [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]] - ).view(1, 3, 3) + points = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 0.0]]).view( + 1, 3, 3 + ) normals = torch.tensor( [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]] ).view(1, 3, 3) @@ -120,9 +116,9 @@ class TestTransform(unittest.TestCase): def test_rotate_axis_angle(self): t = Transform3d().rotate_axis_angle(90.0, axis="Z") - points = torch.tensor( - [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0]] - ).view(1, 3, 3) + points = torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0]]).view( + 1, 3, 3 + ) normals = torch.tensor( [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]] ).view(1, 3, 3) @@ -194,9 +190,7 @@ class TestTransform(unittest.TestCase): t_ = Rotate( so3_exponential_map( torch.randn( - (batch_size, 3), - dtype=torch.float32, - device=device, + (batch_size, 3), dtype=torch.float32, device=device ) ), device=device, @@ -717,9 +711,7 @@ class TestRotate(unittest.TestCase): def test_inverse(self, batch_size=5): device = torch.device("cuda:0") - log_rot = torch.randn( - (batch_size, 3), dtype=torch.float32, device=device - ) + log_rot = torch.randn((batch_size, 3), dtype=torch.float32, device=device) R = so3_exponential_map(log_rot) t = Rotate(R) im = t.inverse()._matrix @@ -749,9 +741,7 @@ class TestRotateAxisAngle(unittest.TestCase): transformed_points = t.transform_points(points) expected_points = torch.tensor([0.0, 0.0, 1.0]) self.assertTrue( - torch.allclose( - transformed_points.squeeze(), expected_points, atol=1e-7 - ) + torch.allclose(transformed_points.squeeze(), expected_points, atol=1e-7) ) self.assertTrue(torch.allclose(t._matrix, matrix)) @@ -775,9 +765,7 @@ class TestRotateAxisAngle(unittest.TestCase): transformed_points = t.transform_points(points) expected_points = torch.tensor([0.0, 0.0, 1.0]) self.assertTrue( - torch.allclose( - transformed_points.squeeze(), expected_points, atol=1e-7 - ) + torch.allclose(transformed_points.squeeze(), expected_points, atol=1e-7) ) self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7)) @@ -835,9 +823,7 @@ class TestRotateAxisAngle(unittest.TestCase): transformed_points = t.transform_points(points) expected_points = torch.tensor([0.0, 0.0, -1.0]) self.assertTrue( - torch.allclose( - 
transformed_points.squeeze(), expected_points, atol=1e-7 - ) + torch.allclose(transformed_points.squeeze(), expected_points, atol=1e-7) ) self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7)) @@ -866,9 +852,7 @@ class TestRotateAxisAngle(unittest.TestCase): transformed_points = t.transform_points(points) expected_points = torch.tensor([0.0, 0.0, -1.0]) self.assertTrue( - torch.allclose( - transformed_points.squeeze(), expected_points, atol=1e-7 - ) + torch.allclose(transformed_points.squeeze(), expected_points, atol=1e-7) ) self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7)) @@ -923,9 +907,7 @@ class TestRotateAxisAngle(unittest.TestCase): transformed_points = t.transform_points(points) expected_points = torch.tensor([0.0, 1.0, 0.0]) self.assertTrue( - torch.allclose( - transformed_points.squeeze(), expected_points, atol=1e-7 - ) + torch.allclose(transformed_points.squeeze(), expected_points, atol=1e-7) ) self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7)) @@ -949,9 +931,7 @@ class TestRotateAxisAngle(unittest.TestCase): transformed_points = t.transform_points(points) expected_points = torch.tensor([0.0, 1.0, 0.0]) self.assertTrue( - torch.allclose( - transformed_points.squeeze(), expected_points, atol=1e-7 - ) + torch.allclose(transformed_points.squeeze(), expected_points, atol=1e-7) ) self.assertTrue(torch.allclose(t._matrix, matrix, atol=1e-7)) diff --git a/tests/test_vert_align.py b/tests/test_vert_align.py index 40ef91d1..13935590 100644 --- a/tests/test_vert_align.py +++ b/tests/test_vert_align.py @@ -2,22 +2,18 @@ import unittest + import torch import torch.nn.functional as F - +from common_testing import TestCaseMixin from pytorch3d.ops.vert_align import vert_align from pytorch3d.structures.meshes import Meshes -from common_testing import TestCaseMixin - class TestVertAlign(TestCaseMixin, unittest.TestCase): @staticmethod def vert_align_naive( - feats, - verts_or_meshes, - return_packed: bool = False, - align_corners: bool = True, + feats, verts_or_meshes, return_packed: bool = False, align_corners: bool = True ): """ Naive implementation of vert_align. 
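Stepping back to the Transform3d cases just above, which compose elementary transforms left to right: a minimal sketch of the scale-then-translate behaviour they assert, with toy points assumed:

import torch
from pytorch3d.transforms.transform3d import Transform3d

t = Transform3d().scale(2.0, 1.0, 3.0).translate(1.0, 2.0, 3.0)
points = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]).view(1, 2, 3)

# The scale applies first, then the translation.
out = t.transform_points(points)
expected = torch.tensor([[3.0, 2.0, 3.0], [1.0, 3.0, 3.0]]).view(1, 2, 3)
assert torch.allclose(out, expected)

# Composing with the inverse recovers the inputs.
assert torch.allclose(t.inverse().transform_points(out), points, atol=1e-6)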
@@ -60,16 +56,13 @@ class TestVertAlign(TestCaseMixin, unittest.TestCase):
         return out_feats
 
     @staticmethod
-    def init_meshes(
-        num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000
-    ):
+    def init_meshes(num_meshes: int = 10, num_verts: int = 1000, num_faces: int = 3000):
         device = torch.device("cuda:0")
         verts_list = []
         faces_list = []
         for _ in range(num_meshes):
             verts = (
-                torch.rand((num_verts, 3), dtype=torch.float32, device=device)
-                * 2.0
+                torch.rand((num_verts, 3), dtype=torch.float32, device=device) * 2.0
                 - 1.0
             )  # verts in the space of [-1, 1]
             faces = torch.randint(
@@ -82,15 +75,11 @@ class TestVertAlign(TestCaseMixin, unittest.TestCase):
         return meshes
 
     @staticmethod
-    def init_feats(
-        batch_size: int = 10, num_channels: int = 256, device: str = "cuda"
-    ):
+    def init_feats(batch_size: int = 10, num_channels: int = 256, device: str = "cuda"):
         H, W = [14, 28], [14, 28]
         feats = []
         for (h, w) in zip(H, W):
-            feats.append(
-                torch.rand((batch_size, num_channels, h, w), device=device)
-            )
+            feats.append(torch.rand((batch_size, num_channels, h, w), device=device))
         return feats
 
     def test_vert_align_with_meshes(self):
@@ -102,16 +91,12 @@ class TestVertAlign(TestCaseMixin, unittest.TestCase):
 
         # feats in list
         out = vert_align(feats, meshes, return_packed=True)
-        naive_out = TestVertAlign.vert_align_naive(
-            feats, meshes, return_packed=True
-        )
+        naive_out = TestVertAlign.vert_align_naive(feats, meshes, return_packed=True)
         self.assertClose(out, naive_out)
 
         # feats as tensor
         out = vert_align(feats[0], meshes, return_packed=True)
-        naive_out = TestVertAlign.vert_align_naive(
-            feats[0], meshes, return_packed=True
-        )
+        naive_out = TestVertAlign.vert_align_naive(feats[0], meshes, return_packed=True)
         self.assertClose(out, naive_out)
 
     def test_vert_align_with_verts(self):
@@ -120,30 +105,21 @@ class TestVertAlign(TestCaseMixin, unittest.TestCase):
         """
         feats = TestVertAlign.init_feats(10, 256)
         verts = (
-            torch.rand(
-                (10, 100, 3), dtype=torch.float32, device=feats[0].device
-            )
-            * 2.0
+            torch.rand((10, 100, 3), dtype=torch.float32, device=feats[0].device) * 2.0
             - 1.0
        )
 
         # feats in list
         out = vert_align(feats, verts, return_packed=True)
-        naive_out = TestVertAlign.vert_align_naive(
-            feats, verts, return_packed=True
-        )
+        naive_out = TestVertAlign.vert_align_naive(feats, verts, return_packed=True)
         self.assertClose(out, naive_out)
 
         # feats as tensor
         out = vert_align(feats[0], verts, return_packed=True)
-        naive_out = TestVertAlign.vert_align_naive(
-            feats[0], verts, return_packed=True
-        )
+        naive_out = TestVertAlign.vert_align_naive(feats[0], verts, return_packed=True)
         self.assertClose(out, naive_out)
 
-        out2 = vert_align(
-            feats[0], verts, return_packed=True, align_corners=False
-        )
+        out2 = vert_align(feats[0], verts, return_packed=True, align_corners=False)
         naive_out2 = TestVertAlign.vert_align_naive(
             feats[0], verts, return_packed=True, align_corners=False
         )
@@ -158,9 +134,7 @@ class TestVertAlign(TestCaseMixin, unittest.TestCase):
         verts_list = []
         faces_list = []
         for _ in range(num_meshes):
-            verts = torch.rand(
-                (num_verts, 3), dtype=torch.float32, device=device
-            )
+            verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
             faces = torch.randint(
                 num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
             )
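To close out this last file, a minimal sketch of the call that vert_align_naive reimplements with per-vertex grid sampling, with a toy feature map and toy vertices assumed:

import torch
from pytorch3d.ops.vert_align import vert_align

feats = torch.rand(2, 8, 14, 14)           # (N, C, H, W) feature maps
verts = torch.rand(2, 100, 3) * 2.0 - 1.0  # x, y in [-1, 1], as in init_meshes

# Bilinearly samples each feature map at every vertex's (x, y) location.
vert_feats = vert_align(feats, verts)      # (N, V, C)
assert vert_feats.shape == (2, 100, 8)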