From df08ea8eb47e29bd3fafa106d390c52cad5a0741 Mon Sep 17 00:00:00 2001 From: Jeremy Reizenstein Date: Wed, 13 Apr 2022 04:40:56 -0700 Subject: [PATCH] Fix inferred typing Summary: D35513897 (https://github.com/facebookresearch/pytorch3d/commit/4b94649f7b7de4ce3a0597c5a17a7596928ad20b) was a pyre infer job which got some things wrong. Correct by adding the correct types, so these things shouldn't need worrying about again. Reviewed By: patricklabatut Differential Revision: D35546144 fbshipit-source-id: 89f6ea2b67be27aa0b0b14afff4347cccf23feb7 --- pytorch3d/ops/points_normals.py | 3 +- pytorch3d/ops/subdivide_meshes.py | 12 +++---- pytorch3d/renderer/cameras.py | 56 ++++++++++++++++++------------- pytorch3d/vis/texture_vis.py | 4 +-- 4 files changed, 41 insertions(+), 34 deletions(-) diff --git a/pytorch3d/ops/points_normals.py b/pytorch3d/ops/points_normals.py index f9113299..edd0293e 100644 --- a/pytorch3d/ops/points_normals.py +++ b/pytorch3d/ops/points_normals.py @@ -167,7 +167,7 @@ def estimate_pointcloud_local_coord_frames( return curvatures, local_coord_frames -def _disambiguate_vector_directions(pcl, knns, vecs: float) -> float: +def _disambiguate_vector_directions(pcl, knns, vecs: torch.Tensor) -> torch.Tensor: """ Disambiguates normal directions according to [1]. @@ -181,7 +181,6 @@ def _disambiguate_vector_directions(pcl, knns, vecs: float) -> float: # each element of the neighborhood df = knns - pcl[:, :, None] # projection of the difference on the principal direction - # pyre-fixme[16]: `float` has no attribute `__getitem__`. proj = (vecs[:, :, None] * df).sum(3) # check how many projections are positive n_pos = (proj > 0).type_as(knns).sum(2, keepdim=True) diff --git a/pytorch3d/ops/subdivide_meshes.py b/pytorch3d/ops/subdivide_meshes.py index 2dd0cab4..2426fee6 100644 --- a/pytorch3d/ops/subdivide_meshes.py +++ b/pytorch3d/ops/subdivide_meshes.py @@ -261,7 +261,7 @@ class SubdivideMeshes(nn.Module): # Calculate the indices needed to group the new and existing verts # for each mesh. - verts_sort_idx = create_verts_index( + verts_sort_idx = _create_verts_index( num_verts_per_mesh, num_edges_per_mesh, meshes.device ) # (sum(V_n)+sum(E_n),) @@ -282,7 +282,9 @@ class SubdivideMeshes(nn.Module): # Calculate the indices needed to group the existing and new faces # for each mesh. - face_sort_idx = create_faces_index(num_faces_per_mesh, device=meshes.device) + face_sort_idx = _create_faces_index( + num_faces_per_mesh, device=meshes.device + ) # Reorder the faces to sequentially group existing and new faces # for each mesh. @@ -329,7 +331,7 @@ class SubdivideMeshes(nn.Module): return new_meshes, new_feats -def create_verts_index(verts_per_mesh, edges_per_mesh, device=None): +def _create_verts_index(verts_per_mesh, edges_per_mesh, device=None): """ Helper function to group the vertex indices for each mesh. New vertices are stacked at the end of the original verts tensor, so in order to have @@ -400,7 +402,7 @@ def create_verts_index(verts_per_mesh, edges_per_mesh, device=None): return verts_idx -def create_faces_index(faces_per_mesh: int, device=None): +def _create_faces_index(faces_per_mesh: torch.Tensor, device=None): """ Helper function to group the faces indices for each mesh. New faces are stacked at the end of the original faces tensor, so in order to have @@ -417,9 +419,7 @@ def create_faces_index(faces_per_mesh: int, device=None): """ # e.g. faces_per_mesh = [2, 5, 3] - # pyre-fixme[16]: `int` has no attribute `sum`. F = faces_per_mesh.sum() # e.g. 
10 - # pyre-fixme[16]: `int` has no attribute `cumsum`. faces_per_mesh_cumsum = faces_per_mesh.cumsum(dim=0) # (N,) e.g. (2, 7, 10) switch1_idx = faces_per_mesh_cumsum.clone() diff --git a/pytorch3d/renderer/cameras.py b/pytorch3d/renderer/cameras.py index 3b660c3c..dbff8a6e 100644 --- a/pytorch3d/renderer/cameras.py +++ b/pytorch3d/renderer/cameras.py @@ -21,6 +21,14 @@ from .utils import TensorProperties, convert_to_tensors_and_broadcast _R = torch.eye(3)[None] # (1, 3, 3) _T = torch.zeros(1, 3) # (1, 3) +# An input which is a float per batch element +_BatchFloatType = Union[float, Sequence[float], torch.Tensor] + +# one or two floats per batch element +_FocalLengthType = Union[ + float, Sequence[Tuple[float]], Sequence[Tuple[float, float]], torch.Tensor +] + class CamerasBase(TensorProperties): """ @@ -427,10 +435,10 @@ class CamerasBase(TensorProperties): def OpenGLPerspectiveCameras( - znear: float = 1.0, - zfar: float = 100.0, - aspect_ratio: float = 1.0, - fov: float = 60.0, + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + aspect_ratio: _BatchFloatType = 1.0, + fov: _BatchFloatType = 60.0, degrees: bool = True, R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -508,10 +516,10 @@ class FoVPerspectiveCameras(CamerasBase): def __init__( self, - znear=1.0, - zfar=100.0, - aspect_ratio=1.0, - fov=60.0, + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + aspect_ratio: _BatchFloatType = 1.0, + fov: _BatchFloatType = 60.0, degrees: bool = True, R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -709,12 +717,12 @@ class FoVPerspectiveCameras(CamerasBase): def OpenGLOrthographicCameras( - znear: float = 1.0, - zfar: float = 100.0, - top: float = 1.0, - bottom: float = -1.0, - left: float = -1.0, - right: float = 1.0, + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + top: _BatchFloatType = 1.0, + bottom: _BatchFloatType = -1.0, + left: _BatchFloatType = -1.0, + right: _BatchFloatType = 1.0, scale_xyz=((1.0, 1.0, 1.0),), # (1, 3) R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -769,12 +777,12 @@ class FoVOrthographicCameras(CamerasBase): def __init__( self, - znear=1.0, - zfar=100.0, - max_y=1.0, - min_y=-1.0, - max_x=1.0, - min_x=-1.0, + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + max_y: _BatchFloatType = 1.0, + min_y: _BatchFloatType = -1.0, + max_x: _BatchFloatType = 1.0, + min_x: _BatchFloatType = -1.0, scale_xyz=((1.0, 1.0, 1.0),), # (1, 3) R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -956,7 +964,7 @@ Note that the MultiView Cameras accept parameters in NDC space. 
def SfMPerspectiveCameras( - focal_length: float = 1.0, + focal_length: _FocalLengthType = 1.0, principal_point=((0.0, 0.0),), R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -1008,7 +1016,7 @@ class PerspectiveCameras(CamerasBase): def __init__( self, - focal_length=1.0, + focal_length: _FocalLengthType = 1.0, principal_point=((0.0, 0.0),), R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -1194,7 +1202,7 @@ class PerspectiveCameras(CamerasBase): def SfMOrthographicCameras( - focal_length: float = 1.0, + focal_length: _FocalLengthType = 1.0, principal_point=((0.0, 0.0),), R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -1246,7 +1254,7 @@ class OrthographicCameras(CamerasBase): def __init__( self, - focal_length=1.0, + focal_length: _FocalLengthType = 1.0, principal_point=((0.0, 0.0),), R: torch.Tensor = _R, T: torch.Tensor = _T, diff --git a/pytorch3d/vis/texture_vis.py b/pytorch3d/vis/texture_vis.py index a3b0fcd5..0d36abb6 100644 --- a/pytorch3d/vis/texture_vis.py +++ b/pytorch3d/vis/texture_vis.py @@ -4,7 +4,7 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -from typing import Optional +from typing import Any, Optional import numpy as np from PIL import Image, ImageDraw @@ -65,7 +65,7 @@ def texturesuv_image_PIL( *, texture_index: int = 0, radius: float = 1, - color: str = "red", + color: Any = "red", subsample: Optional[int] = 10000, ): # pragma: no cover """
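
Illustrative note for the cameras.py changes (not part of the patch): the two new aliases describe inputs the camera constructors already broadcast per batch element. _BatchFloatType covers a plain float, a sequence with one float per camera, or a tensor; _FocalLengthType additionally allows an (N, 2) tensor of (fx, fy) pairs. A minimal sketch of the accepted call forms, assuming the public constructors in pytorch3d.renderer:

    import torch
    from pytorch3d.renderer import FoVPerspectiveCameras, PerspectiveCameras

    # _BatchFloatType: one shared scalar, one float per camera, or a tensor.
    cams_scalar = FoVPerspectiveCameras(fov=60.0)
    cams_list = FoVPerspectiveCameras(fov=[45.0, 60.0], znear=[0.1, 1.0])
    cams_tensor = FoVPerspectiveCameras(fov=torch.tensor([45.0, 60.0]))

    # _FocalLengthType: a scalar, an (N, 1) tensor, or an (N, 2) tensor of (fx, fy).
    persp_scalar = PerspectiveCameras(focal_length=1.0)
    persp_fx = PerspectiveCameras(focal_length=torch.tensor([[1.0], [2.0]]))
    persp_fxfy = PerspectiveCameras(focal_length=torch.tensor([[1.0, 1.2], [2.0, 2.1]]))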
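
For points_normals.py, the change only retypes the private helper _disambiguate_vector_directions (tensor in, tensor out); the public entry point that exercises it is estimate_pointcloud_normals with disambiguate_directions=True. A small sketch of that call, assuming it is exported from pytorch3d.ops, with shapes as in its docstring:

    import torch
    from pytorch3d.ops import estimate_pointcloud_normals

    points = torch.randn(2, 500, 3)  # (minibatch, num_points, 3)
    # disambiguate_directions=True routes through _disambiguate_vector_directions,
    # which now takes and returns torch.Tensor rather than float.
    normals = estimate_pointcloud_normals(
        points, neighborhood_size=30, disambiguate_directions=True
    )
    assert normals.shape == (2, 500, 3)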
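
For subdivide_meshes.py, create_verts_index and create_faces_index become private helpers (_create_verts_index / _create_faces_index), so callers should go through the SubdivideMeshes module itself. A brief usage sketch, assuming ico_sphere from pytorch3d.utils as a test mesh:

    from pytorch3d.ops import SubdivideMeshes
    from pytorch3d.utils import ico_sphere

    meshes = ico_sphere(level=1)            # a single icosphere as a Meshes object
    subdivided = SubdivideMeshes()(meshes)  # each face is split into four new faces
    print(subdivided.verts_packed().shape, subdivided.faces_packed().shape)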