From e9fb6c27e3c71465dd982e03509aa3ed304955a7 Mon Sep 17 00:00:00 2001 From: Pyre Bot Jr <> Date: Thu, 3 Feb 2022 01:45:00 -0800 Subject: [PATCH] Add annotations to `vision/fair/pytorch3d` Reviewed By: shannonzhu Differential Revision: D33970393 fbshipit-source-id: 9b4dfaccfc3793fd37705a923d689cb14c9d26ba --- pytorch3d/datasets/r2n2/utils.py | 7 +++-- pytorch3d/io/obj_io.py | 10 +++++--- pytorch3d/io/ply_io.py | 4 ++- pytorch3d/io/utils.py | 2 +- pytorch3d/loss/chamfer.py | 2 +- pytorch3d/ops/knn.py | 2 +- pytorch3d/ops/points_normals.py | 3 ++- pytorch3d/ops/points_to_volumes.py | 2 +- pytorch3d/ops/subdivide_meshes.py | 4 ++- pytorch3d/ops/utils.py | 2 +- pytorch3d/renderer/cameras.py | 30 +++++++++++----------- pytorch3d/renderer/implicit/raymarching.py | 6 ++--- pytorch3d/renderer/implicit/utils.py | 2 +- pytorch3d/renderer/lighting.py | 2 +- pytorch3d/renderer/mesh/shader.py | 2 +- pytorch3d/structures/meshes.py | 2 +- pytorch3d/structures/pointclouds.py | 2 +- pytorch3d/transforms/math.py | 2 +- pytorch3d/transforms/transform3d.py | 2 +- pytorch3d/vis/plotly_vis.py | 22 ++++++++++------ pytorch3d/vis/texture_vis.py | 4 +-- 21 files changed, 65 insertions(+), 49 deletions(-) diff --git a/pytorch3d/datasets/r2n2/utils.py b/pytorch3d/datasets/r2n2/utils.py index 7b53b0dc..3fe0ebf1 100644 --- a/pytorch3d/datasets/r2n2/utils.py +++ b/pytorch3d/datasets/r2n2/utils.py @@ -98,7 +98,9 @@ def collate_batched_R2N2(batch: List[Dict]): # pragma: no cover return collated_dict -def compute_extrinsic_matrix(azimuth, elevation, distance): # pragma: no cover +def compute_extrinsic_matrix( + azimuth: float, elevation: float, distance: float +): # pragma: no cover """ Copied from meshrcnn codebase: https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py#L96 @@ -138,6 +140,7 @@ def compute_extrinsic_matrix(azimuth, elevation, distance): # pragma: no cover # rotates the model 90 degrees about the x axis. To compensate for this quirk we # roll that rotation into the extrinsic matrix here rot = torch.tensor([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) + # pyre-fixme[16]: `Tensor` has no attribute `mm`. RT = RT.mm(rot.to(RT)) return RT @@ -384,7 +387,7 @@ def voxelize(voxel_coords, P, V): # pragma: no cover return voxels -def project_verts(verts, P, eps=1e-1): # pragma: no cover +def project_verts(verts, P, eps: float = 1e-1): # pragma: no cover """ Copied from meshrcnn codebase: https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py#L159 diff --git a/pytorch3d/io/obj_io.py b/pytorch3d/io/obj_io.py index c563970c..b103490e 100644 --- a/pytorch3d/io/obj_io.py +++ b/pytorch3d/io/obj_io.py @@ -32,7 +32,7 @@ _Aux = namedtuple( ) -def _format_faces_indices(faces_indices, max_index, device, pad_value=None): +def _format_faces_indices(faces_indices, max_index: int, device, pad_value=None): """ Format indices and check for invalid values. Indices can refer to values in one of the face properties: vertices, textures or normals. @@ -57,6 +57,7 @@ def _format_faces_indices(faces_indices, max_index, device, pad_value=None): ) if pad_value is not None: + # pyre-fixme[28]: Unexpected keyword argument `dim`. mask = faces_indices.eq(pad_value).all(dim=-1) # Change to 0 based indexing. @@ -66,6 +67,7 @@ def _format_faces_indices(faces_indices, max_index, device, pad_value=None): faces_indices[(faces_indices < 0)] += max_index if pad_value is not None: + # pyre-fixme[61]: `mask` is undefined, or not always defined. 
faces_indices[mask] = pad_value return _check_faces_indices(faces_indices, max_index, pad_value) @@ -73,7 +75,7 @@ def _format_faces_indices(faces_indices, max_index, device, pad_value=None): def load_obj( f, - load_textures=True, + load_textures: bool = True, create_texture_atlas: bool = False, texture_atlas_size: int = 4, texture_wrap: Optional[str] = "repeat", @@ -351,7 +353,7 @@ def _parse_face( faces_normals_idx, faces_textures_idx, faces_materials_idx, -): +) -> None: face = tokens[1:] face_list = [f.split("/") for f in face] face_verts = [] @@ -546,7 +548,7 @@ def _load_materials( def _load_obj( f_obj, *, - data_dir, + data_dir: str, load_textures: bool = True, create_texture_atlas: bool = False, texture_atlas_size: int = 4, diff --git a/pytorch3d/io/ply_io.py b/pytorch3d/io/ply_io.py index a02a6448..a98ab168 100644 --- a/pytorch3d/io/ply_io.py +++ b/pytorch3d/io/ply_io.py @@ -463,7 +463,9 @@ def _read_ply_element_ascii(f, definition: _PlyElementType): return data -def _read_raw_array(f, aim: str, length: int, dtype: type = np.uint8, dtype_size=1): +def _read_raw_array( + f, aim: str, length: int, dtype: type = np.uint8, dtype_size: int = 1 +): """ Read [length] elements from a file. diff --git a/pytorch3d/io/utils.py b/pytorch3d/io/utils.py index eef9cf90..0e820fed 100644 --- a/pytorch3d/io/utils.py +++ b/pytorch3d/io/utils.py @@ -28,7 +28,7 @@ def nullcontext(x): PathOrStr = Union[pathlib.Path, str] -def _open_file(f, path_manager: PathManager, mode="r") -> ContextManager[IO]: +def _open_file(f, path_manager: PathManager, mode: str = "r") -> ContextManager[IO]: if isinstance(f, str): f = path_manager.open(f, mode) return contextlib.closing(f) diff --git a/pytorch3d/loss/chamfer.py b/pytorch3d/loss/chamfer.py index 3a3eab3c..744a1e2a 100644 --- a/pytorch3d/loss/chamfer.py +++ b/pytorch3d/loss/chamfer.py @@ -14,7 +14,7 @@ from pytorch3d.structures.pointclouds import Pointclouds def _validate_chamfer_reduction_inputs( batch_reduction: Union[str, None], point_reduction: str -): +) -> None: """Check the requested reductions are valid. Args: diff --git a/pytorch3d/ops/knn.py b/pytorch3d/ops/knn.py index d033ae10..bd6f673f 100644 --- a/pytorch3d/ops/knn.py +++ b/pytorch3d/ops/knn.py @@ -106,7 +106,7 @@ def knn_points( version: int = -1, return_nn: bool = False, return_sorted: bool = True, -): +) -> _KNN: """ K-Nearest neighbors on point clouds. diff --git a/pytorch3d/ops/points_normals.py b/pytorch3d/ops/points_normals.py index 702c0bb7..22dbb9b3 100644 --- a/pytorch3d/ops/points_normals.py +++ b/pytorch3d/ops/points_normals.py @@ -166,7 +166,7 @@ def estimate_pointcloud_local_coord_frames( return curvatures, local_coord_frames -def _disambiguate_vector_directions(pcl, knns, vecs): +def _disambiguate_vector_directions(pcl, knns, vecs: float) -> float: """ Disambiguates normal directions according to [1]. @@ -180,6 +180,7 @@ def _disambiguate_vector_directions(pcl, knns, vecs): # each element of the neighborhood df = knns - pcl[:, :, None] # projection of the difference on the principal direction + # pyre-fixme[16]: `float` has no attribute `__getitem__`. 
proj = (vecs[:, :, None] * df).sum(3) # check how many projections are positive n_pos = (proj > 0).type_as(knns).sum(2, keepdim=True) diff --git a/pytorch3d/ops/points_to_volumes.py b/pytorch3d/ops/points_to_volumes.py index 571bbcc5..cbfdd388 100644 --- a/pytorch3d/ops/points_to_volumes.py +++ b/pytorch3d/ops/points_to_volumes.py @@ -479,7 +479,7 @@ def _check_points_to_volumes_inputs( volume_features: torch.Tensor, grid_sizes: torch.LongTensor, mask: Optional[torch.Tensor] = None, -): +) -> None: max_grid_size = grid_sizes.max(dim=0).values if torch.prod(max_grid_size) > volume_densities.shape[1]: diff --git a/pytorch3d/ops/subdivide_meshes.py b/pytorch3d/ops/subdivide_meshes.py index dec3be74..2dd0cab4 100644 --- a/pytorch3d/ops/subdivide_meshes.py +++ b/pytorch3d/ops/subdivide_meshes.py @@ -400,7 +400,7 @@ def create_verts_index(verts_per_mesh, edges_per_mesh, device=None): return verts_idx -def create_faces_index(faces_per_mesh, device=None): +def create_faces_index(faces_per_mesh: int, device=None): """ Helper function to group the faces indices for each mesh. New faces are stacked at the end of the original faces tensor, so in order to have @@ -417,7 +417,9 @@ def create_faces_index(faces_per_mesh, device=None): """ # e.g. faces_per_mesh = [2, 5, 3] + # pyre-fixme[16]: `int` has no attribute `sum`. F = faces_per_mesh.sum() # e.g. 10 + # pyre-fixme[16]: `int` has no attribute `cumsum`. faces_per_mesh_cumsum = faces_per_mesh.cumsum(dim=0) # (N,) e.g. (2, 7, 10) switch1_idx = faces_per_mesh_cumsum.clone() diff --git a/pytorch3d/ops/utils.py b/pytorch3d/ops/utils.py index 10d3b84c..a6fb6d7f 100644 --- a/pytorch3d/ops/utils.py +++ b/pytorch3d/ops/utils.py @@ -150,7 +150,7 @@ def convert_pointclouds_to_tensor(pcl: Union[torch.Tensor, "Pointclouds"]): return X, num_points -def is_pointclouds(pcl: Union[torch.Tensor, "Pointclouds"]): +def is_pointclouds(pcl: Union[torch.Tensor, "Pointclouds"]) -> bool: """Checks whether the input `pcl` is an instance of `Pointclouds` by checking the existence of `points_padded` and `num_points_per_cloud` functions. diff --git a/pytorch3d/renderer/cameras.py b/pytorch3d/renderer/cameras.py index a840d54b..03d651da 100644 --- a/pytorch3d/renderer/cameras.py +++ b/pytorch3d/renderer/cameras.py @@ -427,10 +427,10 @@ class CamerasBase(TensorProperties): def OpenGLPerspectiveCameras( - znear=1.0, - zfar=100.0, - aspect_ratio=1.0, - fov=60.0, + znear: float = 1.0, + zfar: float = 100.0, + aspect_ratio: float = 1.0, + fov: float = 60.0, degrees: bool = True, R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -709,12 +709,12 @@ class FoVPerspectiveCameras(CamerasBase): def OpenGLOrthographicCameras( - znear=1.0, - zfar=100.0, - top=1.0, - bottom=-1.0, - left=-1.0, - right=1.0, + znear: float = 1.0, + zfar: float = 100.0, + top: float = 1.0, + bottom: float = -1.0, + left: float = -1.0, + right: float = 1.0, scale_xyz=((1.0, 1.0, 1.0),), # (1, 3) R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -956,7 +956,7 @@ Note that the MultiView Cameras accept parameters in NDC space. 
def SfMPerspectiveCameras( - focal_length=1.0, + focal_length: float = 1.0, principal_point=((0.0, 0.0),), R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -1194,7 +1194,7 @@ class PerspectiveCameras(CamerasBase): def SfMOrthographicCameras( - focal_length=1.0, + focal_length: float = 1.0, principal_point=((0.0, 0.0),), R: torch.Tensor = _R, T: torch.Tensor = _T, @@ -1645,9 +1645,9 @@ def look_at_rotation( def look_at_view_transform( - dist=1.0, - elev=0.0, - azim=0.0, + dist: float = 1.0, + elev: float = 0.0, + azim: float = 0.0, degrees: bool = True, eye: Optional[Sequence] = None, at=((0, 0, 0),), # (1, 3) diff --git a/pytorch3d/renderer/implicit/raymarching.py b/pytorch3d/renderer/implicit/raymarching.py index 7b3289bf..5802b23c 100644 --- a/pytorch3d/renderer/implicit/raymarching.py +++ b/pytorch3d/renderer/implicit/raymarching.py @@ -162,7 +162,7 @@ class AbsorptionOnlyRaymarcher(torch.nn.Module): return opacities -def _shifted_cumprod(x, shift=1): +def _shifted_cumprod(x, shift: int = 1): """ Computes `torch.cumprod(x, dim=-1)` and prepends `shift` number of ones and removes `shift` trailing elements to/from the last dimension @@ -177,7 +177,7 @@ def _shifted_cumprod(x, shift=1): def _check_density_bounds( rays_densities: torch.Tensor, bounds: Tuple[float, float] = (0.0, 1.0) -): +) -> None: """ Checks whether the elements of `rays_densities` range within `bounds`. If not issues a warning. @@ -197,7 +197,7 @@ def _check_raymarcher_inputs( features_can_be_none: bool = False, z_can_be_none: bool = False, density_1d: bool = True, -): +) -> None: """ Checks the validity of the inputs to raymarching algorithms. """ diff --git a/pytorch3d/renderer/implicit/utils.py b/pytorch3d/renderer/implicit/utils.py index 6abdb09e..4d26391b 100644 --- a/pytorch3d/renderer/implicit/utils.py +++ b/pytorch3d/renderer/implicit/utils.py @@ -98,7 +98,7 @@ def _validate_ray_bundle_variables( rays_origins: torch.Tensor, rays_directions: torch.Tensor, rays_lengths: torch.Tensor, -): +) -> None: """ Validate the shapes of RayBundle variables `rays_origins`, `rays_directions`, and `rays_lengths`. diff --git a/pytorch3d/renderer/lighting.py b/pytorch3d/renderer/lighting.py index 399ea96a..4cc93fc5 100644 --- a/pytorch3d/renderer/lighting.py +++ b/pytorch3d/renderer/lighting.py @@ -323,7 +323,7 @@ class AmbientLights(TensorProperties): return torch.zeros_like(points) -def _validate_light_properties(obj): +def _validate_light_properties(obj) -> None: props = ("ambient_color", "diffuse_color", "specular_color") for n in props: t = getattr(obj, n) diff --git a/pytorch3d/renderer/mesh/shader.py b/pytorch3d/renderer/mesh/shader.py index a36ebe5a..e1fd3e64 100644 --- a/pytorch3d/renderer/mesh/shader.py +++ b/pytorch3d/renderer/mesh/shader.py @@ -301,7 +301,7 @@ def TexturedSoftPhongShader( lights: Optional[TensorProperties] = None, materials: Optional[Materials] = None, blend_params: Optional[BlendParams] = None, -): +) -> SoftPhongShader: """ TexturedSoftPhongShader class has been DEPRECATED. Use SoftPhongShader instead. Preserving TexturedSoftPhongShader as a function for backwards compatibility. 
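
[Editor's note, not part of the patch] The `-> SoftPhongShader` return annotation added just above works because, as the docstring in the hunk says, the deprecated `TexturedSoftPhongShader` is preserved only as a function that builds a `SoftPhongShader`. A minimal sketch of that wrapper pattern follows; the warning text, the exact argument forwarding, and the `...Sketch` name are assumptions for illustration, not code copied from the repository.

    # Illustrative sketch only: a deprecated constructor-style function that
    # forwards its arguments to SoftPhongShader, which is what makes the
    # `-> SoftPhongShader` annotation in the patch well-typed.
    import warnings

    from pytorch3d.renderer import SoftPhongShader


    def TexturedSoftPhongShaderSketch(
        device="cpu", cameras=None, lights=None, materials=None, blend_params=None
    ) -> SoftPhongShader:
        warnings.warn(
            "TexturedSoftPhongShader is deprecated; use SoftPhongShader instead.",
            PendingDeprecationWarning,
        )
        # Forward everything unchanged to the replacement shader.
        return SoftPhongShader(
            device=device,
            cameras=cameras,
            lights=lights,
            materials=materials,
            blend_params=blend_params,
        )
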
diff --git a/pytorch3d/structures/meshes.py b/pytorch3d/structures/meshes.py index fe36e2b0..f498ac50 100644 --- a/pytorch3d/structures/meshes.py +++ b/pytorch3d/structures/meshes.py @@ -1557,7 +1557,7 @@ class Meshes: raise ValueError("Meshes does not have textures") -def join_meshes_as_batch(meshes: List[Meshes], include_textures: bool = True): +def join_meshes_as_batch(meshes: List[Meshes], include_textures: bool = True) -> Meshes: """ Merge multiple Meshes objects, i.e. concatenate the meshes objects. They must all be on the same device. If include_textures is true, they must all diff --git a/pytorch3d/structures/pointclouds.py b/pytorch3d/structures/pointclouds.py index 74f401c8..6847ef3d 100644 --- a/pytorch3d/structures/pointclouds.py +++ b/pytorch3d/structures/pointclouds.py @@ -1224,7 +1224,7 @@ class Pointclouds: return coord_inside.all(dim=-1) -def join_pointclouds_as_batch(pointclouds: Sequence[Pointclouds]): +def join_pointclouds_as_batch(pointclouds: Sequence[Pointclouds]) -> Pointclouds: """ Merge a list of Pointclouds objects into a single batched Pointclouds object. All pointclouds must be on the same device. diff --git a/pytorch3d/transforms/math.py b/pytorch3d/transforms/math.py index 88346467..7740d6f2 100644 --- a/pytorch3d/transforms/math.py +++ b/pytorch3d/transforms/math.py @@ -10,7 +10,7 @@ from typing import Tuple import torch -DEFAULT_ACOS_BOUND = 1.0 - 1e-4 +DEFAULT_ACOS_BOUND: float = 1.0 - 1e-4 def acos_linear_extrapolation( diff --git a/pytorch3d/transforms/transform3d.py b/pytorch3d/transforms/transform3d.py index 9f55b5ab..db30ec48 100644 --- a/pytorch3d/transforms/transform3d.py +++ b/pytorch3d/transforms/transform3d.py @@ -754,7 +754,7 @@ def _broadcast_bmm(a, b): @torch.no_grad() -def _check_valid_rotation_matrix(R, tol: float = 1e-7): +def _check_valid_rotation_matrix(R, tol: float = 1e-7) -> None: """ Determine if R is a valid rotation matrix by checking it satisfies the following conditions: diff --git a/pytorch3d/vis/plotly_vis.py b/pytorch3d/vis/plotly_vis.py index 7c57cfb4..b83877e0 100644 --- a/pytorch3d/vis/plotly_vis.py +++ b/pytorch3d/vis/plotly_vis.py @@ -24,7 +24,7 @@ from pytorch3d.structures import Meshes, Pointclouds, join_meshes_as_scene Struct = Union[CamerasBase, Meshes, Pointclouds, RayBundle] -def _get_struct_len(struct: Struct): # pragma: no cover +def _get_struct_len(struct: Struct) -> int: # pragma: no cover """ Returns the length (usually corresponds to the batch size) of the input structure. """ @@ -358,8 +358,14 @@ def plot_scene( up_y = _scale_camera_to_bounds(up_y, y_range, False) up_z = _scale_camera_to_bounds(up_z, z_range, False) + # pyre-fixme[6]: For 2nd param expected `Dict[str, int]` but got + # `Dict[str, float]`. camera["eye"] = {"x": eye_x, "y": eye_y, "z": eye_z} + # pyre-fixme[6]: For 2nd param expected `Dict[str, int]` but got + # `Dict[str, float]`. camera["center"] = {"x": at_x, "y": at_y, "z": at_z} + # pyre-fixme[6]: For 2nd param expected `Dict[str, int]` but got + # `Dict[str, float]`. 
camera["up"] = {"x": up_x, "y": up_y, "z": up_z} current_layout.update( @@ -510,7 +516,7 @@ def _add_struct_from_batch( subplot_title: str, scene_dictionary: Dict[str, Dict[str, Struct]], trace_idx: int = 1, -): # pragma: no cover +) -> None: # pragma: no cover """ Adds the struct corresponding to the given scene_num index to a provided scene_dictionary to be passed in to plot_scene @@ -567,7 +573,7 @@ def _add_mesh_trace( subplot_idx: int, ncols: int, lighting: Lighting, -): # pragma: no cover +) -> None: # pragma: no cover """ Adds a trace rendering a Meshes object to the passed in figure, with a given name and in a specific subplot. @@ -641,7 +647,7 @@ def _add_pointcloud_trace( ncols: int, max_points_per_pointcloud: int, marker_size: int, -): # pragma: no cover +) -> None: # pragma: no cover """ Adds a trace rendering a Pointclouds object to the passed in figure, with a given name and in a specific subplot. @@ -703,7 +709,7 @@ def _add_camera_trace( subplot_idx: int, ncols: int, camera_scale: float, -): # pragma: no cover +) -> None: # pragma: no cover """ Adds a trace rendering a Cameras object to the passed in figure, with a given name and in a specific subplot. @@ -761,7 +767,7 @@ def _add_ray_bundle_trace( max_points_per_ray: int, marker_size: int, line_width: int, -): # pragma: no cover +) -> None: # pragma: no cover """ Adds a trace rendering a RayBundle object to the passed in figure, with a given name and in a specific subplot. @@ -918,7 +924,7 @@ def _update_axes_bounds( verts_center: torch.Tensor, max_expand: float, current_layout: go.Scene, # pyre-ignore[11] -): # pragma: no cover +) -> None: # pragma: no cover """ Takes in the vertices' center point and max spread, and the current plotly figure layout and updates the layout to have bounds that include all traces for that subplot. @@ -956,7 +962,7 @@ def _update_axes_bounds( def _scale_camera_to_bounds( coordinate: float, axis_bounds: Tuple[float, float], is_position: bool -): # pragma: no cover +) -> float: # pragma: no cover """ We set our plotly plot's axes' bounding box to [-1,1]x[-1,1]x[-1,1]. As such, the plotly camera location has to be scaled accordingly to have its world coordinates diff --git a/pytorch3d/vis/texture_vis.py b/pytorch3d/vis/texture_vis.py index 5ca8d812..a3b0fcd5 100644 --- a/pytorch3d/vis/texture_vis.py +++ b/pytorch3d/vis/texture_vis.py @@ -19,7 +19,7 @@ def texturesuv_image_matplotlib( color=(1.0, 0.0, 0.0), subsample: Optional[int] = 10000, origin: str = "upper", -): # pragma: no cover +) -> None: # pragma: no cover """ Plot the texture image for one element of a TexturesUV with matplotlib together with verts_uvs positions circled. @@ -65,7 +65,7 @@ def texturesuv_image_PIL( *, texture_index: int = 0, radius: float = 1, - color="red", + color: str = "red", subsample: Optional[int] = 10000, ): # pragma: no cover """