diff --git a/projects/nerf/nerf/nerf_renderer.py b/projects/nerf/nerf/nerf_renderer.py
index d7208973..da8545aa 100644
--- a/projects/nerf/nerf/nerf_renderer.py
+++ b/projects/nerf/nerf/nerf_renderer.py
@@ -343,12 +343,14 @@ class RadianceFieldRenderer(torch.nn.Module):
             # For a full render pass concatenate the output chunks,
             # and reshape to image size.
             out = {
-                k: torch.cat(
-                    [ch_o[k] for ch_o in chunk_outputs],
-                    dim=1,
-                ).view(-1, *self._image_size, 3)
-                if chunk_outputs[0][k] is not None
-                else None
+                k: (
+                    torch.cat(
+                        [ch_o[k] for ch_o in chunk_outputs],
+                        dim=1,
+                    ).view(-1, *self._image_size, 3)
+                    if chunk_outputs[0][k] is not None
+                    else None
+                )
                 for k in ("rgb_fine", "rgb_coarse", "rgb_gt")
             }
         else:
diff --git a/pytorch3d/implicitron/dataset/frame_data.py b/pytorch3d/implicitron/dataset/frame_data.py
index e32c0864..e5fbb6c9 100644
--- a/pytorch3d/implicitron/dataset/frame_data.py
+++ b/pytorch3d/implicitron/dataset/frame_data.py
@@ -576,11 +576,11 @@ class GenericFrameDataBuilder(FrameDataBuilderBase[FrameDataSubtype], ABC):
             camera_quality_score=safe_as_tensor(
                 sequence_annotation.viewpoint_quality_score, torch.float
             ),
-            point_cloud_quality_score=safe_as_tensor(
-                point_cloud.quality_score, torch.float
-            )
-            if point_cloud is not None
-            else None,
+            point_cloud_quality_score=(
+                safe_as_tensor(point_cloud.quality_score, torch.float)
+                if point_cloud is not None
+                else None
+            ),
         )
 
         fg_mask_np: Optional[np.ndarray] = None
diff --git a/pytorch3d/implicitron/dataset/json_index_dataset.py b/pytorch3d/implicitron/dataset/json_index_dataset.py
index 8caf581d..fbbd564e 100644
--- a/pytorch3d/implicitron/dataset/json_index_dataset.py
+++ b/pytorch3d/implicitron/dataset/json_index_dataset.py
@@ -124,9 +124,9 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
             dimension of the cropping bounding box, relative to box size.
""" - frame_annotations_type: ClassVar[ - Type[types.FrameAnnotation] - ] = types.FrameAnnotation + frame_annotations_type: ClassVar[Type[types.FrameAnnotation]] = ( + types.FrameAnnotation + ) path_manager: Any = None frame_annotations_file: str = "" diff --git a/pytorch3d/implicitron/dataset/visualize.py b/pytorch3d/implicitron/dataset/visualize.py index 4ac633f6..9f2f5c4e 100644 --- a/pytorch3d/implicitron/dataset/visualize.py +++ b/pytorch3d/implicitron/dataset/visualize.py @@ -88,9 +88,11 @@ def get_implicitron_sequence_pointcloud( frame_data.camera, frame_data.image_rgb, frame_data.depth_map, - (cast(torch.Tensor, frame_data.fg_probability) > 0.5).float() - if mask_points and frame_data.fg_probability is not None - else None, + ( + (cast(torch.Tensor, frame_data.fg_probability) > 0.5).float() + if mask_points and frame_data.fg_probability is not None + else None + ), ) return point_cloud, frame_data diff --git a/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py b/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py index decf938b..6f13fd00 100644 --- a/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py +++ b/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py @@ -282,9 +282,9 @@ def eval_batch( image_rgb_masked=image_rgb_masked, depth_render=cloned_render["depth_render"], depth_map=frame_data.depth_map, - depth_mask=frame_data.depth_mask[:1] - if frame_data.depth_mask is not None - else None, + depth_mask=( + frame_data.depth_mask[:1] if frame_data.depth_mask is not None else None + ), visdom_env=visualize_visdom_env, ) diff --git a/pytorch3d/implicitron/models/generic_model.py b/pytorch3d/implicitron/models/generic_model.py index 7d319594..a8df7dbb 100644 --- a/pytorch3d/implicitron/models/generic_model.py +++ b/pytorch3d/implicitron/models/generic_model.py @@ -395,9 +395,11 @@ class GenericModel(ImplicitronModelBase): # pyre-ignore: 13 n_targets = ( 1 if evaluation_mode == EvaluationMode.EVALUATION - else batch_size - if self.n_train_target_views <= 0 - else min(self.n_train_target_views, batch_size) + else ( + batch_size + if self.n_train_target_views <= 0 + else min(self.n_train_target_views, batch_size) + ) ) # A helper function for selecting n_target first elements from the input @@ -422,9 +424,12 @@ class GenericModel(ImplicitronModelBase): # pyre-ignore: 13 ray_bundle: ImplicitronRayBundle = self.raysampler( target_cameras, evaluation_mode, - mask=mask_crop[:n_targets] - if mask_crop is not None and sampling_mode == RenderSamplingMode.MASK_SAMPLE - else None, + mask=( + mask_crop[:n_targets] + if mask_crop is not None + and sampling_mode == RenderSamplingMode.MASK_SAMPLE + else None + ), ) # custom_args hold additional arguments to the implicit function. 
diff --git a/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py b/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py
index cb70c957..328c0c2e 100644
--- a/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py
+++ b/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py
@@ -102,9 +102,7 @@ class IdrFeatureField(ImplicitFunctionBase, torch.nn.Module):
             elif self.n_harmonic_functions_xyz >= 0 and layer_idx == 0:
                 torch.nn.init.constant_(lin.bias, 0.0)
                 torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
-                torch.nn.init.normal_(
-                    lin.weight[:, :3], 0.0, 2**0.5 / out_dim**0.5
-                )
+                torch.nn.init.normal_(lin.weight[:, :3], 0.0, 2**0.5 / out_dim**0.5)
             elif self.n_harmonic_functions_xyz >= 0 and layer_idx in self.skip_in:
                 torch.nn.init.constant_(lin.bias, 0.0)
                 torch.nn.init.normal_(lin.weight, 0.0, 2**0.5 / out_dim**0.5)
diff --git a/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py b/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py
index 0706d9a8..d61d10d1 100644
--- a/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py
+++ b/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py
@@ -193,9 +193,9 @@ class NeuralRadianceFieldBase(ImplicitFunctionBase, torch.nn.Module):
         embeds = create_embeddings_for_implicit_function(
             xyz_world=rays_points_world,
             # for 2nd param but got `Union[None, torch.Tensor, torch.nn.Module]`.
-            xyz_embedding_function=self.harmonic_embedding_xyz
-            if self.input_xyz
-            else None,
+            xyz_embedding_function=(
+                self.harmonic_embedding_xyz if self.input_xyz else None
+            ),
             global_code=global_code,
             fun_viewpool=fun_viewpool,
             xyz_in_camera_coords=self.xyz_ray_dir_in_camera_coords,
diff --git a/pytorch3d/implicitron/models/overfit_model.py b/pytorch3d/implicitron/models/overfit_model.py
index 40ee5a1b..0b6d590c 100644
--- a/pytorch3d/implicitron/models/overfit_model.py
+++ b/pytorch3d/implicitron/models/overfit_model.py
@@ -356,9 +356,12 @@ class OverfitModel(ImplicitronModelBase):  # pyre-ignore: 13
         ray_bundle: ImplicitronRayBundle = self.raysampler(
             camera,
             evaluation_mode,
-            mask=mask_crop
-            if mask_crop is not None and sampling_mode == RenderSamplingMode.MASK_SAMPLE
-            else None,
+            mask=(
+                mask_crop
+                if mask_crop is not None
+                and sampling_mode == RenderSamplingMode.MASK_SAMPLE
+                else None
+            ),
         )
 
         inputs_to_be_chunked = {}
@@ -381,10 +384,12 @@ class OverfitModel(ImplicitronModelBase):  # pyre-ignore: 13
                 frame_timestamp=frame_timestamp,
             )
         implicit_functions = [
-            functools.partial(implicit_function, global_code=global_code)
-            if isinstance(implicit_function, Callable)
-            else functools.partial(
-                implicit_function.forward, global_code=global_code
+            (
+                functools.partial(implicit_function, global_code=global_code)
+                if isinstance(implicit_function, Callable)
+                else functools.partial(
+                    implicit_function.forward, global_code=global_code
+                )
             )
             for implicit_function in implicit_functions
         ]
diff --git a/pytorch3d/implicitron/models/renderer/ray_sampler.py b/pytorch3d/implicitron/models/renderer/ray_sampler.py
index fe464f67..f64cdc70 100644
--- a/pytorch3d/implicitron/models/renderer/ray_sampler.py
+++ b/pytorch3d/implicitron/models/renderer/ray_sampler.py
@@ -145,10 +145,12 @@ class AbstractMaskRaySampler(RaySamplerBase, torch.nn.Module):
             n_pts_per_ray=n_pts_per_ray_training,
             min_depth=0.0,
             max_depth=0.0,
-            n_rays_per_image=self.n_rays_per_image_sampled_from_mask
-            if self._sampling_mode[EvaluationMode.TRAINING]
-            == RenderSamplingMode.MASK_SAMPLE
-            else None,
+            n_rays_per_image=(
+                self.n_rays_per_image_sampled_from_mask
+                if self._sampling_mode[EvaluationMode.TRAINING]
+                == RenderSamplingMode.MASK_SAMPLE
+                else None
+            ),
             n_rays_total=self.n_rays_total_training,
             unit_directions=True,
             stratified_sampling=self.stratified_point_sampling_training,
@@ -160,10 +162,12 @@
             n_pts_per_ray=n_pts_per_ray_evaluation,
             min_depth=0.0,
             max_depth=0.0,
-            n_rays_per_image=self.n_rays_per_image_sampled_from_mask
-            if self._sampling_mode[EvaluationMode.EVALUATION]
-            == RenderSamplingMode.MASK_SAMPLE
-            else None,
+            n_rays_per_image=(
+                self.n_rays_per_image_sampled_from_mask
+                if self._sampling_mode[EvaluationMode.EVALUATION]
+                == RenderSamplingMode.MASK_SAMPLE
+                else None
+            ),
             unit_directions=True,
             stratified_sampling=self.stratified_point_sampling_evaluation,
         )
diff --git a/pytorch3d/implicitron/models/renderer/ray_tracing.py b/pytorch3d/implicitron/models/renderer/ray_tracing.py
index 5c0dd0a4..417bc869 100644
--- a/pytorch3d/implicitron/models/renderer/ray_tracing.py
+++ b/pytorch3d/implicitron/models/renderer/ray_tracing.py
@@ -415,7 +415,7 @@ class RayTracing(Configurable, nn.Module):
             ]
             sampler_dists[mask_intersect_idx[p_out_mask]] = pts_intervals[
                 p_out_mask,
-                :
+                :,
                 # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]` but
                 #  got `Tensor`.
             ][torch.arange(n_p_out), out_pts_idx]
diff --git a/pytorch3d/implicitron/models/renderer/sdf_renderer.py b/pytorch3d/implicitron/models/renderer/sdf_renderer.py
index 12e54b9d..95de20f1 100644
--- a/pytorch3d/implicitron/models/renderer/sdf_renderer.py
+++ b/pytorch3d/implicitron/models/renderer/sdf_renderer.py
@@ -43,9 +43,9 @@ class SignedDistanceFunctionRenderer(BaseRenderer, torch.nn.Module):  # pyre-ign
 
         run_auto_creation(self)
 
-        self.ray_normal_coloring_network_args[
-            "feature_vector_size"
-        ] = render_features_dimensions
+        self.ray_normal_coloring_network_args["feature_vector_size"] = (
+            render_features_dimensions
+        )
         self._rgb_network = RayNormalColoringNetwork(
             **self.ray_normal_coloring_network_args
         )
@@ -201,15 +201,15 @@ class SignedDistanceFunctionRenderer(BaseRenderer, torch.nn.Module):  # pyre-ign
             None, :, 0, :
         ]
         normals_full.view(-1, 3)[surface_mask] = normals
-        render_full.view(-1, self.render_features_dimensions)[
-            surface_mask
-        ] = self._rgb_network(
-            features,
-            differentiable_surface_points[None],
-            normals,
-            ray_bundle,
-            surface_mask[None, :, None],
-            pooling_fn=None,  # TODO
+        render_full.view(-1, self.render_features_dimensions)[surface_mask] = (
+            self._rgb_network(
+                features,
+                differentiable_surface_points[None],
+                normals,
+                ray_bundle,
+                surface_mask[None, :, None],
+                pooling_fn=None,  # TODO
+            )
         )
         mask_full.view(-1, 1)[~surface_mask] = torch.sigmoid(
             # pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
diff --git a/pytorch3d/implicitron/tools/config.py b/pytorch3d/implicitron/tools/config.py
index 0fb4012e..b0459e82 100644
--- a/pytorch3d/implicitron/tools/config.py
+++ b/pytorch3d/implicitron/tools/config.py
@@ -241,9 +241,9 @@ class _Registry:
     """
 
    def __init__(self) -> None:
-        self._mapping: Dict[
-            Type[ReplaceableBase], Dict[str, Type[ReplaceableBase]]
-        ] = defaultdict(dict)
+        self._mapping: Dict[Type[ReplaceableBase], Dict[str, Type[ReplaceableBase]]] = (
+            defaultdict(dict)
+        )
 
     def register(self, some_class: Type[_X]) -> Type[_X]:
         """
diff --git a/pytorch3d/implicitron/tools/eval_video_trajectory.py b/pytorch3d/implicitron/tools/eval_video_trajectory.py
index bda9ec29..554b88a1 100644
--- a/pytorch3d/implicitron/tools/eval_video_trajectory.py
+++ b/pytorch3d/implicitron/tools/eval_video_trajectory.py
@@ -139,9 +139,11 @@ def generate_eval_video_cameras(
         fit = fit_circle_in_3d(
             cam_centers,
             angles=angle,
-            offset=angle.new_tensor(traj_offset_canonical)
-            if traj_offset_canonical is not None
-            else None,
+            offset=(
+                angle.new_tensor(traj_offset_canonical)
+                if traj_offset_canonical is not None
+                else None
+            ),
             up=angle.new_tensor(up),
         )
         traj = fit.generated_points
diff --git a/pytorch3d/implicitron/tools/utils.py b/pytorch3d/implicitron/tools/utils.py
index 6cb0d4ec..895a8727 100644
--- a/pytorch3d/implicitron/tools/utils.py
+++ b/pytorch3d/implicitron/tools/utils.py
@@ -146,9 +146,11 @@ def cat_dataclass(batch, tensor_collator: Callable):
             )
         elif isinstance(elem_f, collections.abc.Mapping):
             collated[f.name] = {
-                k: tensor_collator([getattr(e, f.name)[k] for e in batch])
-                if elem_f[k] is not None
-                else None
+                k: (
+                    tensor_collator([getattr(e, f.name)[k] for e in batch])
+                    if elem_f[k] is not None
+                    else None
+                )
                 for k in elem_f
             }
         else:
diff --git a/pytorch3d/renderer/fisheyecameras.py b/pytorch3d/renderer/fisheyecameras.py
index 3da558df..fe4bcef3 100644
--- a/pytorch3d/renderer/fisheyecameras.py
+++ b/pytorch3d/renderer/fisheyecameras.py
@@ -81,7 +81,6 @@ class FishEyeCameras(CamerasBase):
         device: Device = "cpu",
         image_size: Optional[Union[List, Tuple, torch.Tensor]] = None,
     ) -> None:
-
         """
         Args:
 
diff --git a/pytorch3d/renderer/mesh/clip.py b/pytorch3d/renderer/mesh/clip.py
index 6261f9c5..eca5c94e 100644
--- a/pytorch3d/renderer/mesh/clip.py
+++ b/pytorch3d/renderer/mesh/clip.py
@@ -712,9 +712,9 @@ def convert_clipped_rasterization_to_original_faces(
     )
     bary_coords_unclipped_subset = bary_coords_unclipped_subset.reshape([N * 3])
 
-    bary_coords_unclipped[
-        faces_to_convert_mask_expanded
-    ] = bary_coords_unclipped_subset
+    bary_coords_unclipped[faces_to_convert_mask_expanded] = (
+        bary_coords_unclipped_subset
+    )
 
     # dists for case 4 faces will be handled in the rasterizer
     # so no need to modify them here.
diff --git a/pytorch3d/renderer/mesh/rasterize_meshes.py b/pytorch3d/renderer/mesh/rasterize_meshes.py
index afcd7496..878519e4 100644
--- a/pytorch3d/renderer/mesh/rasterize_meshes.py
+++ b/pytorch3d/renderer/mesh/rasterize_meshes.py
@@ -605,7 +605,10 @@ def rasterize_meshes_python(  # noqa: C901
     # If faces were clipped, map the rasterization result to be in terms of the
     # original unclipped faces. This may involve converting barycentric
     # coordinates
-    (face_idxs, bary_coords,) = convert_clipped_rasterization_to_original_faces(
+    (
+        face_idxs,
+        bary_coords,
+    ) = convert_clipped_rasterization_to_original_faces(
         face_idxs,
         bary_coords,
         # pyre-fixme[61]: `clipped_faces` may not be initialized here.
diff --git a/pytorch3d/renderer/opengl/__init__.py b/pytorch3d/renderer/opengl/__init__.py
index f0f6b4c1..6e363be3 100644
--- a/pytorch3d/renderer/opengl/__init__.py
+++ b/pytorch3d/renderer/opengl/__init__.py
@@ -4,6 +4,7 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+
 # If we can access EGL, import MeshRasterizerOpenGL.
 def _can_import_egl_and_pycuda():
     import os
diff --git a/pytorch3d/renderer/opengl/rasterizer_opengl.py b/pytorch3d/renderer/opengl/rasterizer_opengl.py
index cf61d0d7..c7b0073f 100644
--- a/pytorch3d/renderer/opengl/rasterizer_opengl.py
+++ b/pytorch3d/renderer/opengl/rasterizer_opengl.py
@@ -292,9 +292,11 @@ class _OpenGLMachinery:
             pix_to_face, bary_coord, zbuf = self._rasterize_mesh(
                 mesh,
                 image_size,
-                projection_matrix=projection_matrix[mesh_id]
-                if projection_matrix.shape[0] > 1
-                else None,
+                projection_matrix=(
+                    projection_matrix[mesh_id]
+                    if projection_matrix.shape[0] > 1
+                    else None
+                ),
             )
             pix_to_faces.append(pix_to_face)
             bary_coords.append(bary_coord)
diff --git a/tests/implicitron/test_extending_orm_types.py b/tests/implicitron/test_extending_orm_types.py
index abfb3aab..e6ceb861 100644
--- a/tests/implicitron/test_extending_orm_types.py
+++ b/tests/implicitron/test_extending_orm_types.py
@@ -61,9 +61,9 @@ class ExtendedSqlFrameAnnotation(SqlFrameAnnotation):
 
 
 class ExtendedSqlIndexDataset(SqlIndexDataset):
-    frame_annotations_type: ClassVar[
-        Type[SqlFrameAnnotation]
-    ] = ExtendedSqlFrameAnnotation
+    frame_annotations_type: ClassVar[Type[SqlFrameAnnotation]] = (
+        ExtendedSqlFrameAnnotation
+    )
 
 
 class CanineFrameData(FrameData):
@@ -96,9 +96,9 @@ class CanineFrameDataBuilder(
 
 
 class CanineSqlIndexDataset(SqlIndexDataset):
-    frame_annotations_type: ClassVar[
-        Type[SqlFrameAnnotation]
-    ] = ExtendedSqlFrameAnnotation
+    frame_annotations_type: ClassVar[Type[SqlFrameAnnotation]] = (
+        ExtendedSqlFrameAnnotation
+    )
     frame_data_builder_class_type: str = "CanineFrameDataBuilder"
 
 
diff --git a/tests/implicitron/test_frame_data_builder.py b/tests/implicitron/test_frame_data_builder.py
index 73145815..28810455 100644
--- a/tests/implicitron/test_frame_data_builder.py
+++ b/tests/implicitron/test_frame_data_builder.py
@@ -85,11 +85,11 @@ class TestFrameDataBuilder(TestCaseMixin, unittest.TestCase):
             camera_quality_score=safe_as_tensor(
                 self.seq_annotation.viewpoint_quality_score, torch.float
            ),
-            point_cloud_quality_score=safe_as_tensor(
-                point_cloud.quality_score, torch.float
-            )
-            if point_cloud is not None
-            else None,
+            point_cloud_quality_score=(
+                safe_as_tensor(point_cloud.quality_score, torch.float)
+                if point_cloud is not None
+                else None
+            ),
         )
 
     def test_frame_data_builder_args(self):
diff --git a/tests/implicitron/test_json_index_dataset_provider_v2.py b/tests/implicitron/test_json_index_dataset_provider_v2.py
index c99481a4..456b44fa 100644
--- a/tests/implicitron/test_json_index_dataset_provider_v2.py
+++ b/tests/implicitron/test_json_index_dataset_provider_v2.py
@@ -168,7 +168,10 @@ def _make_random_json_dataset_map_provider_v2_data(
         mask_path = os.path.join(maskdir, f"frame{i:05d}.png")
         mask = np.zeros((H, W))
         mask[H // 2 :, W // 2 :] = 1
-        Image.fromarray((mask * 255.0).astype(np.uint8), mode="L",).convert(
+        Image.fromarray(
+            (mask * 255.0).astype(np.uint8),
+            mode="L",
+        ).convert(
             "L"
         ).save(mask_path)
 
diff --git a/tests/implicitron/test_models_renderer_base.py b/tests/implicitron/test_models_renderer_base.py
index 2a4ff16d..cafd06a8 100644
--- a/tests/implicitron/test_models_renderer_base.py
+++ b/tests/implicitron/test_models_renderer_base.py
@@ -222,10 +222,7 @@ class TestRendererBase(TestCaseMixin, unittest.TestCase):
         np.testing.assert_allclose(
             (delta**2) / 3
             - (4 / 15)
-            * (
-                (delta**4 * (12 * mu**2 - delta**2))
-                / (3 * mu**2 + delta**2) ** 2
-            ),
+            * ((delta**4 * (12 * mu**2 - delta**2)) / (3 * mu**2 + delta**2) ** 2),
             t_var.numpy(),
         )
         np.testing.assert_allclose(
diff --git a/tests/test_meshes.py b/tests/test_meshes.py
index ed87437e..564694a5 100644
--- a/tests/test_meshes.py
+++ b/tests/test_meshes.py
@@ -983,7 +983,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         verts_list = []
         faces_list = []
         verts_faces = [(10, 100), (20, 200)]
-        for (V, F) in verts_faces:
+        for V, F in verts_faces:
             verts = torch.rand((V, 3), dtype=torch.float32, device=device)
             faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
             verts_list.append(verts)
@@ -1007,7 +1007,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         device = torch.device("cuda:0")
         verts_list = []
         faces_list = []
-        for (V, F) in [(10, 100)]:
+        for V, F in [(10, 100)]:
             verts = torch.rand((V, 3), dtype=torch.float32, device=device)
             faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
             verts_list.append(verts)
@@ -1025,7 +1025,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         verts_list = []
         faces_list = []
         verts_faces = [(10, 100), (20, 200), (30, 300)]
-        for (V, F) in verts_faces:
+        for V, F in verts_faces:
             verts = torch.rand((V, 3), dtype=torch.float32, device=device)
             faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
             verts_list.append(verts)
@@ -1047,7 +1047,7 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
         verts_list = []
         faces_list = []
         verts_faces = [(10, 100), (20, 200), (30, 300)]
-        for (V, F) in verts_faces:
+        for V, F in verts_faces:
             verts = torch.rand((V, 3), dtype=torch.float32, device=device)
             faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
             verts_list.append(verts)
diff --git a/tests/test_render_implicit.py b/tests/test_render_implicit.py
index cc797b7f..4609cbc5 100644
--- a/tests/test_render_implicit.py
+++ b/tests/test_render_implicit.py
@@ -284,7 +284,7 @@ class TestRenderImplicit(TestCaseMixin, unittest.TestCase):
             os.makedirs(outdir, exist_ok=True)
 
             frames = []
-            for (image_opacity, image_opacity_mesh) in zip(
+            for image_opacity, image_opacity_mesh in zip(
                 images_opacities, images_opacities_meshes
             ):
                 image, opacity = image_opacity.split([3, 1], dim=-1)
diff --git a/tests/test_render_meshes.py b/tests/test_render_meshes.py
index 0528909f..31627d99 100644
--- a/tests/test_render_meshes.py
+++ b/tests/test_render_meshes.py
@@ -303,7 +303,6 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
         self.test_simple_sphere(check_depth=True)
 
     def test_simple_sphere_screen(self):
-
         """
         Test output when rendering with PerspectiveCameras & OrthographicCameras
         in NDC vs screen space.
@@ -1221,7 +1220,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
             "flat": HardFlatShader,
             "splatter": SplatterPhongShader,
         }
-        for (name, shader_init) in shaders.items():
+        for name, shader_init in shaders.items():
             if rasterizer_type == MeshRasterizerOpenGL and name != "splatter":
                 continue
             if rasterizer_type == MeshRasterizer and name == "splatter":
diff --git a/tests/test_render_meshes_clipped.py b/tests/test_render_meshes_clipped.py
index 153d10bd..28241fe1 100644
--- a/tests/test_render_meshes_clipped.py
+++ b/tests/test_render_meshes_clipped.py
@@ -620,7 +620,7 @@ class TestRenderMeshesClipping(TestCaseMixin, unittest.TestCase):
         plane into a quadrilateral, there shouldn't be duplicates indices of the
         face in the pix_to_face output of rasterization.
         """
-        for (device, bin_size) in [("cpu", 0), ("cuda:0", 0), ("cuda:0", None)]:
+        for device, bin_size in [("cpu", 0), ("cuda:0", 0), ("cuda:0", None)]:
             verts = torch.tensor(
                 [[0.0, -10.0, 1.0], [-1.0, 2.0, -2.0], [1.0, 5.0, -10.0]],
                 dtype=torch.float32,
@@ -673,7 +673,7 @@ class TestRenderMeshesClipping(TestCaseMixin, unittest.TestCase):
         device = "cuda:0"
         mesh1 = torus(20.0, 85.0, 32, 16, device=device)
         mesh2 = torus(2.0, 3.0, 32, 16, device=device)
-        for (mesh, z_clip) in [(mesh1, None), (mesh2, 5.0)]:
+        for mesh, z_clip in [(mesh1, None), (mesh2, 5.0)]:
             tex = TexturesVertex(verts_features=torch.rand_like(mesh.verts_padded()))
             mesh.textures = tex
             raster_settings = RasterizationSettings(
diff --git a/tests/test_render_points.py b/tests/test_render_points.py
index eede6ebc..84dc70dd 100644
--- a/tests/test_render_points.py
+++ b/tests/test_render_points.py
@@ -384,7 +384,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
             (AlphaCompositor, alpha_composite),
         ]
 
-        for (compositor_class, composite_func) in compositor_funcs:
+        for compositor_class, composite_func in compositor_funcs:
 
             compositor = compositor_class(background_color)
 
@@ -435,7 +435,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
             (AlphaCompositor, alpha_composite),
         ]
 
-        for (compositor_class, composite_func) in compositor_funcs:
+        for compositor_class, composite_func in compositor_funcs:
 
             compositor = compositor_class(background_color)
 
diff --git a/tests/test_render_volumes.py b/tests/test_render_volumes.py
index d1cf4760..6dc10e30 100644
--- a/tests/test_render_volumes.py
+++ b/tests/test_render_volumes.py
@@ -392,7 +392,7 @@ class TestRenderVolumes(TestCaseMixin, unittest.TestCase):
             os.makedirs(outdir, exist_ok=True)
 
             frames = []
-            for (image, image_pts) in zip(images, images_pts):
+            for image, image_pts in zip(images, images_pts):
                 diff_image = (
                     ((image - image_pts) * 0.5 + 0.5)
                     .mean(dim=2, keepdim=True)
diff --git a/tests/test_vert_align.py b/tests/test_vert_align.py
index c264521c..56b156b1 100644
--- a/tests/test_vert_align.py
+++ b/tests/test_vert_align.py
@@ -100,7 +100,7 @@ class TestVertAlign(TestCaseMixin, unittest.TestCase):
 
 def init_feats(batch_size: int = 10, num_channels: int = 256, device: str = "cuda"):
     H, W = [14, 28], [14, 28]
     feats = []
-    for (h, w) in zip(H, W):
+    for h, w in zip(H, W):
         feats.append(torch.rand((batch_size, num_channels, h, w), device=device))
     return feats