From bef959c75587f4cdba02c1a7e87581916bca5e29 Mon Sep 17 00:00:00 2001
From: John Reese
Date: Wed, 11 May 2022 19:55:56 -0700
Subject: [PATCH] formatting changes from black 22.3.0

Summary:
Applies the black-fbsource codemod with the new build of pyfmt.

paintitblack

Reviewed By: lisroach

Differential Revision: D36324783

fbshipit-source-id: 280c09e88257e5e569ab729691165d8dedd767bc
---
 projects/nerf/nerf/eval_video_utils.py                |  2 +-
 pytorch3d/common/datatypes.py                         |  1 -
 pytorch3d/common/workaround/symeig3x3.py              |  4 ++--
 pytorch3d/implicitron/models/autodecoder.py           |  2 +-
 .../models/implicit_function/idr_feature_field.py     | 10 +++++-----
 .../models/implicit_function/neural_radiance_field.py |  2 +-
 pytorch3d/implicitron/models/renderer/base.py         |  2 +-
 pytorch3d/implicitron/models/renderer/ray_tracing.py  |  6 +++---
 pytorch3d/implicitron/tools/camera_utils.py           |  2 +-
 pytorch3d/implicitron/tools/circle_fitting.py         |  4 ++--
 pytorch3d/implicitron/tools/metric_utils.py           |  2 +-
 pytorch3d/ops/cameras_alignment.py                    |  2 +-
 pytorch3d/ops/marching_cubes.py                       |  2 +-
 pytorch3d/ops/perspective_n_points.py                 |  2 +-
 pytorch3d/ops/sample_farthest_points.py               |  2 +-
 pytorch3d/transforms/se3.py                           |  6 +++---
 setup.py                                              |  1 -
 tests/benchmarks/bm_render_implicit.py                |  2 +-
 tests/benchmarks/bm_render_volumes.py                 |  2 +-
 tests/implicitron/test_evaluation.py                  |  6 +++---
 tests/test_acos_linear_extrapolation.py               |  2 +-
 tests/test_lighting.py                                |  2 +-
 tests/test_rasterize_meshes.py                        |  8 ++++----
 tests/test_render_implicit.py                         |  4 ++--
 tests/test_sample_points_from_meshes.py               |  2 +-
 25 files changed, 39 insertions(+), 41 deletions(-)

diff --git a/projects/nerf/nerf/eval_video_utils.py b/projects/nerf/nerf/eval_video_utils.py
index a84e4b17..0b2f0be9 100644
--- a/projects/nerf/nerf/eval_video_utils.py
+++ b/projects/nerf/nerf/eval_video_utils.py
@@ -97,7 +97,7 @@ def generate_eval_video_cameras(
         cam_centers_on_plane.t() @ cam_centers_on_plane
     ) / cam_centers_on_plane.shape[0]
     _, e_vec = torch.symeig(cov, eigenvectors=True)
-    traj_radius = (cam_centers_on_plane ** 2).sum(dim=1).sqrt().mean()
+    traj_radius = (cam_centers_on_plane**2).sum(dim=1).sqrt().mean()
     angle = torch.linspace(0, 2.0 * math.pi, n_eval_cams)
     traj = traj_radius * torch.stack(
         (torch.zeros_like(angle), angle.cos(), angle.sin()), dim=-1
diff --git a/pytorch3d/common/datatypes.py b/pytorch3d/common/datatypes.py
index 489d60ce..63e698b3 100644
--- a/pytorch3d/common/datatypes.py
+++ b/pytorch3d/common/datatypes.py
@@ -71,6 +71,5 @@ elif sys.version_info >= (3, 7, 0):
     def get_args(cls):  # pragma: no cover
         return getattr(cls, "__args__", None)
 
-
 else:
     raise ImportError("This module requires Python 3.7+")
diff --git a/pytorch3d/common/workaround/symeig3x3.py b/pytorch3d/common/workaround/symeig3x3.py
index 21ad358c..692bc9f4 100644
--- a/pytorch3d/common/workaround/symeig3x3.py
+++ b/pytorch3d/common/workaround/symeig3x3.py
@@ -80,7 +80,7 @@ class _SymEig3x3(nn.Module):
         q = inputs_trace / 3.0
 
         # Calculate squared sum of elements outside the main diagonal / 2
-        p1 = ((inputs ** 2).sum(dim=(-1, -2)) - (inputs_diag ** 2).sum(-1)) / 2
+        p1 = ((inputs**2).sum(dim=(-1, -2)) - (inputs_diag**2).sum(-1)) / 2
         p2 = ((inputs_diag - q[..., None]) ** 2).sum(dim=-1) + 2.0 * p1.clamp(self._eps)
 
         p = torch.sqrt(p2 / 6.0)
@@ -195,7 +195,7 @@ class _SymEig3x3(nn.Module):
             cross_products[..., :1, :]
         )
 
-        norms_sq = (cross_products ** 2).sum(dim=-1)
+        norms_sq = (cross_products**2).sum(dim=-1)
         max_norms_index = norms_sq.argmax(dim=-1)  # pyre-ignore[16]
 
         # Pick only the cross-product with highest squared norm for each input
diff --git a/pytorch3d/implicitron/models/autodecoder.py b/pytorch3d/implicitron/models/autodecoder.py
index 8b1dd4fb..bac6a416 100644
--- a/pytorch3d/implicitron/models/autodecoder.py
+++ b/pytorch3d/implicitron/models/autodecoder.py
@@ -73,7 +73,7 @@ class Autodecoder(Configurable, torch.nn.Module):
     def calc_squared_encoding_norm(self):
         if self.n_instances <= 0:
             return None
-        return (self._autodecoder_codes.weight ** 2).mean()
+        return (self._autodecoder_codes.weight**2).mean()
 
     def get_encoding_dim(self) -> int:
         if self.n_instances <= 0:
diff --git a/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py b/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py
index 1cf39275..17faf9c3 100644
--- a/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py
+++ b/pytorch3d/implicitron/models/implicit_function/idr_feature_field.py
@@ -59,7 +59,7 @@ class IdrFeatureField(ImplicitFunctionBase, torch.nn.Module):
                 if layer_idx == self.num_layers - 2:
                     torch.nn.init.normal_(
                         lin.weight,
-                        mean=math.pi ** 0.5 / dims[layer_idx] ** 0.5,
+                        mean=math.pi**0.5 / dims[layer_idx] ** 0.5,
                         std=0.0001,
                     )
                     torch.nn.init.constant_(lin.bias, -self.bias)
@@ -67,15 +67,15 @@
                     torch.nn.init.constant_(lin.bias, 0.0)
                     torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
                     torch.nn.init.normal_(
-                        lin.weight[:, :3], 0.0, 2 ** 0.5 / out_dim ** 0.5
+                        lin.weight[:, :3], 0.0, 2**0.5 / out_dim**0.5
                     )
                 elif self.n_harmonic_functions_xyz > 0 and layer_idx in self.skip_in:
                     torch.nn.init.constant_(lin.bias, 0.0)
-                    torch.nn.init.normal_(lin.weight, 0.0, 2 ** 0.5 / out_dim ** 0.5)
+                    torch.nn.init.normal_(lin.weight, 0.0, 2**0.5 / out_dim**0.5)
                     torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3) :], 0.0)
                 else:
                     torch.nn.init.constant_(lin.bias, 0.0)
-                    torch.nn.init.normal_(lin.weight, 0.0, 2 ** 0.5 / out_dim ** 0.5)
+                    torch.nn.init.normal_(lin.weight, 0.0, 2**0.5 / out_dim**0.5)
 
             if self.weight_norm:
                 lin = nn.utils.weight_norm(lin)
@@ -130,7 +130,7 @@ class IdrFeatureField(ImplicitFunctionBase, torch.nn.Module):
         x = embedding
         for layer_idx in range(self.num_layers - 1):
             if layer_idx in self.skip_in:
-                x = torch.cat([x, embedding], dim=-1) / 2 ** 0.5
+                x = torch.cat([x, embedding], dim=-1) / 2**0.5
 
             # pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a function.
             x = self.linear_layers[layer_idx](x)
diff --git a/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py b/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py
index 0c6e6e30..8592f6de 100644
--- a/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py
+++ b/pytorch3d/implicitron/models/implicit_function/neural_radiance_field.py
@@ -386,7 +386,7 @@ class TransformerWithInputSkips(torch.nn.Module):
         layers_pool, layers_ray = [], []
         dimout = 0
         for layeri in range(n_layers):
-            dimin = int(round(hidden_dim / (dim_down_factor ** layeri)))
+            dimin = int(round(hidden_dim / (dim_down_factor**layeri)))
             dimout = int(round(hidden_dim / (dim_down_factor ** (layeri + 1))))
             logger.info(f"Tr: {dimin} -> {dimout}")
             for _i, l in enumerate((layers_pool, layers_ray)):
diff --git a/pytorch3d/implicitron/models/renderer/base.py b/pytorch3d/implicitron/models/renderer/base.py
index 3b5f2d83..b57c1de6 100644
--- a/pytorch3d/implicitron/models/renderer/base.py
+++ b/pytorch3d/implicitron/models/renderer/base.py
@@ -87,7 +87,7 @@ class BaseRenderer(ABC, ReplaceableBase):
         ray_bundle,
         implicit_functions: List[ImplicitFunctionWrapper],
         evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
-        **kwargs
+        **kwargs,
     ) -> RendererOutput:
         """
         Each Renderer should implement its own forward function
diff --git a/pytorch3d/implicitron/models/renderer/ray_tracing.py b/pytorch3d/implicitron/models/renderer/ray_tracing.py
index 4dbc64c4..85e2248d 100644
--- a/pytorch3d/implicitron/models/renderer/ray_tracing.py
+++ b/pytorch3d/implicitron/models/renderer/ray_tracing.py
@@ -295,7 +295,7 @@ class RayTracing(Configurable, nn.Module):
             ) and not_proj_iters < self.line_step_iters:
                 # Step backwards
                 acc_start_dis[not_projected_start] -= (
-                    (1 - self.line_search_step) / (2 ** not_proj_iters)
+                    (1 - self.line_search_step) / (2**not_proj_iters)
                 ) * curr_sdf_start[not_projected_start]
                 curr_start_points[not_projected_start] = (
                     cam_loc
@@ -303,7 +303,7 @@
                 ).reshape(-1, 3)[not_projected_start]
 
                 acc_end_dis[not_projected_end] += (
-                    (1 - self.line_search_step) / (2 ** not_proj_iters)
+                    (1 - self.line_search_step) / (2**not_proj_iters)
                 ) * curr_sdf_end[not_projected_end]
                 curr_end_points[not_projected_end] = (
                     cam_loc
@@ -553,7 +553,7 @@ def _get_sphere_intersection(
     # cam_loc = cam_loc.unsqueeze(-1)
     # ray_cam_dot = torch.bmm(ray_directions, cam_loc).squeeze()
     ray_cam_dot = (ray_directions * cam_loc).sum(-1)  # n_images x n_rays
-    under_sqrt = ray_cam_dot ** 2 - (cam_loc.norm(2, dim=-1) ** 2 - r ** 2)
+    under_sqrt = ray_cam_dot**2 - (cam_loc.norm(2, dim=-1) ** 2 - r**2)
 
     under_sqrt = under_sqrt.reshape(-1)
     mask_intersect = under_sqrt > 0
diff --git a/pytorch3d/implicitron/tools/camera_utils.py b/pytorch3d/implicitron/tools/camera_utils.py
index 3148adf9..ecf6e9fa 100644
--- a/pytorch3d/implicitron/tools/camera_utils.py
+++ b/pytorch3d/implicitron/tools/camera_utils.py
@@ -101,7 +101,7 @@ def volumetric_camera_overlaps(
     """
     device = cameras.device
     ba = cameras.R.shape[0]
-    n_vox = int(resol ** 3)
+    n_vox = int(resol**3)
     grid = pt3d.structures.Volumes(
         densities=torch.zeros([1, 1, resol, resol, resol], device=device),
         volume_translation=-torch.FloatTensor(scene_center)[None].to(device),
diff --git a/pytorch3d/implicitron/tools/circle_fitting.py b/pytorch3d/implicitron/tools/circle_fitting.py
index 77e22608..05f16e74 100644
--- a/pytorch3d/implicitron/tools/circle_fitting.py
+++ b/pytorch3d/implicitron/tools/circle_fitting.py
@@ -102,13 +102,13 @@ def fit_circle_in_2d(
         Circle2D object
     """
     design = torch.cat([points2d, torch.ones_like(points2d[:, :1])], dim=1)
-    rhs = (points2d ** 2).sum(1)
+    rhs = (points2d**2).sum(1)
     n_provided = points2d.shape[0]
     if n_provided < 3:
         raise ValueError(f"{n_provided} points are not enough to determine a circle")
     solution = lstsq(design, rhs[:, None])
     center = solution[:2, 0] / 2
-    radius = torch.sqrt(solution[2, 0] + (center ** 2).sum())
+    radius = torch.sqrt(solution[2, 0] + (center**2).sum())
     if n_points > 0:
         if angles is not None:
             warnings.warn("n_points ignored because angles provided")
diff --git a/pytorch3d/implicitron/tools/metric_utils.py b/pytorch3d/implicitron/tools/metric_utils.py
index 05433027..9c565d6e 100644
--- a/pytorch3d/implicitron/tools/metric_utils.py
+++ b/pytorch3d/implicitron/tools/metric_utils.py
@@ -65,7 +65,7 @@ def eval_depth(
 
     df = gt - pred
 
-    mse_depth = (dmask * (df ** 2)).sum((1, 2, 3)) / dmask_mass
+    mse_depth = (dmask * (df**2)).sum((1, 2, 3)) / dmask_mass
     abs_depth = (dmask * df.abs()).sum((1, 2, 3)) / dmask_mass
 
     return mse_depth, abs_depth
diff --git a/pytorch3d/ops/cameras_alignment.py b/pytorch3d/ops/cameras_alignment.py
index 148bf252..4534341a 100644
--- a/pytorch3d/ops/cameras_alignment.py
+++ b/pytorch3d/ops/cameras_alignment.py
@@ -217,7 +217,7 @@ def _align_camera_extrinsics(
         # of centered A and centered B
         Ac = A - Amu
         Bc = B - Bmu
-        align_t_s = (Ac * Bc).mean() / (Ac ** 2).mean().clamp(eps)
+        align_t_s = (Ac * Bc).mean() / (Ac**2).mean().clamp(eps)
     else:
         # set the scale to identity
         align_t_s = 1.0
diff --git a/pytorch3d/ops/marching_cubes.py b/pytorch3d/ops/marching_cubes.py
index 24074527..8a5d93ae 100644
--- a/pytorch3d/ops/marching_cubes.py
+++ b/pytorch3d/ops/marching_cubes.py
@@ -240,7 +240,7 @@ def _get_edge_indices(edges: int) -> List[int]:
 
     edge_indices = []
     for i in range(12):
-        if edges & (2 ** i):
+        if edges & (2**i):
             edge_indices.append(i)
     return edge_indices
 
diff --git a/pytorch3d/ops/perspective_n_points.py b/pytorch3d/ops/perspective_n_points.py
index 92cf4dfb..2f552a6e 100644
--- a/pytorch3d/ops/perspective_n_points.py
+++ b/pytorch3d/ops/perspective_n_points.py
@@ -206,7 +206,7 @@ def _kernel_vec_distances(v):
     # this should produce B x 6 x (D choose 2) tensor
 
     # we should take dot-product of all (i,i)
-    rows_ii = (dv ** 2).sum(dim=-2)
+    rows_ii = (dv**2).sum(dim=-2)
 
     # this should produce B x 6 x D tensor
     return torch.cat((rows_ii, rows_2ij), dim=-1)
diff --git a/pytorch3d/ops/sample_farthest_points.py b/pytorch3d/ops/sample_farthest_points.py
index 7b735104..47b5a96e 100644
--- a/pytorch3d/ops/sample_farthest_points.py
+++ b/pytorch3d/ops/sample_farthest_points.py
@@ -151,7 +151,7 @@ def sample_farthest_points_naive(
         # and all the other points. If a point has already been selected
         # it's distance will be 0.0 so it will not be selected again as the max.
         dist = points[n, selected_idx, :] - points[n, : lengths[n], :]
-        dist_to_last_selected = (dist ** 2).sum(-1)  # (P - i)
+        dist_to_last_selected = (dist**2).sum(-1)  # (P - i)
 
         # If closer than currently saved distance to one of the selected
         # points, then updated closest_dists
diff --git a/pytorch3d/transforms/se3.py b/pytorch3d/transforms/se3.py
index 7717a8dc..f334a194 100644
--- a/pytorch3d/transforms/se3.py
+++ b/pytorch3d/transforms/se3.py
@@ -194,10 +194,10 @@ def _se3_V_matrix(
     V = (
         torch.eye(3, dtype=log_rotation.dtype, device=log_rotation.device)[None]
         + log_rotation_hat
-        * ((1 - torch.cos(rotation_angles)) / (rotation_angles ** 2))[:, None, None]
+        * ((1 - torch.cos(rotation_angles)) / (rotation_angles**2))[:, None, None]
         + (
             log_rotation_hat_square
-            * ((rotation_angles - torch.sin(rotation_angles)) / (rotation_angles ** 3))[
+            * ((rotation_angles - torch.sin(rotation_angles)) / (rotation_angles**3))[
                 :, None, None
             ]
         )
@@ -211,7 +211,7 @@ def _get_se3_V_input(log_rotation: torch.Tensor, eps: float = 1e-4):
    A helper function that computes the input variables to the `_se3_V_matrix`
    function.
    """
-    nrms = (log_rotation ** 2).sum(-1)
+    nrms = (log_rotation**2).sum(-1)
     rotation_angles = torch.clamp(nrms, eps).sqrt()
     log_rotation_hat = hat(log_rotation)
     log_rotation_hat_square = torch.bmm(log_rotation_hat, log_rotation_hat)
diff --git a/setup.py b/setup.py
index 98b83410..d4a0f94c 100755
--- a/setup.py
+++ b/setup.py
@@ -125,7 +125,6 @@ if os.getenv("PYTORCH3D_NO_NINJA", "0") == "1":
 
         def __init__(self, *args, **kwargs):
             super().__init__(use_ninja=False, *args, **kwargs)
 
-
 else:
     BuildExtension = torch.utils.cpp_extension.BuildExtension
diff --git a/tests/benchmarks/bm_render_implicit.py b/tests/benchmarks/bm_render_implicit.py
index 3854aed9..d0d0c454 100644
--- a/tests/benchmarks/bm_render_implicit.py
+++ b/tests/benchmarks/bm_render_implicit.py
@@ -15,7 +15,7 @@ def bm_render_volumes() -> None:
     case_grid = {
         "batch_size": [1, 5],
         "raymarcher_type": [EmissionAbsorptionRaymarcher, AbsorptionOnlyRaymarcher],
-        "n_rays_per_image": [64 ** 2, 256 ** 2],
+        "n_rays_per_image": [64**2, 256**2],
         "n_pts_per_ray": [16, 128],
     }
     test_cases = itertools.product(*case_grid.values())
diff --git a/tests/benchmarks/bm_render_volumes.py b/tests/benchmarks/bm_render_volumes.py
index 1ec5b3bc..78e0c53a 100644
--- a/tests/benchmarks/bm_render_volumes.py
+++ b/tests/benchmarks/bm_render_volumes.py
@@ -17,7 +17,7 @@ def bm_render_volumes() -> None:
         "batch_size": [1, 5],
         "shape": ["sphere", "cube"],
         "raymarcher_type": [EmissionAbsorptionRaymarcher, AbsorptionOnlyRaymarcher],
-        "n_rays_per_image": [64 ** 2, 256 ** 2],
+        "n_rays_per_image": [64**2, 256**2],
         "n_pts_per_ray": [16, 128],
     }
     test_cases = itertools.product(*case_grid.values())
diff --git a/tests/implicitron/test_evaluation.py b/tests/implicitron/test_evaluation.py
index 95f4c9d3..52c4773a 100644
--- a/tests/implicitron/test_evaluation.py
+++ b/tests/implicitron/test_evaluation.py
@@ -124,7 +124,7 @@ class TestEvaluation(unittest.TestCase):
         )
         self.assertGreater(
             float(mse_depth_unmasked.sum()),
-            float(diff ** 2),
+            float(diff**2),
         )
         self.assertGreater(
             float(abs_depth_unmasked.sum()),
@@ -143,7 +143,7 @@ class TestEvaluation(unittest.TestCase):
         )
         if _mask_gt is not None:
             expected_err_abs = diff
-            expected_err_mse = diff ** 2
+            expected_err_mse = diff**2
         else:
             err_mask = (gt > 0.0).float() * mask
             if crop > 0:
@@ -195,7 +195,7 @@ class TestEvaluation(unittest.TestCase):
             )
             self.assertAlmostEqual(float(psnr), float(psnr_cv2), delta=1e-4)
         # check that all PSNRs are bigger than the minimum possible PSNR
-        max_mse = max_diff ** 2
+        max_mse = max_diff**2
         min_psnr = 10 * math.log10(1.0 / max_mse)
         for _im1, _im2 in zip(im1, im2):
             _psnr = calc_psnr(_im1, _im2)
diff --git a/tests/test_acos_linear_extrapolation.py b/tests/test_acos_linear_extrapolation.py
index 374ca2de..4be85bfd 100644
--- a/tests/test_acos_linear_extrapolation.py
+++ b/tests/test_acos_linear_extrapolation.py
@@ -66,7 +66,7 @@ class TestAcosLinearExtrapolation(TestCaseMixin, unittest.TestCase):
         # fit a line: slope * x + bias = y
         x_1 = torch.stack([x, torch.ones_like(x)], dim=-1)
         slope, bias = lstsq(x_1, y[:, None]).view(-1)[:2]
-        desired_slope = (-1.0) / torch.sqrt(1.0 - bound_t ** 2)
+        desired_slope = (-1.0) / torch.sqrt(1.0 - bound_t**2)
         # test that the desired slope is the same as the fitted one
         self.assertClose(desired_slope.view(1), slope.view(1), atol=1e-2)
         # test that the autograd's slope is the same as the desired one
diff --git a/tests/test_lighting.py b/tests/test_lighting.py
index dabafdca..baf245ee 100644
--- a/tests/test_lighting.py
+++ b/tests/test_lighting.py
@@ -412,7 +412,7 @@ class TestSpecularLighting(TestCaseMixin, unittest.TestCase):
             camera_position=camera_position[None, :],
             shininess=torch.tensor(10),
         )
-        self.assertClose(output_light, expected_output ** 10)
+        self.assertClose(output_light, expected_output**10)
 
     def test_specular_batched(self):
         batch_size = 10
diff --git a/tests/test_rasterize_meshes.py b/tests/test_rasterize_meshes.py
index 8fada036..ba5da11d 100644
--- a/tests/test_rasterize_meshes.py
+++ b/tests/test_rasterize_meshes.py
@@ -62,7 +62,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
         torch.manual_seed(231)
         device = torch.device("cpu")
         image_size = 32
-        blur_radius = 0.1 ** 2
+        blur_radius = 0.1**2
         faces_per_pixel = 3
 
         for d in ["cpu", get_random_cuda_device()]:
@@ -167,7 +167,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
 
         torch.manual_seed(231)
         image_size = 64
-        radius = 0.1 ** 2
+        radius = 0.1**2
         faces_per_pixel = 3
         device = torch.device("cpu")
         meshes_cpu = ico_sphere(0, device)
@@ -224,7 +224,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
         # Make sure that the backward pass runs for all pathways
         image_size = 64  # test is too slow for very large images.
         N = 1
-        radius = 0.1 ** 2
+        radius = 0.1**2
         faces_per_pixel = 3
 
         grad_zbuf = torch.randn(N, image_size, image_size, faces_per_pixel)
@@ -997,7 +997,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
         ordering of faces.
""" image_size = 10 - blur_radius = 0.12 ** 2 + blur_radius = 0.12**2 faces_per_pixel = 1 # fmt: off diff --git a/tests/test_render_implicit.py b/tests/test_render_implicit.py index 62d6ee8c..1c9bb302 100644 --- a/tests/test_render_implicit.py +++ b/tests/test_render_implicit.py @@ -60,13 +60,13 @@ def spherical_volumetric_function( # the squared distance of each ray point to the centroid of the sphere surface_dist = ( - (surface_vectors ** 2) + (surface_vectors**2) .sum(-1, keepdim=True) .view(*rays_points_world.shape[:-1], 1) ) # set all ray densities within the sphere_diameter distance from the centroid to 1 - rays_densities = torch.sigmoid(-100.0 * (surface_dist - sphere_diameter ** 2)) + rays_densities = torch.sigmoid(-100.0 * (surface_dist - sphere_diameter**2)) # ray colors are proportional to the normalized surface_vectors rays_features = ( diff --git a/tests/test_sample_points_from_meshes.py b/tests/test_sample_points_from_meshes.py index c0f3bddd..4268a488 100644 --- a/tests/test_sample_points_from_meshes.py +++ b/tests/test_sample_points_from_meshes.py @@ -128,7 +128,7 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase): # Sphere: points should have radius 1. x, y, z = samples[1, :].unbind(1) - radius = torch.sqrt(x ** 2 + y ** 2 + z ** 2) + radius = torch.sqrt(x**2 + y**2 + z**2) self.assertClose(radius, torch.ones(num_samples))