formatting changes from black 22.3.0

Summary:
Applies the black-fbsource codemod with the new build of pyfmt.

paintitblack

Reviewed By: lisroach

Differential Revision: D36324783

fbshipit-source-id: 280c09e88257e5e569ab729691165d8dedd767bc
Author: John Reese
Date: 2022-05-11 19:55:56 -07:00
Committed by: Facebook GitHub Bot
parent c21ba144e7
commit bef959c755
25 changed files with 39 additions and 41 deletions
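
Nearly all of the hunks below are instances of a single rule change in black 22.x: the power operator `**` now hugs its operands when both sides are "simple" (a name, a numeric literal, or an attribute access), while operands that are calls, subscripts, or parenthesized expressions keep the surrounding spaces. A minimal sketch of the rule, using illustrative names that are not taken from this diff:

import math

dims = [16, 32]
radius = 2.0

# both operands simple: the spaces around ** are dropped
area = math.pi * radius**2    # was: math.pi * radius ** 2
n_rays = 64**2                # was: 64 ** 2

# a subscripted or parenthesized operand keeps its spaces
scale = 2**0.5 / dims[0] ** 0.5    # dims[0] ** 0.5 stays spaced
p2 = (radius - 1.0) ** 2           # unchanged by the codemod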


@@ -97,7 +97,7 @@ def generate_eval_video_cameras(
         cam_centers_on_plane.t() @ cam_centers_on_plane
     ) / cam_centers_on_plane.shape[0]
     _, e_vec = torch.symeig(cov, eigenvectors=True)
-    traj_radius = (cam_centers_on_plane ** 2).sum(dim=1).sqrt().mean()
+    traj_radius = (cam_centers_on_plane**2).sum(dim=1).sqrt().mean()
     angle = torch.linspace(0, 2.0 * math.pi, n_eval_cams)
     traj = traj_radius * torch.stack(
         (torch.zeros_like(angle), angle.cos(), angle.sin()), dim=-1


@@ -71,6 +71,5 @@ elif sys.version_info >= (3, 7, 0):
     def get_args(cls):  # pragma: no cover
         return getattr(cls, "__args__", None)
 
-
 else:
     raise ImportError("This module requires Python 3.7+")


@@ -80,7 +80,7 @@ class _SymEig3x3(nn.Module):
         q = inputs_trace / 3.0
 
         # Calculate squared sum of elements outside the main diagonal / 2
-        p1 = ((inputs ** 2).sum(dim=(-1, -2)) - (inputs_diag ** 2).sum(-1)) / 2
+        p1 = ((inputs**2).sum(dim=(-1, -2)) - (inputs_diag**2).sum(-1)) / 2
         p2 = ((inputs_diag - q[..., None]) ** 2).sum(dim=-1) + 2.0 * p1.clamp(self._eps)
 
         p = torch.sqrt(p2 / 6.0)
@@ -195,7 +195,7 @@ class _SymEig3x3(nn.Module):
             cross_products[..., :1, :]
         )
 
-        norms_sq = (cross_products ** 2).sum(dim=-1)
+        norms_sq = (cross_products**2).sum(dim=-1)
         max_norms_index = norms_sq.argmax(dim=-1)  # pyre-ignore[16]
 
         # Pick only the cross-product with highest squared norm for each input


@@ -73,7 +73,7 @@ class Autodecoder(Configurable, torch.nn.Module):
     def calc_squared_encoding_norm(self):
         if self.n_instances <= 0:
             return None
-        return (self._autodecoder_codes.weight ** 2).mean()
+        return (self._autodecoder_codes.weight**2).mean()
 
     def get_encoding_dim(self) -> int:
         if self.n_instances <= 0:


@@ -59,7 +59,7 @@ class IdrFeatureField(ImplicitFunctionBase, torch.nn.Module):
             if layer_idx == self.num_layers - 2:
                 torch.nn.init.normal_(
                     lin.weight,
-                    mean=math.pi ** 0.5 / dims[layer_idx] ** 0.5,
+                    mean=math.pi**0.5 / dims[layer_idx] ** 0.5,
                     std=0.0001,
                 )
                 torch.nn.init.constant_(lin.bias, -self.bias)
@@ -67,15 +67,15 @@ class IdrFeatureField(ImplicitFunctionBase, torch.nn.Module):
                 torch.nn.init.constant_(lin.bias, 0.0)
                 torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
                 torch.nn.init.normal_(
-                    lin.weight[:, :3], 0.0, 2 ** 0.5 / out_dim ** 0.5
+                    lin.weight[:, :3], 0.0, 2**0.5 / out_dim**0.5
                 )
             elif self.n_harmonic_functions_xyz > 0 and layer_idx in self.skip_in:
                 torch.nn.init.constant_(lin.bias, 0.0)
-                torch.nn.init.normal_(lin.weight, 0.0, 2 ** 0.5 / out_dim ** 0.5)
+                torch.nn.init.normal_(lin.weight, 0.0, 2**0.5 / out_dim**0.5)
                 torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3) :], 0.0)
             else:
                 torch.nn.init.constant_(lin.bias, 0.0)
-                torch.nn.init.normal_(lin.weight, 0.0, 2 ** 0.5 / out_dim ** 0.5)
+                torch.nn.init.normal_(lin.weight, 0.0, 2**0.5 / out_dim**0.5)
 
             if self.weight_norm:
                 lin = nn.utils.weight_norm(lin)
@@ -130,7 +130,7 @@ class IdrFeatureField(ImplicitFunctionBase, torch.nn.Module):
         x = embedding
         for layer_idx in range(self.num_layers - 1):
             if layer_idx in self.skip_in:
-                x = torch.cat([x, embedding], dim=-1) / 2 ** 0.5
+                x = torch.cat([x, embedding], dim=-1) / 2**0.5
 
             # pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a function.
             x = self.linear_layers[layer_idx](x)


@@ -386,7 +386,7 @@ class TransformerWithInputSkips(torch.nn.Module):
         layers_pool, layers_ray = [], []
         dimout = 0
         for layeri in range(n_layers):
-            dimin = int(round(hidden_dim / (dim_down_factor ** layeri)))
+            dimin = int(round(hidden_dim / (dim_down_factor**layeri)))
             dimout = int(round(hidden_dim / (dim_down_factor ** (layeri + 1))))
             logger.info(f"Tr: {dimin} -> {dimout}")
             for _i, l in enumerate((layers_pool, layers_ray)):


@@ -87,7 +87,7 @@ class BaseRenderer(ABC, ReplaceableBase):
         ray_bundle,
         implicit_functions: List[ImplicitFunctionWrapper],
         evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
-        **kwargs
+        **kwargs,
     ) -> RendererOutput:
         """
         Each Renderer should implement its own forward function
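
The `**kwargs` hunk above is the other recurring edit in this codemod: black 22.x now adds a magic trailing comma after `*args`/`**kwargs` in multi-line signatures, something it previously avoided, presumably because that syntax is Python 3-only and the 22.x line dropped Python 2 support. A sketch with an illustrative function name:

def render(
    scene,
    evaluation_mode="evaluation",
    **kwargs,  # black 22.x adds this trailing comma
):
    ...

With the comma in place, appending a parameter later shows up as a one-line diff.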


@@ -295,7 +295,7 @@ class RayTracing(Configurable, nn.Module):
         ) and not_proj_iters < self.line_step_iters:
             # Step backwards
             acc_start_dis[not_projected_start] -= (
-                (1 - self.line_search_step) / (2 ** not_proj_iters)
+                (1 - self.line_search_step) / (2**not_proj_iters)
             ) * curr_sdf_start[not_projected_start]
             curr_start_points[not_projected_start] = (
                 cam_loc
@@ -303,7 +303,7 @@ class RayTracing(Configurable, nn.Module):
             ).reshape(-1, 3)[not_projected_start]
 
             acc_end_dis[not_projected_end] += (
-                (1 - self.line_search_step) / (2 ** not_proj_iters)
+                (1 - self.line_search_step) / (2**not_proj_iters)
             ) * curr_sdf_end[not_projected_end]
             curr_end_points[not_projected_end] = (
                 cam_loc
@@ -553,7 +553,7 @@ def _get_sphere_intersection(
     # cam_loc = cam_loc.unsqueeze(-1)
     # ray_cam_dot = torch.bmm(ray_directions, cam_loc).squeeze()
     ray_cam_dot = (ray_directions * cam_loc).sum(-1)  # n_images x n_rays
-    under_sqrt = ray_cam_dot ** 2 - (cam_loc.norm(2, dim=-1) ** 2 - r ** 2)
+    under_sqrt = ray_cam_dot**2 - (cam_loc.norm(2, dim=-1) ** 2 - r**2)
 
     under_sqrt = under_sqrt.reshape(-1)
     mask_intersect = under_sqrt > 0


@@ -101,7 +101,7 @@ def volumetric_camera_overlaps(
     """
     device = cameras.device
     ba = cameras.R.shape[0]
-    n_vox = int(resol ** 3)
+    n_vox = int(resol**3)
     grid = pt3d.structures.Volumes(
         densities=torch.zeros([1, 1, resol, resol, resol], device=device),
         volume_translation=-torch.FloatTensor(scene_center)[None].to(device),


@@ -102,13 +102,13 @@ def fit_circle_in_2d(
         Circle2D object
     """
     design = torch.cat([points2d, torch.ones_like(points2d[:, :1])], dim=1)
-    rhs = (points2d ** 2).sum(1)
+    rhs = (points2d**2).sum(1)
     n_provided = points2d.shape[0]
     if n_provided < 3:
         raise ValueError(f"{n_provided} points are not enough to determine a circle")
     solution = lstsq(design, rhs[:, None])
     center = solution[:2, 0] / 2
-    radius = torch.sqrt(solution[2, 0] + (center ** 2).sum())
+    radius = torch.sqrt(solution[2, 0] + (center**2).sum())
     if n_points > 0:
         if angles is not None:
             warnings.warn("n_points ignored because angles provided")


@@ -65,7 +65,7 @@ def eval_depth(
 
     df = gt - pred
 
-    mse_depth = (dmask * (df ** 2)).sum((1, 2, 3)) / dmask_mass
+    mse_depth = (dmask * (df**2)).sum((1, 2, 3)) / dmask_mass
     abs_depth = (dmask * df.abs()).sum((1, 2, 3)) / dmask_mass
 
     return mse_depth, abs_depth


@@ -217,7 +217,7 @@ def _align_camera_extrinsics(
         # of centered A and centered B
         Ac = A - Amu
         Bc = B - Bmu
-        align_t_s = (Ac * Bc).mean() / (Ac ** 2).mean().clamp(eps)
+        align_t_s = (Ac * Bc).mean() / (Ac**2).mean().clamp(eps)
     else:
         # set the scale to identity
         align_t_s = 1.0


@@ -240,7 +240,7 @@ def _get_edge_indices(edges: int) -> List[int]:
 
     edge_indices = []
     for i in range(12):
-        if edges & (2 ** i):
+        if edges & (2**i):
             edge_indices.append(i)
 
     return edge_indices


@@ -206,7 +206,7 @@ def _kernel_vec_distances(v):
     # this should produce B x 6 x (D choose 2) tensor
 
     # we should take dot-product of all (i,i)
-    rows_ii = (dv ** 2).sum(dim=-2)
+    rows_ii = (dv**2).sum(dim=-2)
 
     # this should produce B x 6 x D tensor
     return torch.cat((rows_ii, rows_2ij), dim=-1)


@@ -151,7 +151,7 @@ def sample_farthest_points_naive(
             # and all the other points. If a point has already been selected
             # it's distance will be 0.0 so it will not be selected again as the max.
             dist = points[n, selected_idx, :] - points[n, : lengths[n], :]
-            dist_to_last_selected = (dist ** 2).sum(-1)  # (P - i)
+            dist_to_last_selected = (dist**2).sum(-1)  # (P - i)
 
             # If closer than currently saved distance to one of the selected
             # points, then updated closest_dists


@@ -194,10 +194,10 @@ def _se3_V_matrix(
     V = (
         torch.eye(3, dtype=log_rotation.dtype, device=log_rotation.device)[None]
         + log_rotation_hat
-        * ((1 - torch.cos(rotation_angles)) / (rotation_angles ** 2))[:, None, None]
+        * ((1 - torch.cos(rotation_angles)) / (rotation_angles**2))[:, None, None]
         + (
             log_rotation_hat_square
-            * ((rotation_angles - torch.sin(rotation_angles)) / (rotation_angles ** 3))[
+            * ((rotation_angles - torch.sin(rotation_angles)) / (rotation_angles**3))[
                 :, None, None
             ]
         )
@@ -211,7 +211,7 @@ def _get_se3_V_input(log_rotation: torch.Tensor, eps: float = 1e-4):
     A helper function that computes the input variables to the `_se3_V_matrix`
     function.
     """
-    nrms = (log_rotation ** 2).sum(-1)
+    nrms = (log_rotation**2).sum(-1)
     rotation_angles = torch.clamp(nrms, eps).sqrt()
     log_rotation_hat = hat(log_rotation)
     log_rotation_hat_square = torch.bmm(log_rotation_hat, log_rotation_hat)


@@ -125,7 +125,6 @@ if os.getenv("PYTORCH3D_NO_NINJA", "0") == "1":
 
         def __init__(self, *args, **kwargs):
             super().__init__(use_ninja=False, *args, **kwargs)
 
-
 else:
     BuildExtension = torch.utils.cpp_extension.BuildExtension
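
The one-line shrink in this hunk header (`-125,7 +125,6`), like the `-71,6 +71,5` hunk earlier, is a deleted blank line with no replacement: since the codemod is pure formatting, the removed line can only be whitespace, which is consistent with black 22.x tightening blank-line handling around definitions nested in an if/else. A sketch of the resulting shape of this setup.py block; the subclass line is inferred from context, not shown in the hunk:

import os
import torch.utils.cpp_extension

if os.getenv("PYTORCH3D_NO_NINJA", "0") == "1":

    # presumably a subclass along these lines wraps the stock builder
    class BuildExtension(torch.utils.cpp_extension.BuildExtension):
        def __init__(self, *args, **kwargs):
            super().__init__(use_ninja=False, *args, **kwargs)

else:
    BuildExtension = torch.utils.cpp_extension.BuildExtension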


@@ -15,7 +15,7 @@ def bm_render_volumes() -> None:
     case_grid = {
         "batch_size": [1, 5],
         "raymarcher_type": [EmissionAbsorptionRaymarcher, AbsorptionOnlyRaymarcher],
-        "n_rays_per_image": [64 ** 2, 256 ** 2],
+        "n_rays_per_image": [64**2, 256**2],
         "n_pts_per_ray": [16, 128],
     }
     test_cases = itertools.product(*case_grid.values())


@@ -17,7 +17,7 @@ def bm_render_volumes() -> None:
         "batch_size": [1, 5],
         "shape": ["sphere", "cube"],
         "raymarcher_type": [EmissionAbsorptionRaymarcher, AbsorptionOnlyRaymarcher],
-        "n_rays_per_image": [64 ** 2, 256 ** 2],
+        "n_rays_per_image": [64**2, 256**2],
         "n_pts_per_ray": [16, 128],
     }
     test_cases = itertools.product(*case_grid.values())


@@ -124,7 +124,7 @@ class TestEvaluation(unittest.TestCase):
         )
         self.assertGreater(
             float(mse_depth_unmasked.sum()),
-            float(diff ** 2),
+            float(diff**2),
         )
         self.assertGreater(
             float(abs_depth_unmasked.sum()),
@@ -143,7 +143,7 @@ class TestEvaluation(unittest.TestCase):
         )
         if _mask_gt is not None:
             expected_err_abs = diff
-            expected_err_mse = diff ** 2
+            expected_err_mse = diff**2
         else:
             err_mask = (gt > 0.0).float() * mask
             if crop > 0:
@@ -195,7 +195,7 @@ class TestEvaluation(unittest.TestCase):
         )
         self.assertAlmostEqual(float(psnr), float(psnr_cv2), delta=1e-4)
         # check that all PSNRs are bigger than the minimum possible PSNR
-        max_mse = max_diff ** 2
+        max_mse = max_diff**2
         min_psnr = 10 * math.log10(1.0 / max_mse)
         for _im1, _im2 in zip(im1, im2):
             _psnr = calc_psnr(_im1, _im2)


@@ -66,7 +66,7 @@ class TestAcosLinearExtrapolation(TestCaseMixin, unittest.TestCase):
         # fit a line: slope * x + bias = y
         x_1 = torch.stack([x, torch.ones_like(x)], dim=-1)
         slope, bias = lstsq(x_1, y[:, None]).view(-1)[:2]
-        desired_slope = (-1.0) / torch.sqrt(1.0 - bound_t ** 2)
+        desired_slope = (-1.0) / torch.sqrt(1.0 - bound_t**2)
         # test that the desired slope is the same as the fitted one
         self.assertClose(desired_slope.view(1), slope.view(1), atol=1e-2)
         # test that the autograd's slope is the same as the desired one
# test that the autograd's slope is the same as the desired one # test that the autograd's slope is the same as the desired one


@@ -412,7 +412,7 @@ class TestSpecularLighting(TestCaseMixin, unittest.TestCase):
             camera_position=camera_position[None, :],
             shininess=torch.tensor(10),
         )
-        self.assertClose(output_light, expected_output ** 10)
+        self.assertClose(output_light, expected_output**10)
 
     def test_specular_batched(self):
         batch_size = 10


@@ -62,7 +62,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
         torch.manual_seed(231)
         device = torch.device("cpu")
         image_size = 32
-        blur_radius = 0.1 ** 2
+        blur_radius = 0.1**2
         faces_per_pixel = 3
 
         for d in ["cpu", get_random_cuda_device()]:
@@ -167,7 +167,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
 
         torch.manual_seed(231)
         image_size = 64
-        radius = 0.1 ** 2
+        radius = 0.1**2
         faces_per_pixel = 3
         device = torch.device("cpu")
         meshes_cpu = ico_sphere(0, device)
@@ -224,7 +224,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
         # Make sure that the backward pass runs for all pathways
         image_size = 64  # test is too slow for very large images.
         N = 1
-        radius = 0.1 ** 2
+        radius = 0.1**2
         faces_per_pixel = 3
 
         grad_zbuf = torch.randn(N, image_size, image_size, faces_per_pixel)
@@ -997,7 +997,7 @@ class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
         ordering of faces.
         """
         image_size = 10
-        blur_radius = 0.12 ** 2
+        blur_radius = 0.12**2
         faces_per_pixel = 1
 
         # fmt: off


@@ -60,13 +60,13 @@ def spherical_volumetric_function(
 
     # the squared distance of each ray point to the centroid of the sphere
     surface_dist = (
-        (surface_vectors ** 2)
+        (surface_vectors**2)
         .sum(-1, keepdim=True)
         .view(*rays_points_world.shape[:-1], 1)
     )
 
     # set all ray densities within the sphere_diameter distance from the centroid to 1
-    rays_densities = torch.sigmoid(-100.0 * (surface_dist - sphere_diameter ** 2))
+    rays_densities = torch.sigmoid(-100.0 * (surface_dist - sphere_diameter**2))
 
     # ray colors are proportional to the normalized surface_vectors
     rays_features = (


@@ -128,7 +128,7 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase):
 
         # Sphere: points should have radius 1.
         x, y, z = samples[1, :].unbind(1)
-        radius = torch.sqrt(x ** 2 + y ** 2 + z ** 2)
+        radius = torch.sqrt(x**2 + y**2 + z**2)
 
         self.assertClose(radius, torch.ones(num_samples))