From b87058c62a99012e00687b2bd34fa54e9c96ac67 Mon Sep 17 00:00:00 2001
From: Jeremy Reizenstein
Date: Mon, 6 Apr 2020 06:38:50 -0700
Subject: [PATCH] fix recent lint

Summary: lint clean again

Reviewed By: patricklabatut

Differential Revision: D20868775

fbshipit-source-id: ade4301c1012c5c6943186432465215701d635a9
---
 pytorch3d/ops/points_alignment.py | 16 ++++------
 pytorch3d/ops/utils.py            |  7 ++---
 tests/bm_points_alignment.py      |  2 +-
 tests/common_testing.py           |  7 ++---
 tests/test_ops_utils.py           |  6 ++--
 tests/test_points_alignment.py    | 51 +++++++++----------------------
 6 files changed, 29 insertions(+), 60 deletions(-)

diff --git a/pytorch3d/ops/points_alignment.py b/pytorch3d/ops/points_alignment.py
index 15b39d78..80100f5b 100644
--- a/pytorch3d/ops/points_alignment.py
+++ b/pytorch3d/ops/points_alignment.py
@@ -1,12 +1,12 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 
 import warnings
-from typing import List, Optional, Tuple, Union
-import torch
+from typing import List, Tuple, Union
 
-from pytorch3d.structures.pointclouds import Pointclouds
-from pytorch3d.structures import utils as strutil
+import torch
 from pytorch3d.ops import utils as oputil
+from pytorch3d.structures import utils as strutil
+from pytorch3d.structures.pointclouds import Pointclouds
 
 
 def corresponding_points_alignment(
@@ -77,9 +77,7 @@ def corresponding_points_alignment(
         weights = strutil.list_to_padded(weights)[..., 0]
 
     if Xt.shape[:2] != weights.shape:
-        raise ValueError(
-            "weights should have the same first two dimensions as X."
-        )
+        raise ValueError("weights should have the same first two dimensions as X.")
 
     b, n, dim = Xt.shape
 
@@ -120,9 +118,7 @@ def corresponding_points_alignment(
     U, S, V = torch.svd(XYcov)
 
     # identity matrix used for fixing reflections
-    E = torch.eye(dim, dtype=XYcov.dtype, device=XYcov.device)[None].repeat(
-        b, 1, 1
-    )
+    E = torch.eye(dim, dtype=XYcov.dtype, device=XYcov.device)[None].repeat(b, 1, 1)
 
     if not allow_reflection:
         # reflection test:
diff --git a/pytorch3d/ops/utils.py b/pytorch3d/ops/utils.py
index 6813288c..fa690ee1 100644
--- a/pytorch3d/ops/utils.py
+++ b/pytorch3d/ops/utils.py
@@ -27,7 +27,7 @@ def wmean(
         * if `weights` is None => `mean(x, dim)`,
         * otherwise => `sum(x*w, dim) / max{sum(w, dim), eps}`.
     """
-    args = dict(dim=dim, keepdim=keepdim)
+    args = {"dim": dim, "keepdim": keepdim}
 
     if weight is None:
         return x.mean(**args)
@@ -38,7 +38,6 @@ def wmean(
     ):
         raise ValueError("wmean: weights are not compatible with the tensor")
 
-    return (
-        (x * weight[..., None]).sum(**args)
-        / weight[..., None].sum(**args).clamp(eps)
+    return (x * weight[..., None]).sum(**args) / weight[..., None].sum(**args).clamp(
+        eps
     )
diff --git a/tests/bm_points_alignment.py b/tests/bm_points_alignment.py
index 75464602..24f8d0d2 100644
--- a/tests/bm_points_alignment.py
+++ b/tests/bm_points_alignment.py
@@ -3,8 +3,8 @@
 
 from copy import deepcopy
 from itertools import product
-from fvcore.common.benchmark import benchmark
 
+from fvcore.common.benchmark import benchmark
 from test_points_alignment import TestCorrespondingPointsAlignment
 
 
diff --git a/tests/common_testing.py b/tests/common_testing.py
index 9bbcb187..450c3c38 100644
--- a/tests/common_testing.py
+++ b/tests/common_testing.py
@@ -1,8 +1,7 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 
-from typing import Optional
-
 import unittest
+from typing import Optional
 
 import numpy as np
 import torch
@@ -57,7 +56,5 @@ class TestCaseMixin(unittest.TestCase):
                 input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
            )
         else:
-            close = np.allclose(
-                input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
-            )
+            close = np.allclose(input, other, rtol=rtol, atol=atol, equal_nan=equal_nan)
         self.assertTrue(close, msg)
diff --git a/tests/test_ops_utils.py b/tests/test_ops_utils.py
index 81099bdc..ebac01ec 100644
--- a/tests/test_ops_utils.py
+++ b/tests/test_ops_utils.py
@@ -3,11 +3,10 @@
 import unittest
 import numpy as np
 import torch
-
 from common_testing import TestCaseMixin
-
 from pytorch3d.ops import utils as oputil
 
+
 class TestOpsUtils(TestCaseMixin, unittest.TestCase):
     def setUp(self) -> None:
         super().setUp()
@@ -62,8 +61,7 @@ class TestOpsUtils(TestCaseMixin, unittest.TestCase):
         # test dim
         weight = torch.rand(x.shape[0], n_points, device=device)
         weight_np = np.tile(
-            weight[:, :, None].cpu().data.numpy(),
-            (1, 1, x_np.shape[-1]),
+            weight[:, :, None].cpu().data.numpy(), (1, 1, x_np.shape[-1])
         )
         mean = oputil.wmean(x, dim=0, weight=weight, keepdim=False)
         mean_gt = np.average(x_np, axis=0, weights=weight_np)
diff --git a/tests/test_points_alignment.py b/tests/test_points_alignment.py
index 823f78cc..35a00b8e 100644
--- a/tests/test_points_alignment.py
+++ b/tests/test_points_alignment.py
@@ -2,12 +2,11 @@
 
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
 
-import numpy as np
 import unittest
+
+import numpy as np
 import torch
-
 from common_testing import TestCaseMixin
-
 from pytorch3d.ops import points_alignment
 from pytorch3d.structures.pointclouds import Pointclouds
 from pytorch3d.transforms import rotation_conversions
@@ -54,18 +53,14 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
         # generate random rotation matrices with orthogonalization of
         # random normal square matrices, followed by a transformation
         # that ensures determinant(R)==1
-        H = torch.randn(
-            batch_size, dim, dim, dtype=torch.float32, device=device
-        )
+        H = torch.randn(batch_size, dim, dim, dtype=torch.float32, device=device)
         U, _, V = torch.svd(H)
         E = torch.eye(dim, dtype=torch.float32, device=device)[None].repeat(
             batch_size, 1, 1
         )
         E[:, -1, -1] = torch.det(torch.bmm(U, V.transpose(2, 1)))
         R = torch.bmm(torch.bmm(U, E), V.transpose(2, 1))
-        assert torch.allclose(
-            torch.det(R), R.new_ones(batch_size), atol=1e-4
-        )
+        assert torch.allclose(torch.det(R), R.new_ones(batch_size), atol=1e-4)
 
         return R
 
@@ -94,19 +89,13 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
                     dtype=torch.int64,
                 )
                 X_list = [
-                    torch.randn(
-                        int(n_pt), dim, device=device, dtype=torch.float32
-                    )
+                    torch.randn(int(n_pt), dim, device=device, dtype=torch.float32)
                     for n_pt in n_points_per_batch
                 ]
                 X = Pointclouds(X_list)
             else:
                 X = torch.randn(
-                    batch_size,
-                    n_points,
-                    dim,
-                    device=device,
-                    dtype=torch.float32,
+                    batch_size, n_points, dim, device=device, dtype=torch.float32
                 )
                 X = Pointclouds(list(X))
         else:
@@ -143,11 +132,7 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
         # randomly select one of the dimensions to reflect for each
         # element in the batch
         dim_to_reflect = torch.randint(
-            low=0,
-            high=dim,
-            size=(batch_size,),
-            device=device,
-            dtype=torch.int64,
+            low=0, high=dim, size=(batch_size,), device=device, dtype=torch.int64
         )
 
         # convert dim_to_reflect to a batch of reflection matrices M
@@ -211,8 +196,7 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
             weights *= (weights * template.size()[1] > 0.3).to(weights)
             if use_pointclouds:  # convert to List[Tensor]
                 weights = [
-                    w[:npts]
-                    for w, npts in zip(weights, X.num_points_per_cloud())
+                    w[:npts] for w, npts in zip(weights, X.num_points_per_cloud())
                 ]
 
         torch.cuda.synchronize()
@@ -255,7 +239,7 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
                 use_point_clouds_cases = (
                     (True, False) if dim == 3 and n_points > 3 else (False,)
                 )
-                for random_weights in (False, True,):
+                for random_weights in (False, True):
                     for use_pointclouds in use_point_clouds_cases:
                         for estimate_scale in (False, True):
                             for reflect in (False, True):
@@ -325,8 +309,7 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
             weights *= (weights * template.size()[1] > 0.3).to(weights)
             if use_pointclouds:  # convert to List[Tensor]
                 weights = [
-                    w[:npts]
-                    for w, npts in zip(weights, X.num_points_per_cloud())
+                    w[:npts] for w, npts in zip(weights, X.num_points_per_cloud())
                 ]
 
         # apply the generated transformation to the generated
@@ -374,9 +357,9 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
 
            X_t_est = _apply_pcl_transformation(X_noisy, R_n, T_n, s=s_n)
 
-            return (
-                ((X_t_est - X_t) * weights[..., None]) ** 2
-            ).sum(dim=(1, 2)) / weights.sum(dim=-1)
+            return (((X_t_est - X_t) * weights[..., None]) ** 2).sum(
+                dim=(1, 2)
+            ) / weights.sum(dim=-1)
 
         # check that using weights leads to lower weighted_MSE(X_noisy, X_t)
         self.assertTrue(
@@ -386,9 +369,7 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
         if reflect and not allow_reflection:
             # check that all rotations have det=1
             self._assert_all_close(
-                torch.det(R_est),
-                R_est.new_ones(batch_size),
-                assert_error_message,
+                torch.det(R_est), R_est.new_ones(batch_size), assert_error_message
             )
 
         else:
@@ -430,6 +411,4 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
         if weights is None:
             self.assertClose(a_, b_, atol=atol, msg=err_message)
         else:
-            self.assertClose(
-                a_ * weights, b_ * weights, atol=atol, msg=err_message
-            )
+            self.assertClose(a_ * weights, b_ * weights, atol=atol, msg=err_message)