use assertClose

Summary: Use assertClose in some tests; it enforces shape equality, which a bare torch.allclose does not (sketched below). This fixes some small problems, including graph_conv on an empty graph.

Reviewed By: nikhilaravi

Differential Revision: D20556912

fbshipit-source-id: 60a61eafe3c03ce0f6c9c1a842685708fb10ac5b
Author: Jeremy Reizenstein
Date: 2020-03-23 11:33:10 -07:00
Committed by: Facebook GitHub Bot
Parent: 744ef0c2c8
Commit: 595aca27ea
13 changed files with 216 additions and 241 deletions
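
For context, assertClose comes from the TestCaseMixin that this diff adds to the test classes. Below is a minimal sketch of its assumed behavior (the real helper lives in the repository's test utilities and may differ in detail): unlike a bare torch.allclose, it asserts that the two tensors have exactly the same shape before comparing values, so a comparison can no longer pass via broadcasting.

import unittest

import torch


class TestCaseMixin:
    # Sketch only: assumed behavior, not the repository's exact implementation.
    def assertClose(self, input, other, *, rtol=1e-5, atol=1e-8):
        # Enforce shape equality first; torch.allclose alone would broadcast.
        self.assertEqual(input.shape, other.shape)
        self.assertTrue(torch.allclose(input, other, rtol=rtol, atol=atol))


class ExampleTest(TestCaseMixin, unittest.TestCase):
    def test_broadcast_is_caught(self):
        a = torch.zeros(3)
        b = torch.zeros(1)
        # torch.allclose(a, b) is True by broadcasting; assertClose must fail.
        with self.assertRaises(AssertionError):
            self.assertClose(a, b)

This shape check is also why several hunks below reshape the expected values: the broadcast tests previously compared a batched result against an unbatched or smaller expectation (see the torch.cat([v2, v2]) and projected_verts[None, None] changes).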

@@ -111,7 +111,7 @@ def orthographic_project_naive(points, scale_xyz=(1.0, 1.0, 1.0)):
     return points


-class TestCameraHelpers(unittest.TestCase):
+class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
     def setUp(self) -> None:
         super().setUp()
         torch.manual_seed(42)
@@ -154,7 +154,7 @@ class TestCameraHelpers(unittest.TestCase):
             [0.0, 2.7, 0.0], dtype=torch.float32
         ).view(1, 3)
         position = camera_position_from_spherical_angles(dist, elev, azim)
-        self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
+        self.assertClose(position, expected_position, atol=2e-7)

     def test_camera_position_from_angles_python_scalar_radians(self):
         dist = 2.7
@@ -165,7 +165,7 @@ class TestCameraHelpers(unittest.TestCase):
         position = camera_position_from_spherical_angles(
             dist, elev, azim, degrees=False
         )
-        self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
+        self.assertClose(position, expected_position, atol=2e-7)

     def test_camera_position_from_angles_torch_scalars(self):
         dist = torch.tensor(2.7)
@@ -175,7 +175,7 @@ class TestCameraHelpers(unittest.TestCase):
             [2.7, 0.0, 0.0], dtype=torch.float32
         ).view(1, 3)
         position = camera_position_from_spherical_angles(dist, elev, azim)
-        self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
+        self.assertClose(position, expected_position, atol=2e-7)

     def test_camera_position_from_angles_mixed_scalars(self):
         dist = 2.7
@@ -185,7 +185,7 @@ class TestCameraHelpers(unittest.TestCase):
             [2.7, 0.0, 0.0], dtype=torch.float32
         ).view(1, 3)
         position = camera_position_from_spherical_angles(dist, elev, azim)
-        self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
+        self.assertClose(position, expected_position, atol=2e-7)

     def test_camera_position_from_angles_torch_scalar_grads(self):
         dist = torch.tensor(2.7, requires_grad=True)
@@ -210,8 +210,8 @@ class TestCameraHelpers(unittest.TestCase):
             - torch.sin(elev) * torch.cos(azim)
         )
         grad_elev = dist * (math.pi / 180.0) * grad_elev
-        self.assertTrue(torch.allclose(elev_grad, grad_elev))
-        self.assertTrue(torch.allclose(dist_grad, grad_dist))
+        self.assertClose(elev_grad, grad_elev)
+        self.assertClose(dist_grad, grad_dist)

     def test_camera_position_from_angles_vectors(self):
         dist = torch.tensor([2.0, 2.0])
@@ -221,7 +221,7 @@ class TestCameraHelpers(unittest.TestCase):
             [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype=torch.float32
         )
         position = camera_position_from_spherical_angles(dist, elev, azim)
-        self.assertTrue(torch.allclose(position, expected_position, atol=2e-7))
+        self.assertClose(position, expected_position, atol=2e-7)

     def test_camera_position_from_angles_vectors_broadcast(self):
         dist = torch.tensor([2.0, 3.0, 5.0])
@@ -232,7 +232,7 @@ class TestCameraHelpers(unittest.TestCase):
             dtype=torch.float32,
         )
         position = camera_position_from_spherical_angles(dist, elev, azim)
-        self.assertTrue(torch.allclose(position, expected_position, atol=3e-7))
+        self.assertClose(position, expected_position, atol=3e-7)

     def test_camera_position_from_angles_vectors_mixed_broadcast(self):
         dist = torch.tensor([2.0, 3.0, 5.0])
@@ -243,7 +243,7 @@ class TestCameraHelpers(unittest.TestCase):
             dtype=torch.float32,
         )
         position = camera_position_from_spherical_angles(dist, elev, azim)
-        self.assertTrue(torch.allclose(position, expected_position, atol=3e-7))
+        self.assertClose(position, expected_position, atol=3e-7)

     def test_camera_position_from_angles_vectors_mixed_broadcast_grads(self):
         dist = torch.tensor([2.0, 3.0, 5.0], requires_grad=True)
@@ -269,8 +269,8 @@ class TestCameraHelpers(unittest.TestCase):
             - torch.sin(elev) * torch.cos(azim)
         )
         grad_elev = (dist * (math.pi / 180.0) * grad_elev).sum()
-        self.assertTrue(torch.allclose(elev_grad, grad_elev))
-        self.assertTrue(torch.allclose(dist_grad, grad_dist))
+        self.assertClose(elev_grad, grad_elev)
+        self.assertClose(dist_grad, torch.full([3], grad_dist))

     def test_camera_position_from_angles_vectors_bad_broadcast(self):
         # Batch dim for broadcast must be N or 1
@@ -283,7 +283,7 @@ class TestCameraHelpers(unittest.TestCase):
     def test_look_at_rotation_python_list(self):
         camera_position = [[0.0, 0.0, -1.0]]  # camera pointing along negative z
         rot_mat = look_at_rotation(camera_position)
-        self.assertTrue(torch.allclose(rot_mat, torch.eye(3)[None], atol=2e-7))
+        self.assertClose(rot_mat, torch.eye(3)[None], atol=2e-7)

     def test_look_at_rotation_input_fail(self):
         camera_position = [-1.0]  # expected to have xyz positions
@@ -310,7 +310,7 @@ class TestCameraHelpers(unittest.TestCase):
         )
         # fmt: on
         rot_mats = look_at_rotation(camera_positions)
-        self.assertTrue(torch.allclose(rot_mats, rot_mats_expected, atol=2e-7))
+        self.assertClose(rot_mats, rot_mats_expected, atol=2e-7)

     def test_look_at_rotation_tensor_broadcast(self):
         # fmt: off
@@ -335,19 +335,15 @@ class TestCameraHelpers(unittest.TestCase):
         )
         # fmt: on
         rot_mats = look_at_rotation(camera_positions)
-        self.assertTrue(torch.allclose(rot_mats, rot_mats_expected, atol=2e-7))
+        self.assertClose(rot_mats, rot_mats_expected, atol=2e-7)

     def test_look_at_rotation_tensor_grad(self):
         camera_position = torch.tensor([[0.0, 0.0, -1.0]], requires_grad=True)
         rot_mat = look_at_rotation(camera_position)
         rot_mat.sum().backward()
         self.assertTrue(hasattr(camera_position, "grad"))
-        self.assertTrue(
-            torch.allclose(
-                camera_position.grad,
-                torch.zeros_like(camera_position),
-                atol=2e-7,
-            )
-        )
+        self.assertClose(
+            camera_position.grad, torch.zeros_like(camera_position), atol=2e-7
+        )

     def test_view_transform(self):
@@ -403,9 +399,9 @@ class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         vertices = vertices[None, None, :]
         v1 = P.transform_points(vertices)
         v2 = perspective_project_naive(vertices, fov=60.0)
-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(far * v1[..., 2], v2[..., 2]))
-        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
+        self.assertClose(v1[..., :2], v2[..., :2])
+        self.assertClose(far * v1[..., 2], v2[..., 2])
+        self.assertClose(v1.squeeze(), projected_verts)

         # vertices are at the near clipping plane so z gets mapped to 0.0.
         vertices[..., 2] = near
@@ -414,8 +410,8 @@ class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         )
         v1 = P.transform_points(vertices)
         v2 = perspective_project_naive(vertices, fov=60.0)
-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
+        self.assertClose(v1[..., :2], v2[..., :2])
+        self.assertClose(v1.squeeze(), projected_verts)

     def test_perspective_kwargs(self):
         cameras = OpenGLPerspectiveCameras(znear=5.0, zfar=100.0, fov=0.0)
@@ -428,7 +424,7 @@ class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         )
         vertices = vertices[None, None, :]
         v1 = P.transform_points(vertices)
-        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
+        self.assertClose(v1.squeeze(), projected_verts)

     def test_perspective_mixed_inputs_broadcast(self):
         far = torch.tensor([10.0, 20.0], dtype=torch.float32)
@@ -449,8 +445,8 @@ class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         vertices = vertices[None, None, :]
         v1 = P.transform_points(vertices)
         v2 = perspective_project_naive(vertices, fov=60.0)
-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
+        self.assertClose(v1[..., :2], torch.cat([v2, v2])[..., :2])
+        self.assertClose(v1.squeeze(), projected_verts)

     def test_perspective_mixed_inputs_grad(self):
         far = torch.tensor([10.0])
@@ -468,7 +464,7 @@ class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         grad_cotan = -(1.0 / (torch.sin(half_fov_rad) ** 2.0) * 1 / 2.0)
         grad_fov = (math.pi / 180.0) * grad_cotan
         grad_fov = (vertices[0] + vertices[1]) * grad_fov / 10.0
-        self.assertTrue(torch.allclose(fov_grad, grad_fov))
+        self.assertClose(fov_grad, grad_fov)

     def test_camera_class_init(self):
         device = torch.device("cuda:0")
@@ -496,8 +492,8 @@ class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         R = look_at_rotation(T)
         P = cam.get_full_projection_transform(R=R, T=T)
         self.assertTrue(isinstance(P, Transform3d))
-        self.assertTrue(torch.allclose(cam.R, R))
-        self.assertTrue(torch.allclose(cam.T, T))
+        self.assertClose(cam.R, R)
+        self.assertClose(cam.T, T)

     def test_transform_points(self):
         # Check transform_points methods works with default settings for
@@ -511,7 +507,7 @@ class TestPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         )
         projected_points = projected_points.view(1, 1, 3).expand(5, 10, -1)
         new_points = cam.transform_points(points)
-        self.assertTrue(torch.allclose(new_points, projected_points))
+        self.assertClose(new_points, projected_points)


 class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
@@ -526,15 +522,15 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
         vertices = vertices[None, None, :]
         v1 = P.transform_points(vertices)
         v2 = orthographic_project_naive(vertices)
-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
+        self.assertClose(v1[..., :2], v2[..., :2])
+        self.assertClose(v1.squeeze(), projected_verts)

         vertices[..., 2] = near
         projected_verts[2] = 0.0
         v1 = P.transform_points(vertices)
         v2 = orthographic_project_naive(vertices)
-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
+        self.assertClose(v1[..., :2], v2[..., :2])
+        self.assertClose(v1.squeeze(), projected_verts)

     def test_orthographic_scaled(self):
         vertices = torch.tensor([1, 2, 0.5], dtype=torch.float32)
@@ -549,8 +545,8 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
         P = cameras.get_projection_transform()
         v1 = P.transform_points(vertices)
         v2 = orthographic_project_naive(vertices, scale)
-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v1, projected_verts))
+        self.assertClose(v1[..., :2], v2[..., :2])
+        self.assertClose(v1, projected_verts[None, None])

     def test_orthographic_kwargs(self):
         cameras = OpenGLOrthographicCameras(znear=5.0, zfar=100.0)
@@ -560,14 +556,13 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
         projected_verts = torch.tensor([1, 2, 1], dtype=torch.float32)
         vertices = vertices[None, None, :]
         v1 = P.transform_points(vertices)
-        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
+        self.assertClose(v1.squeeze(), projected_verts)

     def test_orthographic_mixed_inputs_broadcast(self):
         far = torch.tensor([10.0, 20.0])
         near = 1.0
         cameras = OpenGLOrthographicCameras(znear=near, zfar=far)
         P = cameras.get_projection_transform()
         vertices = torch.tensor([1.0, 2.0, 10.0], dtype=torch.float32)
         z2 = 1.0 / (20.0 - 1.0) * 10.0 + -(1.0) / (20.0 - 1.0)
         projected_verts = torch.tensor(
@@ -576,8 +571,8 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
         vertices = vertices[None, None, :]
         v1 = P.transform_points(vertices)
         v2 = orthographic_project_naive(vertices)
-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v1.squeeze(), projected_verts))
+        self.assertClose(v1[..., :2], torch.cat([v2, v2])[..., :2])
+        self.assertClose(v1.squeeze(), projected_verts)

     def test_orthographic_mixed_inputs_grad(self):
         far = torch.tensor([10.0])
@@ -602,7 +597,7 @@ class TestOpenGLOrthographicProjection(TestCaseMixin, unittest.TestCase):
                 ]
             ]
         )
-        self.assertTrue(torch.allclose(scale_grad, grad_scale))
+        self.assertClose(scale_grad, grad_scale)


 class TestSfMOrthographicProjection(TestCaseMixin, unittest.TestCase):
@@ -615,8 +610,8 @@ class TestSfMOrthographicProjection(TestCaseMixin, unittest.TestCase):
         v1 = P.transform_points(vertices)
         v2 = orthographic_project_naive(vertices)

-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v1, projected_verts))
+        self.assertClose(v1[..., :2], v2[..., :2])
+        self.assertClose(v1, projected_verts)

     def test_orthographic_scaled(self):
         focal_length_x = 10.0
@@ -636,9 +631,9 @@ class TestSfMOrthographicProjection(TestCaseMixin, unittest.TestCase):
             vertices, scale_xyz=(focal_length_x, focal_length_y, 1.0)
         )
         v3 = cameras.transform_points(vertices)
-        self.assertTrue(torch.allclose(v1[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v3[..., :2], v2[..., :2]))
-        self.assertTrue(torch.allclose(v1, projected_verts))
+        self.assertClose(v1[..., :2], v2[..., :2])
+        self.assertClose(v3[..., :2], v2[..., :2])
+        self.assertClose(v1, projected_verts)

     def test_orthographic_kwargs(self):
         cameras = SfMOrthographicCameras(
@@ -653,7 +648,7 @@ class TestSfMOrthographicProjection(TestCaseMixin, unittest.TestCase):
         projected_verts[:, :, 0] += 2.5
         projected_verts[:, :, 1] += 3.5
         v1 = P.transform_points(vertices)
-        self.assertTrue(torch.allclose(v1, projected_verts))
+        self.assertClose(v1, projected_verts)


 class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase):
@@ -664,7 +659,7 @@ class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         vertices = torch.randn([3, 4, 3], dtype=torch.float32)
         v1 = P.transform_points(vertices)
         v2 = sfm_perspective_project_naive(vertices)
-        self.assertTrue(torch.allclose(v1, v2))
+        self.assertClose(v1, v2)

     def test_perspective_scaled(self):
         focal_length_x = 10.0
@@ -684,8 +679,8 @@ class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase):
             vertices, fx=focal_length_x, fy=focal_length_y, p0x=p0x, p0y=p0y
         )
         v3 = cameras.transform_points(vertices)
-        self.assertTrue(torch.allclose(v1, v2))
-        self.assertTrue(torch.allclose(v3[..., :2], v2[..., :2]))
+        self.assertClose(v1, v2)
+        self.assertClose(v3[..., :2], v2[..., :2])

     def test_perspective_kwargs(self):
         cameras = SfMPerspectiveCameras(
@@ -699,4 +694,4 @@ class TestSfMPerspectiveProjection(TestCaseMixin, unittest.TestCase):
         v2 = sfm_perspective_project_naive(
             vertices, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5
         )
-        self.assertTrue(torch.allclose(v1, v2))
+        self.assertClose(v1, v2)
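
The graph_conv fix mentioned in the summary lives in one of the other changed files, not in the hunks above. As a hedged illustration of the empty-graph case it refers to (the exact test is an assumption; GraphConv is pytorch3d's graph convolution layer):

import torch
from pytorch3d.ops import GraphConv

# An empty graph: zero vertices (each with 3 input features) and zero edges.
verts = torch.zeros((0, 3), dtype=torch.float32)
edges = torch.zeros((0, 2), dtype=torch.int64)

conv = GraphConv(3, 16)
out = conv(verts, edges)
# The point of a shape-checked assertion here: the layer should return a
# correctly shaped (0, 16) tensor on an empty graph, not merely something
# "close" to an empty expectation.
assert out.shape == (0, 16)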