Return R2N2 R,T,K

Summary: Return rotation, translation and intrinsic matrices necessary to reproduce R2N2's own renderings.

Reviewed By: nikhilaravi

Differential Revision: D22462520

fbshipit-source-id: 46a3859743ebc43c7a24f75827d2be3adf3f486b
This commit is contained in:
Luya Gao
2020-08-07 13:21:26 -07:00
committed by Facebook GitHub Bot
parent c122ccb13c
commit 326e4ccb5b
8 changed files with 277 additions and 20 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.1 KiB

View File

@@ -93,10 +93,18 @@ class TestR2N2(TestCaseMixin, unittest.TestCase):
self.assertEqual(faces.ndim, 2)
self.assertEqual(faces.shape[-1], 3)
# Check that the intrinsic matrix and extrinsic matrix have the
# correct shapes.
self.assertEqual(r2n2_obj["R"].shape[0], 24)
self.assertEqual(r2n2_obj["R"].shape[1:], (3, 3))
self.assertEqual(r2n2_obj["T"].ndim, 2)
self.assertEqual(r2n2_obj["T"].shape[1], 3)
self.assertEqual(r2n2_obj["K"].ndim, 3)
self.assertEqual(r2n2_obj["K"].shape[1:], (4, 4))
# Check that image batch returned by __getitem__ has the correct shape.
self.assertEqual(r2n2_obj["images"].shape[0], 24)
self.assertEqual(r2n2_obj["images"].shape[1], 137)
self.assertEqual(r2n2_obj["images"].shape[2], 137)
self.assertEqual(r2n2_obj["images"].shape[1:-1], (137, 137))
self.assertEqual(r2n2_obj["images"].shape[-1], 3)
self.assertEqual(r2n2_dataset[39, [21]]["images"].shape[0], 1)
self.assertEqual(r2n2_dataset[39, torch.tensor([12, 21])]["images"].shape[0], 2)
@@ -113,7 +121,7 @@ class TestR2N2(TestCaseMixin, unittest.TestCase):
the correct shapes and types are returned.
"""
# Load dataset in the val split.
r2n2_dataset = R2N2("train", SHAPENET_PATH, R2N2_PATH, SPLITS_PATH)
r2n2_dataset = R2N2("val", SHAPENET_PATH, R2N2_PATH, SPLITS_PATH)
# Randomly retrieve several objects from the dataset and collate them.
collated_meshes = collate_batched_meshes(
@@ -147,6 +155,9 @@ class TestR2N2(TestCaseMixin, unittest.TestCase):
self.assertEqual(object_batch["mesh"].verts_padded().shape[0], batch_size)
self.assertEqual(object_batch["mesh"].faces_padded().shape[0], batch_size)
self.assertEqual(object_batch["images"].shape[0], batch_size)
self.assertEqual(object_batch["R"].shape[0], batch_size)
self.assertEqual(object_batch["T"].shape[0], batch_size)
self.assertEqual(object_batch["K"].shape[0], batch_size)
def test_catch_render_arg_errors(self):
"""
@@ -166,6 +177,13 @@ class TestR2N2(TestCaseMixin, unittest.TestCase):
r2n2_dataset.render(idxs=[1000000])
self.assertTrue("are out of bounds" in str(err.exception))
blend_cameras = BlenderCamera(
R=torch.rand((3, 3, 3)), T=torch.rand((3, 3)), K=torch.rand((3, 4, 4))
)
with self.assertRaises(ValueError) as err:
r2n2_dataset.render(idxs=[10, 11], cameras=blend_cameras)
self.assertTrue("Mismatch between batch dims" in str(err.exception))
def test_render_r2n2(self):
"""
Test rendering objects from R2N2 selected both by indices and model_ids.
@@ -279,3 +297,44 @@ class TestR2N2(TestCaseMixin, unittest.TestCase):
C = cam.get_camera_center()
C_ = -torch.bmm(R, T[:, :, None])[:, :, 0]
self.assertTrue(torch.allclose(C, C_, atol=1e-05))
def test_render_by_r2n2_calibration(self):
    """
    Test rendering R2N2 models with calibration matrices from R2N2's own Blender
    in batches.

    Renders two randomly chosen models from two randomly chosen views each and
    compares every rendered image against a stored reference image.
    """
    # Set up device and seed for random selections.
    # NOTE(review): this test requires a CUDA device; it will fail on CPU-only
    # machines — confirm the test suite gates it appropriately.
    device = torch.device("cuda:0")
    torch.manual_seed(39)
    # Load dataset in the train split.
    r2n2_dataset = R2N2("train", SHAPENET_PATH, R2N2_PATH, SPLITS_PATH)
    # Randomly select 2 model indices (out of the first 1000) and 2 view
    # indices (R2N2 provides 24 views per model).
    model_idxs = torch.randint(1000, (2,)).tolist()
    view_idxs = torch.randint(24, (2,)).tolist()
    raster_settings = RasterizationSettings(image_size=512)
    lights = PointLights(
        location=torch.tensor([0.0, 1.0, -2.0], device=device)[None],
        # TODO(nikhilar): debug the source of the discrepancy in two images when
        # rendering on GPU.
        # Zero out diffuse and specular components so only ambient light is
        # used, sidestepping the GPU discrepancy noted above.
        diffuse_color=((0, 0, 0),),
        specular_color=((0, 0, 0),),
        device=device,
    )
    r2n2_batch = r2n2_dataset.render(
        idxs=model_idxs,
        view_idxs=view_idxs,
        device=device,
        raster_settings=raster_settings,
        lights=lights,
    )
    # Iterate over the 4 rendered images — presumably 2 models x 2 views
    # (confirm against R2N2.render's idxs/view_idxs semantics).
    for idx in range(4):
        # Drop the alpha channel and move to CPU for comparison.
        r2n2_batch_rgb = r2n2_batch[idx, ..., :3].squeeze().cpu()
        if DEBUG:
            # Dump the rendered image to disk for manual inspection.
            Image.fromarray((r2n2_batch_rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR
                / ("DEBUG_r2n2_render_with_blender_calibrations_%s.png" % idx)
            )
        image_ref = load_rgb_image(
            "test_r2n2_render_with_blender_calibrations_%s.png" % idx, DATA_DIR
        )
        # Loose tolerance (0.05) to absorb small rasterization differences.
        self.assertClose(r2n2_batch_rgb, image_ref, atol=0.05)