fix pts scale, save ply

Summary:
Fix:
* Scaling of point clouds when a scalar (non-tensor) scale value is given
* save_ply: create the empty verts_normals tensor on the same device as verts so torch.cat is device-compatible

Reviewed By: nikhilaravi

Differential Revision: D22298609

fbshipit-source-id: abe94a5b64baf325587202d20adfc36912cc1478
This commit is contained in:
Georgia Gkioxari 2020-07-03 10:19:58 -07:00 committed by Facebook GitHub Bot
parent 275ddade66
commit 2f0fd60186
4 changed files with 41 additions and 27 deletions

View File

@ -783,7 +783,11 @@ def save_ply(
decimal_places: Number of decimal places for saving.
"""
verts_normals = torch.FloatTensor([]) if verts_normals is None else verts_normals
verts_normals = (
torch.tensor([], dtype=torch.float32, device=verts.device)
if verts_normals is None
else verts_normals
)
faces = torch.LongTensor([]) if faces is None else faces
if len(verts) and not (verts.dim() == 2 and verts.size(1) == 3):

View File

@ -511,7 +511,6 @@ class Pointclouds(object):
Returns:
1D tensor of indices.
"""
self._compute_packed()
if self._padded_to_packed_idx is not None:
return self._padded_to_packed_idx
if self._N == 0:
@ -520,7 +519,7 @@ class Pointclouds(object):
self._padded_to_packed_idx = torch.cat(
[
torch.arange(v, dtype=torch.int64, device=self.device) + i * self._P
for (i, v) in enumerate(self._num_points_per_cloud)
for (i, v) in enumerate(self.num_points_per_cloud())
],
dim=0,
)
@ -797,7 +796,7 @@ class Pointclouds(object):
self.
"""
if not torch.is_tensor(scale):
scale = torch.full(len(self), scale)
scale = torch.full((len(self),), scale, device=self.device)
new_points_list = []
points_list = self.points_list()
for i, old_points in enumerate(points_list):

View File

@ -510,8 +510,8 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
N = 5
for test in ["tensor", "scalar"]:
mesh = TestMeshes.init_mesh(N, 10, 100)
for force in [0, 1]:
for force in (False, True):
mesh = TestMeshes.init_mesh(N, 10, 100)
if force:
# force mesh to have computed attributes
mesh.verts_packed()

View File

@ -460,7 +460,7 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
def test_scale(self):
def naive_scale(cloud, scale):
if not torch.is_tensor(scale):
scale = torch.full(len(cloud), scale)
scale = torch.full((len(cloud),), scale, device=cloud.device)
new_points_list = [
scale[i] * points.clone()
for (i, points) in enumerate(cloud.points_list())
@ -470,26 +470,37 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
)
N = 5
clouds = self.init_cloud(N, 100, 10)
for force in (False, True):
if force:
clouds._compute_packed(refresh=True)
clouds._compute_padded()
clouds.padded_to_packed_idx()
scales = torch.rand(N)
new_clouds_naive = naive_scale(clouds, scales)
new_clouds = clouds.scale(scales)
for i in range(N):
self.assertClose(
scales[i] * clouds.points_list()[i], new_clouds.points_list()[i]
)
self.assertClose(
clouds.normals_list()[i], new_clouds_naive.normals_list()[i]
)
self.assertClose(
clouds.features_list()[i], new_clouds_naive.features_list()[i]
)
self.assertCloudsEqual(new_clouds, new_clouds_naive)
for test in ["tensor", "scalar"]:
for force in (False, True):
clouds = self.init_cloud(N, 100, 10)
if force:
clouds._compute_packed(refresh=True)
clouds._compute_padded()
clouds.padded_to_packed_idx()
if test == "tensor":
scales = torch.rand(N)
elif test == "scalar":
scales = torch.rand(1)[0].item()
new_clouds_naive = naive_scale(clouds, scales)
new_clouds = clouds.scale(scales)
for i in range(N):
if test == "tensor":
self.assertClose(
scales[i] * clouds.points_list()[i],
new_clouds.points_list()[i],
)
else:
self.assertClose(
scales * clouds.points_list()[i],
new_clouds.points_list()[i],
)
self.assertClose(
clouds.normals_list()[i], new_clouds_naive.normals_list()[i]
)
self.assertClose(
clouds.features_list()[i], new_clouds_naive.features_list()[i]
)
self.assertCloudsEqual(new_clouds, new_clouds_naive)
def test_extend_list(self):
N = 10