Mirror of https://github.com/facebookresearch/pytorch3d.git, synced 2025-12-20 14:20:38 +08:00.
Add utils to approximate the conical frustums as multivariate gaussians.
Summary: Introduce methods to approximate the radii of conical frustums along rays as described in [MipNerf](https://arxiv.org/abs/2103.13415): - Two new attributes are added to ImplicitronRayBundle: bins and radii. Bins is of size n_pts_per_ray + 1. It allows us to easily manipulate the n_pts_per_ray intervals. For example, we need the interval coordinates in the radii computation for \(t_{\mu}, t_{\delta}\). Radii are used to store the radii of the conical frustums. - Add 3 new methods to compute the radii: - approximate_conical_frustum_as_gaussians: It computes the mean along the ray direction, the variance of the conical frustum with respect to t and the variance of the conical frustum with respect to its radius. This implementation follows the stable computation defined in the paper. - compute_3d_diagonal_covariance_gaussian: Leverages the two previously computed variances to find the diagonal covariance of the Gaussian. - conical_frustum_to_gaussian: Mixes everything together to compute the means and the diagonal covariances along the ray of the Gaussians. - In AbstractMaskRaySampler, introduces the attribute `cast_ray_bundle_as_cone`. If False, it won't change the previous behaviour of the RaySampler. However, if True, the samplers will sample `n_pts_per_ray + 1` points instead of `n_pts_per_ray`. These points are then used to set the bins attribute of ImplicitronRayBundle. Support for HeterogeneousRayBundle has not been added since the current code does not allow it. A safeguard has been added to avoid a silent bug in the future. Reviewed By: shapovalov Differential Revision: D45269190 fbshipit-source-id: bf22fad12d71d55392f054e3f680013aa0d59b78
This commit is contained in:
committed by
Facebook GitHub Bot
parent
4e7715ce66
commit
29b8ebd802
@@ -32,7 +32,6 @@
|
||||
|
||||
import math
|
||||
import pickle
|
||||
import typing
|
||||
import unittest
|
||||
from itertools import product
|
||||
|
||||
@@ -60,6 +59,8 @@ from pytorch3d.transforms import Transform3d
|
||||
from pytorch3d.transforms.rotation_conversions import random_rotations
|
||||
from pytorch3d.transforms.so3 import so3_exp_map
|
||||
|
||||
from .common_camera_utils import init_random_cameras
|
||||
|
||||
from .common_testing import TestCaseMixin
|
||||
|
||||
|
||||
@@ -151,60 +152,6 @@ def ndc_to_screen_points_naive(points, imsize):
|
||||
return torch.stack((x, y, z), dim=2)
|
||||
|
||||
|
||||
def init_random_cameras(
    cam_type: typing.Type[CamerasBase],
    batch_size: int,
    random_z: bool = False,
    device: Device = "cpu",
):
    """Build a batch of `cam_type` cameras with randomized parameters.

    Every camera gets a random rotation `R` and a small random translation
    `T`; additional constructor arguments are drawn at random depending on
    which camera class is requested.

    Args:
        cam_type: The camera class to instantiate.
        batch_size: Number of cameras in the returned batch.
        random_z: If False (default), the z-component of the translation is
            pinned to 4 so the cameras sit at a fixed depth.
        device: Device on which to create the cameras.

    Returns:
        An instance of `cam_type` holding `batch_size` random cameras.

    Raises:
        ValueError: If `cam_type` is not one of the supported camera classes.
    """
    translation = torch.randn(batch_size, 3) * 0.03
    if not random_z:
        # Keep all cameras at a fixed distance along z.
        translation[:, 2] = 4
    rotation = so3_exp_map(torch.randn(batch_size, 3) * 3.0)
    params = {"R": rotation, "T": translation, "device": device}

    if cam_type in (
        OpenGLPerspectiveCameras,
        OpenGLOrthographicCameras,
        FoVPerspectiveCameras,
        FoVOrthographicCameras,
    ):
        # All FoV-style cameras share near/far clipping planes, with
        # zfar guaranteed to lie beyond znear.
        params["znear"] = torch.rand(batch_size) * 10 + 0.1
        params["zfar"] = torch.rand(batch_size) * 4 + 1 + params["znear"]
        if cam_type in (OpenGLPerspectiveCameras, FoVPerspectiveCameras):
            params["fov"] = torch.rand(batch_size) * 60 + 30
            params["aspect_ratio"] = torch.rand(batch_size) * 0.5 + 0.5
        else:
            # Orthographic frustum bounds; the two classes use different
            # keyword names for the same four quantities.
            if cam_type is OpenGLOrthographicCameras:
                bound_keys = ("top", "bottom", "left", "right")
            else:
                bound_keys = ("max_y", "min_y", "min_x", "max_x")
            for key, sign in zip(bound_keys, (1.0, -1.0, -1.0, 1.0)):
                params[key] = sign * (torch.rand(batch_size) * 0.2 + 0.9)
    elif cam_type in (
        SfMOrthographicCameras,
        SfMPerspectiveCameras,
        OrthographicCameras,
        PerspectiveCameras,
    ):
        params["focal_length"] = torch.rand(batch_size) * 10 + 0.1
        params["principal_point"] = torch.randn((batch_size, 2))
    elif cam_type is FishEyeCameras:
        # Fisheye focal length is per-camera with an explicit trailing dim.
        params["focal_length"] = torch.rand(batch_size, 1) * 10 + 0.1
        params["principal_point"] = torch.randn((batch_size, 2))
        for name, dim in (
            ("radial_params", 6),
            ("tangential_params", 2),
            ("thin_prism_params", 4),
        ):
            params[name] = torch.randn((batch_size, dim))
    else:
        raise ValueError(str(cam_type))
    return cam_type(**params)
|
||||
|
||||
|
||||
class TestCameraHelpers(TestCaseMixin, unittest.TestCase):
|
||||
def setUp(self) -> None:
|
||||
super().setUp()
|
||||
|
||||
Reference in New Issue
Block a user