Benchmark Cameras
Summary: Address comments to add benchmarks for cameras and the new fisheye cameras. The dependency functions in test_cameras have been updated in Diff 1. The following two snapshots show benchmarking results.

Reviewed By: kjchalup

Differential Revision: D38991914

fbshipit-source-id: 51fe9bb7237543e4ee112c9f5068a4cf12a9d482
parent 2283c292a9
commit 03562d87f5
tests/benchmarks/bm_cameras.py (new file, 59 lines)
@@ -0,0 +1,59 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import itertools

from fvcore.common.benchmark import benchmark
from tests.test_cameras import TestCamerasCommon


def _setUp():
    case_grid = {
        "cam_type": [
            "OpenGLOrthographicCameras",
            "OpenGLPerspectiveCameras",
            "SfMOrthographicCameras",
            "SfMPerspectiveCameras",
            "FoVOrthographicCameras",
            "FoVPerspectiveCameras",
            "OrthographicCameras",
            "PerspectiveCameras",
            "FishEyeCameras",
        ],
        "batch_size": [1, 10],
        "num_points": [10, 100],
        "device": ["cpu", "cuda:0"],
    }
    test_cases = itertools.product(*case_grid.values())
    kwargs_list = [dict(zip(case_grid.keys(), case)) for case in test_cases]
    return kwargs_list


def _bm_cameras_project() -> None:
    kwargs_list = _setUp()
    benchmark(
        TestCamerasCommon.transform_points,
        "TEST_TRANSFORM_POINTS",
        kwargs_list,
    )


def _bm_cameras_unproject() -> None:
    kwargs_list = _setUp()
    benchmark(
        TestCamerasCommon.unproject_points,
        "TEST_UNPROJECT_POINTS",
        kwargs_list,
    )


def bm_cameras() -> None:
    _bm_cameras_project()
    _bm_cameras_unproject()


if __name__ == "__main__":
    bm_cameras()
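For orientation (not part of the commit): _setUp() expands case_grid into its full cross product, 9 camera types x 2 batch sizes x 2 point counts x 2 devices = 72 kwargs dicts. A minimal standalone sketch of the same expansion, with a trimmed grid so it runs without the PyTorch3D test suite:

import itertools

# Trimmed grid, illustrative only; the real _setUp() above uses nine camera types.
case_grid = {
    "cam_type": ["PerspectiveCameras", "FishEyeCameras"],
    "batch_size": [1, 10],
    "num_points": [10, 100],
    "device": ["cpu", "cuda:0"],
}
kwargs_list = [
    dict(zip(case_grid.keys(), values))
    for values in itertools.product(*case_grid.values())
]
print(len(kwargs_list))   # 2 * 2 * 2 * 2 = 16 cases
print(kwargs_list[0])
# {'cam_type': 'PerspectiveCameras', 'batch_size': 1, 'num_points': 10, 'device': 'cpu'}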
tests/test_cameras.py

@@ -36,6 +36,7 @@ import unittest
 
 import numpy as np
 import torch
+from pytorch3d.common.datatypes import Device
 from pytorch3d.renderer.camera_utils import join_cameras_as_batch
 from pytorch3d.renderer.cameras import (
     camera_position_from_spherical_angles,
@@ -149,14 +150,17 @@ def ndc_to_screen_points_naive(points, imsize):
 
 
 def init_random_cameras(
-    cam_type: typing.Type[CamerasBase], batch_size: int, random_z: bool = False
+    cam_type: typing.Type[CamerasBase],
+    batch_size: int,
+    random_z: bool = False,
+    device: Device = "cpu",
 ):
     cam_params = {}
     T = torch.randn(batch_size, 3) * 0.03
     if not random_z:
         T[:, 2] = 4
     R = so3_exp_map(torch.randn(batch_size, 3) * 3.0)
-    cam_params = {"R": R, "T": T}
+    cam_params = {"R": R, "T": T, "device": device}
     if cam_type in (OpenGLPerspectiveCameras, OpenGLOrthographicCameras):
         cam_params["znear"] = torch.rand(batch_size) * 10 + 0.1
         cam_params["zfar"] = torch.rand(batch_size) * 4 + 1 + cam_params["znear"]
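A hedged usage sketch of the extended helper (assuming it is run from the repo root so tests.test_cameras is importable): the new device keyword is forwarded through cam_params to the camera constructor, so the random test cameras should land on the requested device.

import torch
from pytorch3d.renderer.cameras import PerspectiveCameras
from tests.test_cameras import init_random_cameras

device = "cuda:0" if torch.cuda.is_available() else "cpu"
# device is passed via cam_params, so R and T should end up on that device.
cameras = init_random_cameras(PerspectiveCameras, batch_size=4, device=device)
print(cameras.R.device, cameras.T.device)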
@@ -613,15 +617,33 @@ class TestCamerasCommon(TestCaseMixin, unittest.TestCase):
         self.assertTrue(torch.allclose(xyz_unproj, matching_xyz, atol=1e-4))
 
     @staticmethod
-    def unproject_points(cam_type, batch_size=50, num_points=100):
+    def unproject_points(
+        cam_type, batch_size=50, num_points=100, device: Device = "cpu"
+    ):
         """
         Checks that an unprojection of a randomly projected point cloud
         stays the same.
         """
+        if device == "cuda":
+            device = torch.device("cuda:0")
+        else:
+            device = torch.device("cpu")
+
+        str2cls = {  # noqa
+            "OpenGLOrthographicCameras": OpenGLOrthographicCameras,
+            "OpenGLPerspectiveCameras": OpenGLPerspectiveCameras,
+            "SfMOrthographicCameras": SfMOrthographicCameras,
+            "SfMPerspectiveCameras": SfMPerspectiveCameras,
+            "FoVOrthographicCameras": FoVOrthographicCameras,
+            "FoVPerspectiveCameras": FoVPerspectiveCameras,
+            "OrthographicCameras": OrthographicCameras,
+            "PerspectiveCameras": PerspectiveCameras,
+            "FishEyeCameras": FishEyeCameras,
+        }
 
         def run_cameras():
             # init the cameras
-            cameras = init_random_cameras(cam_type, batch_size)
+            cameras = init_random_cameras(str2cls[cam_type], batch_size, device=device)
             # xyz - the ground truth point cloud
             xyz = torch.randn(num_points, 3) * 0.3
             xyz = cameras.unproject_points(xyz, scaled_depth_input=True)
@@ -666,15 +688,33 @@ class TestCamerasCommon(TestCaseMixin, unittest.TestCase):
         self.assertClose(xyz_project_screen, xyz_project_screen_naive, atol=1e-4)
 
     @staticmethod
-    def transform_points(cam_type, batch_size=50, num_points=100):
+    def transform_points(
+        cam_type, batch_size=50, num_points=100, device: Device = "cpu"
+    ):
         """
         Checks that an unprojection of a randomly projected point cloud
         stays the same.
         """
 
+        if device == "cuda":
+            device = torch.device("cuda:0")
+        else:
+            device = torch.device("cpu")
+        str2cls = {  # noqa
+            "OpenGLOrthographicCameras": OpenGLOrthographicCameras,
+            "OpenGLPerspectiveCameras": OpenGLPerspectiveCameras,
+            "SfMOrthographicCameras": SfMOrthographicCameras,
+            "SfMPerspectiveCameras": SfMPerspectiveCameras,
+            "FoVOrthographicCameras": FoVOrthographicCameras,
+            "FoVPerspectiveCameras": FoVPerspectiveCameras,
+            "OrthographicCameras": OrthographicCameras,
+            "PerspectiveCameras": PerspectiveCameras,
+            "FishEyeCameras": FishEyeCameras,
+        }
+
         def run_cameras():
             # init the cameras
-            cameras = init_random_cameras(cam_type, batch_size)
+            cameras = init_random_cameras(str2cls[cam_type], batch_size, device=device)
             # xyz - the ground truth point cloud
             xy = torch.randn(num_points, 2) * 2.0 - 1.0
             z = torch.randn(num_points, 1) * 3.0 + 1.0