# Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-08-02).
# Original commit metadata (pulsar renderer integration series):
#   Reviewed By: nikhilaravi
#   Differential Revision: D23947736
#   fbshipit-source-id: a5e77b53e6750334db22aefa89b4c079cda1b443
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
|
|
|
from itertools import product
|
|
|
|
import torch
|
|
from fvcore.common.benchmark import benchmark
|
|
from pytorch3d.ops.interp_face_attrs import (
|
|
interpolate_face_attributes,
|
|
interpolate_face_attributes_python,
|
|
)
|
|
|
|
|
|
def _generate_data(N, S, K, F, D, device, requires_grad=False):
|
|
pix_to_face = torch.randint(-10, F, (N, S, S, K), device=device)
|
|
barycentric_coords = torch.randn(
|
|
N, S, S, K, 3, device=device, requires_grad=requires_grad
|
|
)
|
|
face_attrs = torch.randn(F, 3, D, device=device, requires_grad=requires_grad)
|
|
grad_pix_attrs = torch.randn(N, S, S, K, D, device=device)
|
|
return pix_to_face, barycentric_coords, face_attrs, grad_pix_attrs
|
|
|
|
|
|
def _bm_forward(N, S, F, K, D, impl):
    """Return a zero-arg closure that runs one forward pass of face-attribute
    interpolation, for use with fvcore's benchmark driver.

    Args:
        N, S, F, K, D: data dimensions passed through to _generate_data.
        impl: "cuda" for the fused op, "python" for the reference impl.

    Raises:
        ValueError: if ``impl`` is not a known implementation name.
    """
    # The runtime depends on the values of pix_to_face. So for proper
    # benchmarking we should probably take the average of multiple
    # values of pix to face. But this doesn't easily fit into fvcore
    # benchmarking, so instead we'll just set a manual seed to make sure
    # that different impls will use the same data.
    torch.manual_seed(0)
    device = torch.device("cuda")
    data = _generate_data(N, S, K, F, D, device, requires_grad=False)
    args = data[:3]  # the upstream gradient is not needed for forward-only
    torch.cuda.synchronize()
    if impl == "cuda":
        fun = interpolate_face_attributes
    elif impl == "python":
        fun = interpolate_face_attributes_python
    else:
        # Previously an unknown impl left `fun` unbound, surfacing later as a
        # confusing NameError inside the benchmark loop; fail fast instead.
        raise ValueError(f"Unknown impl: {impl!r}")
    return lambda: fun(*args)
|
|
|
|
|
|
def _bm_forward_backward(N, S, F, K, D, impl):
    """Return a zero-arg closure that runs one forward + backward pass of
    face-attribute interpolation, for use with fvcore's benchmark driver.

    Args:
        N, S, F, K, D: data dimensions passed through to _generate_data.
        impl: "cuda" for the fused op, "python" for the reference impl.

    Raises:
        ValueError: if ``impl`` is not a known implementation name.
    """
    # Fixed seed so every impl benchmarks identical data (see _bm_forward).
    torch.manual_seed(0)
    device = torch.device("cuda")
    data = _generate_data(N, S, K, F, D, device, requires_grad=True)
    args, grad = data[:3], data[3]
    torch.cuda.synchronize()
    if impl == "cuda":
        fun = interpolate_face_attributes
    elif impl == "python":
        fun = interpolate_face_attributes_python
    else:
        # Previously an unknown impl left `fun` unbound, surfacing later as a
        # confusing NameError inside run(); fail fast with a clear message.
        raise ValueError(f"Unknown impl: {impl!r}")

    def run():
        out = fun(*args)
        out.backward(gradient=grad)

    return run
|
|
|
|
|
|
def bm_interpolate_face_attribues() -> None:
    """Benchmark forward and forward+backward face-attribute interpolation
    over a grid of batch sizes (N), image sizes (S), faces-per-pixel (K),
    face counts (F) and attribute dimensions (D), for both impls.

    No-op on machines without CUDA, since both benchmarked code paths run on
    the GPU. (NOTE: the trailing typo in this function's name is kept for
    backward compatibility with existing callers.)
    """
    # For now only benchmark on GPU.
    if not torch.cuda.is_available():
        return

    Ns = [1, 4]
    Ss = [128]
    Ks = [1, 10, 40]
    Fs = [5000]
    Ds = [1, 3, 16]
    impls = ["python", "cuda"]
    # Cartesian product of all settings; a comprehension replaces the manual
    # append loop for the same kwargs_list.
    kwargs_list = [
        {"N": N, "S": S, "K": K, "F": F, "D": D, "impl": impl}
        for N, S, K, F, D, impl in product(Ns, Ss, Ks, Fs, Ds, impls)
    ]
    benchmark(_bm_forward, "FORWARD", kwargs_list, warmup_iters=3)
    benchmark(_bm_forward_backward, "FORWARD+BACKWARD", kwargs_list, warmup_iters=3)
|
|
|
|
|
|
# Entry point: run the full benchmark grid when executed as a script.
if __name__ == "__main__":
    bm_interpolate_face_attribues()
|