Summary: C++/CUDA implementation of the forward and backward passes for the sigmoid alpha blending function. It is slightly faster than the vectorized Python implementation but, more importantly, uses less memory because fewer intermediate tensors are created.

Reviewed By: gkioxari

Differential Revision: D19980671

fbshipit-source-id: 0779055d2c68b1f20fb0870e60046077ef4613ff
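For reference, the "pytorch" backend benchmarked below is a vectorized implementation roughly along these lines. This is only a minimal sketch under assumed shapes: the function name, argument names, and the (N, H, W, K) layout are illustrative and not the exact pytorch3d.renderer.blending API.

import torch

def sigmoid_alpha_blend_sketch(dists, pix_to_face, sigma=1e-4):
    """Illustrative vectorized sigmoid alpha blending (not the library API).

    dists: (N, H, W, K) signed distances from each pixel to its K nearest faces.
    pix_to_face: (N, H, W, K) face indices, with -1 marking empty entries.
    Returns an (N, H, W) alpha map in [0, 1].
    """
    mask = (pix_to_face >= 0).to(dists.dtype)
    # Per-face coverage probability; entries with no face contribute zero.
    prob = torch.sigmoid(-dists / sigma) * mask
    # A pixel is opaque unless every candidate face misses it.
    return 1.0 - torch.prod(1.0 - prob, dim=-1)

The custom backend reimplements the forward and backward passes of this formula directly; the memory saving mentioned above presumably comes from not having to keep intermediates such as prob alive for autograd.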
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

from itertools import product

from fvcore.common.benchmark import benchmark
from test_blending import TestBlending

def bm_blending() -> None:
    # Benchmark grid: every combination of mesh batch size, image size,
    # faces per pixel, device, and blending backend.
    devices = ["cuda"]
    kwargs_list = []
    num_meshes = [8]
    image_size = [64, 128, 256]
    faces_per_pixel = [50, 100]
    backend = ["pytorch", "custom"]
    test_cases = product(num_meshes, image_size, faces_per_pixel, devices, backend)

    for case in test_cases:
        n, s, k, d, b = case
        kwargs_list.append(
            {
                "num_meshes": n,
                "image_size": s,
                "faces_per_pixel": k,
                "device": d,
                "backend": b,
            }
        )

    # Sigmoid alpha blending is benchmarked with both the vectorized PyTorch
    # implementation and the custom C++/CUDA kernel.
    benchmark(
        TestBlending.bm_sigmoid_alpha_blending,
        "SIGMOID_ALPHA_BLENDING_PYTORCH",
        kwargs_list,
        warmup_iters=1,
    )

    # Softmax blending only has a PyTorch implementation, so drop the "custom" cases.
    kwargs_list = [case for case in kwargs_list if case["backend"] == "pytorch"]
    benchmark(
        TestBlending.bm_softmax_blending,
        "SOFTMAX_BLENDING_PYTORCH",
        kwargs_list,
        warmup_iters=1,
    )
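
# A minimal, optional entry point (an assumption; it may not be present in the
# upstream file) so the sweep can be run directly, e.g. `python bm_blending.py`.
# fvcore's benchmark() prints its timing results to stdout.
if __name__ == "__main__":
    bm_blending()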