	SplatterPhongShader Benchmarks
Summary: Benchmarking. We only use num_faces=2 for splatter because, as far as I can see, one would never need to use more. Pose optimization and mesh optimization experiments (see the next two diffs) showed that Splatter with 2 faces beats Softmax with 50 and 100 faces in terms of accuracy.

Results: We're slower at 64px^2. At 128px and 256px, we're slower than Softmax+50faces but faster than Softmax+100faces. We're also slower at 10 faces/pixel, but both expectation and results show that more than 2 faces shouldn't be necessary. See also more results in https://fburl.com/gdoc/ttv7u7hp.

Reviewed By: jcjohnson

Differential Revision: D36210575

fbshipit-source-id: c8de28c8a59ce5fe21a47263bd43d2757b15d123
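To make the comparison in the summary concrete, here is a small sketch of the two case grids this diff sets up with itertools.product (values copied from the hunks below; the devices list is an assumption, since it is defined outside the shown hunks):

# Sketch of the two benchmark grids set up in this diff (illustrative only).
# The existing grid, which the summary compares against as Softmax+50/100 faces,
# also gains faces_per_pixel=2; the new splatter grid only needs 2 and 10 faces
# and the pytorch backend.
from itertools import product

num_meshes = [8]
image_size = [64, 128, 256]
devices = ["cuda:0"]  # assumption: defined elsewhere in bm_blending

existing_cases = list(
    product(num_meshes, image_size, [2, 50, 100], devices, ["pytorch", "custom"])
)
splatter_cases = list(
    product(num_meshes, image_size, [2, 10], devices, ["pytorch"])
)
print(len(existing_cases), len(splatter_cases))  # 18 and 6 cases with these values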
This commit is contained in:
parent c5a83f46ef
commit 7c25d34d22
@@ -16,7 +16,7 @@ def bm_blending() -> None:
     kwargs_list = []
     num_meshes = [8]
     image_size = [64, 128, 256]
-    faces_per_pixel = [50, 100]
+    faces_per_pixel = [2, 50, 100]
     backend = ["pytorch", "custom"]
     test_cases = product(num_meshes, image_size, faces_per_pixel, devices, backend)
 
@@ -47,6 +47,28 @@ def bm_blending() -> None:
         warmup_iters=1,
     )
 
+    kwargs_list = []
+    faces_per_pixel = [2, 10]
+    backend = ["pytorch"]
+    test_cases = product(num_meshes, image_size, faces_per_pixel, devices, backend)
+    for case in test_cases:
+        n, s, k, d, b = case
+        kwargs_list.append(
+            {
+                "num_meshes": n,
+                "image_size": s,
+                "faces_per_pixel": k,
+                "device": d,
+                "backend": b,
+            }
+        )
+    benchmark(
+        TestBlending.bm_splatter_blending,
+        "SPLATTER_BLENDING_PYTORCH",
+        kwargs_list,
+        warmup_iters=1,
+    )
+
 
 if __name__ == "__main__":
     bm_blending()
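Each kwargs dict built in the new loop above is the argument set for one call to TestBlending.bm_splatter_blending, added in the second file of this diff. A single case can also be exercised by hand along these lines (a sketch: the import path is assumed, and a CUDA device is required because the returned closure calls torch.cuda.synchronize()):

# Hypothetical manual run of one splatter benchmark case from the grid above.
# Assumes the test module is importable as test_blending and a CUDA device exists.
from test_blending import TestBlending

case = {
    "num_meshes": 8,
    "image_size": 128,
    "faces_per_pixel": 2,
    "device": "cuda:0",
    "backend": "pytorch",
}
fn = TestBlending.bm_splatter_blending(**case)  # builds dummy inputs, returns a closure
fn()  # one forward + backward pass through SplatterBlender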
@@ -14,7 +14,9 @@ from pytorch3d.renderer.blending import (
     sigmoid_alpha_blend,
     softmax_rgb_blend,
 )
+from pytorch3d.renderer.cameras import FoVPerspectiveCameras
 from pytorch3d.renderer.mesh.rasterizer import Fragments
+from pytorch3d.renderer.splatter_blend import SplatterBlender
 
 
 def sigmoid_blend_naive_loop(colors, fragments, blend_params):
@@ -412,6 +414,54 @@ class TestBlending(TestCaseMixin, unittest.TestCase):
 
         return fn
 
+    @staticmethod
+    def bm_splatter_blending(
+        num_meshes: int = 16,
+        image_size: int = 128,
+        faces_per_pixel: int = 2,
+        use_jit: bool = False,
+        device: str = "cpu",
+        backend: str = "pytorch",
+    ):
+        if torch.cuda.is_available() and "cuda:" in device:
+            # If a device other than the default is used, set the device explicitly.
+            torch.cuda.set_device(device)
+
+        device = torch.device(device)
+        torch.manual_seed(231)
+
+        # Create dummy outputs of rasterization
+        N, S, K = num_meshes, image_size, faces_per_pixel
+        F = 32  # num faces in the mesh
+
+        pixel_coords_camera = torch.randn(
+            (N, S, S, K, 3), device=device, requires_grad=True
+        )
+        cameras = FoVPerspectiveCameras(device=device)
+        colors = torch.randn((N, S, S, K, 3), device=device)
+        background_mask = torch.randint(
+            low=-1, high=F + 1, size=(N, S, S, K), device=device
+        )
+        background_mask = torch.full((N, S, S, K), False, dtype=bool, device=device)
+        blend_params = BlendParams(sigma=0.5)
+
+        torch.cuda.synchronize()
+        splatter_blender = SplatterBlender((N, S, S, K), colors.device)
+
+        def fn():
+            # test forward and backward pass
+            images = splatter_blender(
+                colors,
+                pixel_coords_camera,
+                cameras,
+                background_mask,
+                blend_params,
+            )
+            images.sum().backward()
+            torch.cuda.synchronize()
+
+        return fn
+
     def test_blend_params(self):
         """Test color parameter of BlendParams().
         Assert passed value overrides default value.
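bm_splatter_blending only builds dummy rasterizer outputs and returns a closure; the actual timing is handled by the benchmark harness invoked from bm_blending. As a rough standalone sketch (not part of this diff), the same closure could be timed with CUDA events, assuming a CUDA device:

# Illustrative standalone timing of the closure returned by bm_splatter_blending.
# The numbers referenced in the summary come from the benchmark harness, not this.
import torch

def time_closure(fn, iters: int = 10) -> float:
    fn()  # warm-up, mirroring warmup_iters=1 in the benchmark calls above
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        fn()
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / iters  # mean milliseconds per iteration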