Mirror of https://github.com/facebookresearch/pytorch3d.git, synced 2025-12-21 06:40:35 +08:00
Cuda updates
Summary: Updates to:
- enable CUDA kernel launches on any GPU (not just the default)
- add CUDA and contiguity checks for all kernels
- add checks to ensure all input tensors are on the same device (see the Python-level sketch after the commit metadata below)
- improve error reporting in the CUDA kernels
- run the CUDA tests on a random device rather than only the default

Reviewed By: jcjohnson, gkioxari

Differential Revision: D21215280

fbshipit-source-id: 1bedc9fe6c35e9e920bdc4d78ed12865b1005519
Committed by: Facebook GitHub Bot
Parent: c9267ab7af
Commit: c3d636dc8c
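The contiguity, device, and same-device checks described in the summary live in the C++/CUDA extension code, which is not shown in this diff. As a rough, hedged illustration of the invariants being enforced, here is a Python-level sketch; the helper name validate_cuda_inputs and the error messages are hypothetical and not part of PyTorch3D.

import torch


def validate_cuda_inputs(*tensors: torch.Tensor) -> None:
    # Hypothetical Python-level mirror of the checks this commit adds on the
    # C++/CUDA side: every input must be a contiguous CUDA tensor, and all
    # inputs must live on the same device so the kernel can be launched there.
    devices = set()
    for t in tensors:
        if not t.is_cuda:
            raise ValueError("expected a CUDA tensor, got one on %s" % t.device)
        if not t.is_contiguous():
            raise ValueError("expected a contiguous tensor")
        devices.add(t.device)
    if len(devices) > 1:
        raise ValueError("all tensors must be on the same device, got %s" % devices)

On the C++ side the same invariants are typically enforced with ATen tensor checks, and the kernel launch is guarded so it runs on the inputs' device rather than the current default device; the sketch above only illustrates the rules, not the actual implementation.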
@@ -20,6 +20,18 @@ def load_rgb_image(filename: str, data_dir: Union[str, Path]):
 TensorOrArray = Union[torch.Tensor, np.ndarray]


+def get_random_cuda_device() -> str:
+    """
+    Function to get a random GPU device from the
+    available devices. This is useful for testing
+    that custom cuda kernels can support inputs on
+    any device without having to set the device explicitly.
+    """
+    num_devices = torch.cuda.device_count()
+    rand_device_id = torch.randint(high=num_devices, size=(1,)).item()
+    return "cuda:%d" % rand_device_id
+
+
 class TestCaseMixin(unittest.TestCase):
     def assertSeparate(self, tensor1, tensor2) -> None:
         """
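As a minimal sketch of the testing pattern the summary describes, the snippet below picks a random CUDA device via the helper added in the diff above and checks that a computation stays on that device. It assumes get_random_cuda_device is in scope (defined or imported from the module changed above); the test class name, tensor shapes, and the stand-in operation are illustrative and not taken from the PyTorch3D test suite.

import unittest

import torch


class TestRandomDeviceSketch(unittest.TestCase):
    @unittest.skipIf(not torch.cuda.is_available(), "requires at least one CUDA device")
    def test_op_runs_on_random_device(self):
        # Build inputs on a randomly chosen (possibly non-default) GPU and
        # check that the result stays on that same device.
        # get_random_cuda_device is the helper added in the diff above.
        device = get_random_cuda_device()
        points = torch.rand((2, 100, 3), device=device)
        out = points * 2.0  # stand-in for a custom CUDA op under test
        self.assertEqual(str(points.device), device)
        self.assertEqual(out.device, points.device)

Because the device id is drawn at random each run, a kernel that implicitly assumes the default device (cuda:0) will eventually fail on a multi-GPU machine, which is the behaviour this commit's changes are meant to catch.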