pytorch3d/tests/common_testing.py
Nikhila Ravi cf84dacf2e fix get cuda device test error
Summary:
The CUDA test is failing on CircleCI with the error `random_ expects 'from' to be less than 'to', but got from=0 >= to=0`

This is because the `high` value passed to `torch.randint` is exclusive: it is 1 more than the highest value that can be drawn. So if only one CUDA device is available, the low and the high are both 0, as sketched below the commit metadata.

Reviewed By: gkioxari

Differential Revision: D21236669

fbshipit-source-id: 46c312d431c474f1f2c50747b1d5e7afbd7df3a9
2020-04-24 16:12:49 -07:00
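A minimal sketch of the `torch.randint` behaviour described above (illustrative only, not part of the commit):

    import torch

    # torch.randint samples from the half-open interval [low, high), so `high`
    # must be strictly greater than `low` (low defaults to 0).
    torch.randint(low=0, high=2, size=(1,))  # fine: draws 0 or 1
    torch.randint(low=0, high=0, size=(1,))  # RuntimeError: random_ expects 'from'
                                             # to be less than 'to', but got from=0 >= to=0

Guarding with `if num_devices > 1 else 0`, as in the fixed `get_random_cuda_device` below, keeps the call away from this edge case.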


# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import unittest
from pathlib import Path
from typing import Callable, Optional, Union

import numpy as np
import torch
from PIL import Image


def load_rgb_image(filename: str, data_dir: Union[str, Path]):
    filepath = data_dir / filename
    with Image.open(filepath) as raw_image:
        image = torch.from_numpy(np.array(raw_image) / 255.0)
    image = image.to(dtype=torch.float32)
    return image[..., :3]


TensorOrArray = Union[torch.Tensor, np.ndarray]


def get_random_cuda_device() -> str:
    """
    Function to get a random GPU device from the
    available devices. This is useful for testing
    that custom cuda kernels can support inputs on
    any device without having to set the device explicitly.
    """
    num_devices = torch.cuda.device_count()
    device_id = (
        torch.randint(high=num_devices, size=(1,)).item() if num_devices > 1 else 0
    )
    return "cuda:%d" % device_id


class TestCaseMixin(unittest.TestCase):
    def assertSeparate(self, tensor1, tensor2) -> None:
        """
        Verify that tensor1 and tensor2 have their data in distinct locations.
        """
        self.assertNotEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())

    def assertNotSeparate(self, tensor1, tensor2) -> None:
        """
        Verify that tensor1 and tensor2 have their data in the same locations.
        """
        self.assertEqual(tensor1.storage().data_ptr(), tensor2.storage().data_ptr())

    def assertAllSeparate(self, tensor_list) -> None:
        """
        Verify that all tensors in tensor_list have their data in
        distinct locations.
        """
        ptrs = [i.storage().data_ptr() for i in tensor_list]
        self.assertCountEqual(ptrs, set(ptrs))
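
    # Illustrative sketch (not part of the original file): clone() allocates new
    # storage, while view() on a contiguous tensor shares it, e.g.
    #
    #     a = torch.rand(3, 4)
    #     self.assertSeparate(a, a.clone())        # distinct storage
    #     self.assertNotSeparate(a, a.view(4, 3))  # shared storage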

    def assertNormsClose(
        self,
        input: TensorOrArray,
        other: TensorOrArray,
        norm_fn: Callable[[TensorOrArray], TensorOrArray],
        *,
        rtol: float = 1e-05,
        atol: float = 1e-08,
        equal_nan: bool = False,
        msg: Optional[str] = None,
    ) -> None:
        """
        Verifies that two tensors or arrays have the same shape and are close
        given absolute and relative tolerance; raises AssertionError otherwise.
        A custom norm function is computed before comparison. If no such pre-
        processing is needed, pass `torch.abs` or, equivalently, call `assertClose`.

        Args:
            input, other: two tensors or two arrays.
            norm_fn: The function evaluates
                `all(norm_fn(input - other) <= atol + rtol * norm_fn(other))`.
                norm_fn is a tensor -> tensor function; the output has:
                * all entries non-negative,
                * shape defined by the input shape only.
            rtol, atol, equal_nan: as for torch.allclose.
            msg: message in case the assertion is violated.

        Note:
            Optional arguments here are all keyword-only, to avoid confusion
            with msg arguments on other assert functions.
        """
        self.assertEqual(np.shape(input), np.shape(other))

        diff = norm_fn(input - other)
        other_ = norm_fn(other)
        # We want to generalise allclose(input, other), which is essentially
        #   all(diff <= atol + rtol * other)
        # but with a sophisticated handling of non-finite values.
        # We work around that by calling allclose() with the following arguments:
        # allclose(diff + other_, other_). This computes what we want because
        #   all(|diff + other_ - other_| <= atol + rtol * |other_|) ==
        #   all(|norm_fn(input - other)| <= atol + rtol * |norm_fn(other)|) ==
        #   all(norm_fn(input - other) <= atol + rtol * norm_fn(other)).
        backend = torch if torch.is_tensor(input) else np
        close = backend.allclose(
            diff + other_, other_, rtol=rtol, atol=atol, equal_nan=equal_nan
        )
        self.assertTrue(close, msg)
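
    # Illustrative sketch (not part of the original file): compare batches of
    # 3x3 matrices by a per-matrix Frobenius norm instead of elementwise, where
    # `predicted_R` and `expected_R` are hypothetical (N, 3, 3) tensors, e.g.
    #
    #     frobenius = lambda t: t.norm(dim=(-2, -1))
    #     self.assertNormsClose(predicted_R, expected_R, norm_fn=frobenius, atol=1e-4)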

    def assertClose(
        self,
        input: TensorOrArray,
        other: TensorOrArray,
        *,
        rtol: float = 1e-05,
        atol: float = 1e-08,
        equal_nan: bool = False,
        msg: Optional[str] = None,
    ) -> None:
        """
        Verifies that two tensors or arrays have the same shape and are close
        given absolute and relative tolerance, i.e. checks
        `all(|input - other| <= atol + rtol * |other|)`;
        raises AssertionError otherwise.

        Args:
            input, other: two tensors or two arrays.
            rtol, atol, equal_nan: as for torch.allclose.
            msg: message in case the assertion is violated.

        Note:
            Optional arguments here are all keyword-only, to avoid confusion
            with msg arguments on other assert functions.
        """
        self.assertEqual(np.shape(input), np.shape(other))

        backend = torch if torch.is_tensor(input) else np
        close = backend.allclose(
            input, other, rtol=rtol, atol=atol, equal_nan=equal_nan
        )
        if not close and msg is None:
            max_diff = backend.abs(input - other).max()
            self.fail(f"Not close. max diff {max_diff}.")
        self.assertTrue(close, msg)
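

# Illustrative usage sketch (hypothetical test, not part of the original file):
# a test module would typically mix TestCaseMixin into its test cases and use
# the helpers above, e.g.
#
#     class TestExample(TestCaseMixin, unittest.TestCase):
#         def test_clone_matches_source(self):
#             device = get_random_cuda_device()  # requires at least one GPU
#             x = torch.rand((4, 3), device=device)
#             y = x.clone()
#             self.assertSeparate(x, y)  # clone() owns its own storage
#             self.assertClose(x, y)     # but holds identical values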