Convert directory fbcode/vision to use the Ruff Formatter

Summary:
Converts the specified directory to use the Ruff formatter in pyfmt.

ruff_dog

If this diff causes merge conflicts when rebasing, please run
`hg status -n -0 --change . -I '**/*.{py,pyi}' | xargs -0 arc pyfmt`
on your diff and amend the resulting changes before rebasing onto latest;
that should reduce or eliminate any merge conflicts. (The pipeline lists the
Python files touched by the current commit, NUL-separated, and reformats
them in place with pyfmt.)

allow-large-files

Reviewed By: bottler

Differential Revision: D66472063

fbshipit-source-id: 35841cb397e4f8e066e2159550d2f56b403b1bef
Author: Thomas Polasek (committed by Facebook GitHub Bot)
Date: 2024-11-26 02:38:20 -08:00
Parent: f6c2ca6bfc
Commit: 055ab3a2e3
92 changed files with 121 additions and 191 deletions


@ -88,7 +88,6 @@ def workflow_pair(
upload=False,
filter_branch,
):
w = []
py = python_version.replace(".", "")
pyt = pytorch_version.replace(".", "")
@ -127,7 +126,6 @@ def generate_base_workflow(
btype,
filter_branch=None,
):
d = {
"name": base_workflow_name,
"python_version": python_version,


@ -10,6 +10,7 @@ This example demonstrates the most trivial, direct interface of the pulsar
sphere renderer. It renders and saves an image with 10 random spheres.
Output: basic.png.
"""
import logging
import math
from os import path
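
The one-line hunks in the example scripts, like the one above, are the formatter inserting a single blank line between a module docstring and the first import. A minimal sketch of the new layout (hypothetical module, assuming Ruff's Black-2024-compatible style):

"""Render ten random spheres and save basic.png."""

import logging  # a blank line now separates the docstring from the imports
import math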


@ -11,6 +11,7 @@ interface for sphere renderering. It renders and saves an image with
10 random spheres.
Output: basic-pt3d.png.
"""
import logging
from os import path


@ -14,6 +14,7 @@ distorted. Gradient-based optimization is used to converge towards the
original camera parameters.
Output: cam.gif.
"""
import logging
import math
from os import path


@ -14,6 +14,7 @@ distorted. Gradient-based optimization is used to converge towards the
original camera parameters.
Output: cam-pt3d.gif
"""
import logging
from os import path


@ -18,6 +18,7 @@ This example is not available yet through the 'unified' interface,
because opacity support has not landed in PyTorch3D for general data
structures yet.
"""
import logging
import math
from os import path


@ -13,6 +13,7 @@ The scene is initialized with random spheres. Gradient-based
optimization is used to converge towards a faithful
scene representation.
"""
import logging
import math


@ -13,6 +13,7 @@ The scene is initialized with random spheres. Gradient-based
optimization is used to converge towards a faithful
scene representation.
"""
import logging
import math


@ -7,7 +7,7 @@
# pyre-unsafe
""""
""" "
This file is the entry point for launching experiments with Implicitron.
Launch Training
@ -44,6 +44,7 @@ The outputs of the experiment are saved and logged in multiple ways:
config file.
"""
import logging
import os
import warnings
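
The `""""` to `""" "` change at the top of this file is the formatter's docstring normalization: when the docstring text itself begins with a quote character, a space is inserted after the opening triple quotes so the delimiters stay unambiguous. A minimal sketch (hypothetical function, not code from this diff):

def entry_point():
    """ "Implicitron" experiments start here; note the space after the opening quotes."""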


@ -26,7 +26,6 @@ logger = logging.getLogger(__name__)
class ModelFactoryBase(ReplaceableBase):
resume: bool = True # resume from the last checkpoint
def __call__(self, **kwargs) -> ImplicitronModelBase:


@ -161,7 +161,6 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
for epoch in range(start_epoch, self.max_epochs):
# automatic new_epoch and plotting of stats at every epoch start
with stats:
# Make sure to re-seed random generators to ensure reproducibility
# even after restart.
seed_all_random_engines(seed + epoch)


@ -53,12 +53,8 @@ class TestExperiment(unittest.TestCase):
cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_class_type = (
"JsonIndexDatasetMapProvider"
)
-dataset_args = (
-cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
-)
-dataloader_args = (
-cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
-)
+dataset_args = cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
+dataloader_args = cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
dataset_args.category = "skateboard"
dataset_args.test_restrict_sequence_id = 0
dataset_args.dataset_root = "manifold://co3d/tree/extracted"
@ -94,12 +90,8 @@ class TestExperiment(unittest.TestCase):
cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_class_type = (
"JsonIndexDatasetMapProvider"
)
-dataset_args = (
-cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
-)
-dataloader_args = (
-cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
-)
+dataset_args = cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
+dataloader_args = cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
dataset_args.category = "skateboard"
dataset_args.test_restrict_sequence_id = 0
dataset_args.dataset_root = "manifold://co3d/tree/extracted"
@ -111,9 +103,7 @@ class TestExperiment(unittest.TestCase):
cfg.training_loop_ImplicitronTrainingLoop_args.max_epochs = 2
cfg.training_loop_ImplicitronTrainingLoop_args.store_checkpoints = False
cfg.optimizer_factory_ImplicitronOptimizerFactory_args.lr_policy = "Exponential"
-cfg.optimizer_factory_ImplicitronOptimizerFactory_args.exponential_lr_step_size = (
-2
-)
+cfg.optimizer_factory_ImplicitronOptimizerFactory_args.exponential_lr_step_size = 2
if DEBUG:
experiment.dump_cfg(cfg)
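
The pattern above recurs throughout the diff: when an assignment's right-hand side is a single unsplittable atom, such as a long dotted attribute chain or a bare literal, the formatter drops the wrapping parentheses even if the line then exceeds the length limit. A hypothetical sketch (`cfg` is a stand-in, not the real Implicitron config):

from types import SimpleNamespace

cfg = SimpleNamespace(exponential_lr_step_size=None)

# Before:
# cfg.exponential_lr_step_size = (
#     2
# )
# After: the parentheses around the lone atom are removed.
cfg.exponential_lr_step_size = 2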


@ -81,8 +81,9 @@ class TestOptimizerFactory(unittest.TestCase):
def test_param_overrides_self_param_group_assignment(self):
pa, pb, pc = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(3)]
-na, nb = Node(params=[pa]), Node(
-params=[pb], param_groups={"self": "pb_self", "p1": "pb_param"}
-)
+na, nb = (
+Node(params=[pa]),
+Node(params=[pb], param_groups={"self": "pb_self", "p1": "pb_param"}),
+)
root = Node(children=[na, nb], params=[pc], param_groups={"m1": "pb_member"})
param_groups = self._get_param_groups(root)


@ -194,7 +194,6 @@ class Stats:
it = self.it[stat_set]
for stat in self.log_vars:
if stat not in self.stats[stat_set]:
self.stats[stat_set][stat] = AverageMeter()


@ -24,7 +24,6 @@ CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs"
@hydra.main(config_path=CONFIG_DIR, config_name="lego")
def main(cfg: DictConfig):
# Device on which to run.
if torch.cuda.is_available():
device = "cuda"


@ -42,7 +42,6 @@ class TestRaysampler(unittest.TestCase):
cameras, rays = [], []
for _ in range(batch_size):
R = random_rotations(1)
T = torch.randn(1, 3)
focal_length = torch.rand(1, 2) + 0.5


@ -25,7 +25,6 @@ CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs"
@hydra.main(config_path=CONFIG_DIR, config_name="lego")
def main(cfg: DictConfig):
# Set the relevant seeds for reproducibility.
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
@ -219,7 +218,6 @@ def main(cfg: DictConfig):
# Validation
if epoch % cfg.validation_epoch_interval == 0 and epoch > 0:
# Sample a validation camera/image.
val_batch = next(val_dataloader.__iter__())
val_image, val_camera, camera_idx = val_batch[0].values()


@ -17,7 +17,7 @@ Some functions which depend on PyTorch or Python versions.
def meshgrid_ij(
-*A: Union[torch.Tensor, Sequence[torch.Tensor]]
+*A: Union[torch.Tensor, Sequence[torch.Tensor]],
) -> Tuple[torch.Tensor, ...]: # pragma: no cover
"""
Like torch.meshgrid was before PyTorch 1.10.0, i.e. with indexing set to ij
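
Signature hunks like this one add a trailing comma after the final parameter of any signature that stays split across lines, including after a variadic `*args` parameter, where the previous style omitted it. A sketch with a hypothetical function (not the real `meshgrid_ij`):

def stack_all(
    *tensors,  # the trailing comma is now added even after *args
):
    return list(tensors)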


@ -83,7 +83,7 @@ class ShapeNetCore(ShapeNetBase): # pragma: no cover
):
synset_set.add(synset)
elif (synset in self.synset_inv.keys()) and (
-(path.isdir(path.join(data_dir, self.synset_inv[synset])))
+path.isdir(path.join(data_dir, self.synset_inv[synset]))
):
synset_set.add(self.synset_inv[synset])
else:


@ -36,7 +36,6 @@ def collate_batched_meshes(batch: List[Dict]): # pragma: no cover
collated_dict["mesh"] = None
if {"verts", "faces"}.issubset(collated_dict.keys()):
textures = None
if "textures" in collated_dict:
textures = TexturesAtlas(atlas=collated_dict["textures"])


@ -222,7 +222,6 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase):
self.dataset_map = dataset_map
def _load_category(self, category: str) -> DatasetMap:
frame_file = os.path.join(self.dataset_root, category, "frame_annotations.jgz")
sequence_file = os.path.join(
self.dataset_root, category, "sequence_annotations.jgz"


@ -75,7 +75,6 @@ def _minify(basedir, path_manager, factors=(), resolutions=()):
def _load_data(
basedir, factor=None, width=None, height=None, load_imgs=True, path_manager=None
):
poses_arr = np.load(
_local_path(path_manager, os.path.join(basedir, "poses_bounds.npy"))
)
@ -164,7 +163,6 @@ def ptstocam(pts, c2w):
def poses_avg(poses):
hwf = poses[0, :3, -1:]
center = poses[:, :3, 3].mean(0)
@ -192,7 +190,6 @@ def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, rots, N):
def recenter_poses(poses):
poses_ = poses + 0
bottom = np.reshape([0, 0, 0, 1.0], [1, 4])
c2w = poses_avg(poses)
@ -256,7 +253,6 @@ def spherify_poses(poses, bds):
new_poses = []
for th in np.linspace(0.0, 2.0 * np.pi, 120):
camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
up = np.array([0, 0, -1.0])
@ -311,7 +307,6 @@ def load_llff_data(
path_zflat=False,
path_manager=None,
):
poses, bds, imgs = _load_data(
basedir, factor=factor, path_manager=path_manager
) # factor=8 downsamples original imgs by 8x


@ -201,7 +201,6 @@ def resize_image(
image_width: Optional[int],
mode: str = "bilinear",
) -> Tuple[torch.Tensor, float, torch.Tensor]:
if isinstance(image, np.ndarray):
image = torch.from_numpy(image)


@ -299,7 +299,6 @@ def eval_batch(
)
for loss_fg_mask, name_postfix in zip((mask_crop, mask_fg), ("_masked", "_fg")):
loss_mask_now = mask_crop * loss_fg_mask
for rgb_metric_name, rgb_metric_fun in zip(


@ -306,7 +306,6 @@ class SRNRaymarchHyperNet(Configurable, torch.nn.Module):
global_code=None,
**kwargs,
):
if global_code is None:
raise ValueError("SRN Hypernetwork requires a non-trivial global code.")


@ -40,7 +40,6 @@ def create_embeddings_for_implicit_function(
xyz_embedding_function: Optional[Callable],
diag_cov: Optional[torch.Tensor] = None,
) -> torch.Tensor:
bs, *spatial_size, pts_per_ray, _ = xyz_world.shape
if xyz_in_camera_coords:
@ -64,7 +63,6 @@ def create_embeddings_for_implicit_function(
0,
)
else:
embeds = xyz_embedding_function(ray_points_for_embed, diag_cov=diag_cov)
embeds = embeds.reshape(
bs,


@ -220,8 +220,7 @@ class SignedDistanceFunctionRenderer(BaseRenderer, torch.nn.Module):
)
mask_full.view(-1, 1)[~surface_mask] = torch.sigmoid(
# pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
--self.soft_mask_alpha
-* sdf_output[~surface_mask]
+-self.soft_mask_alpha * sdf_output[~surface_mask]
)
# scatter points with surface_mask


@ -21,7 +21,6 @@ def cleanup_eval_depth(
sigma: float = 0.01,
image=None,
):
ba, _, H, W = depth.shape
pcl = point_cloud.points_padded()


@ -100,7 +100,6 @@ def render_point_cloud_pytorch3d(
bin_size: Optional[int] = None,
**kwargs,
):
# feature dimension
featdim = point_cloud.features_packed().shape[-1]


@ -37,7 +37,6 @@ class AverageMeter:
self.count = 0
def update(self, val, n=1, epoch=0):
# make sure the history is of the same len as epoch
while len(self.history) <= epoch:
self.history.append([])
@ -115,7 +114,6 @@ class Stats:
visdom_server="http://localhost",
visdom_port=8097,
):
self.log_vars = log_vars
self.visdom_env = visdom_env
self.visdom_server = visdom_server
@ -202,7 +200,6 @@ class Stats:
self.log_vars.append(add_log_var)
def update(self, preds, time_start=None, freeze_iter=False, stat_set="train"):
if self.epoch == -1: # uninitialized
logger.warning(
"epoch==-1 means uninitialized stats structure -> new_epoch() called"
@ -219,7 +216,6 @@ class Stats:
epoch = self.epoch
for stat in self.log_vars:
if stat not in self.stats[stat_set]:
self.stats[stat_set][stat] = AverageMeter()
@ -248,7 +244,6 @@ class Stats:
self.stats[stat_set][stat].update(val, epoch=epoch, n=1)
def get_epoch_averages(self, epoch=None):
stat_sets = list(self.stats.keys())
if epoch is None:
@ -345,7 +340,6 @@ class Stats:
def plot_stats(
self, visdom_env=None, plot_file=None, visdom_server=None, visdom_port=None
):
# use the cached visdom env if none supplied
if visdom_env is None:
visdom_env = self.visdom_env
@ -449,7 +443,6 @@ class Stats:
warnings.warn("Cant dump stats due to insufficient permissions!")
def synchronize_logged_vars(self, log_vars, default_val=float("NaN")):
stat_sets = list(self.stats.keys())
# remove the additional log_vars
@ -490,11 +483,12 @@ class Stats:
for ep in range(lastep):
self.stats[stat_set][stat].update(default_val, n=1, epoch=ep)
epoch_generated = self.stats[stat_set][stat].get_epoch()
-assert (
-epoch_generated == self.epoch + 1
-), "bad epoch of synchronized log_var! %d vs %d" % (
-self.epoch + 1,
-epoch_generated,
-)
+assert epoch_generated == self.epoch + 1, (
+"bad epoch of synchronized log_var! %d vs %d"
+% (
+self.epoch + 1,
+epoch_generated,
+)
+)
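
This assert restyle repeats in several files below (see the camera-projection hunks): the asserted condition now stays on the `assert` line, and the long message, rather than the condition, is wrapped in parentheses. A runnable sketch with hypothetical values:

epoch_generated, expected_epoch = 4, 4

# Before:
# assert (
#     epoch_generated == expected_epoch
# ), "bad epoch of synchronized log_var! %d vs %d" % (expected_epoch, epoch_generated)

# After:
assert epoch_generated == expected_epoch, (
    "bad epoch of synchronized log_var! %d vs %d"
    % (
        expected_epoch,
        epoch_generated,
    )
)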


@ -7,6 +7,7 @@
# pyre-unsafe
"""This module implements utility functions for loading .mtl files and textures."""
import os
import warnings
from typing import Dict, List, Optional, Tuple


@ -8,6 +8,7 @@
"""This module implements utility functions for loading and saving meshes."""
import os
import warnings
from collections import namedtuple
@ -813,7 +814,6 @@ def _save(
save_texture: bool = False,
save_normals: bool = False,
) -> None:
if len(verts) and (verts.dim() != 2 or verts.size(1) != 3):
message = "'verts' should either be empty or of shape (num_verts, 3)."
raise ValueError(message)


@ -14,6 +14,7 @@ meshes as .off files.
This format is introduced, for example, at
http://www.geomview.org/docs/html/OFF.html .
"""
import warnings
from typing import cast, Optional, Tuple, Union


@ -11,6 +11,7 @@
This module implements utility functions for loading and saving
meshes and point clouds as PLY files.
"""
import itertools
import os
import struct


@ -62,7 +62,7 @@ def cubify(
*,
feats: Optional[torch.Tensor] = None,
device=None,
align: str = "topleft"
align: str = "topleft",
) -> Meshes:
r"""
Converts a voxel to a mesh by replacing each occupied voxel with a cube


@ -85,7 +85,6 @@ class _points_to_volumes_function(Function):
align_corners: bool,
splat: bool,
):
ctx.mark_dirty(volume_densities, volume_features)
N, P, D = points_3d.shape
@ -497,7 +496,6 @@ def _check_points_to_volumes_inputs(
grid_sizes: torch.LongTensor,
mask: Optional[torch.Tensor] = None,
) -> None:
max_grid_size = grid_sizes.max(dim=0).values
if torch.prod(max_grid_size) > volume_densities.shape[1]:
raise ValueError(


@ -11,6 +11,7 @@
This module implements utility functions for sampling points from
batches of meshes.
"""
import sys
from typing import Tuple, Union


@ -110,30 +110,32 @@ def _pulsar_from_opencv_projection(
# Validate parameters.
image_size_wh = image_size.to(R).flip(dims=(1,))
-assert torch.all(
-image_size_wh > 0
-), "height and width must be positive but min is: %s" % (
-str(image_size_wh.min().item())
-)
+assert torch.all(image_size_wh > 0), (
+"height and width must be positive but min is: %s"
+% (str(image_size_wh.min().item()))
+)
-assert (
-camera_matrix.size(1) == 3 and camera_matrix.size(2) == 3
-), "Incorrect camera matrix shape: expected 3x3 but got %dx%d" % (
-camera_matrix.size(1),
-camera_matrix.size(2),
-)
+assert camera_matrix.size(1) == 3 and camera_matrix.size(2) == 3, (
+"Incorrect camera matrix shape: expected 3x3 but got %dx%d"
+% (
+camera_matrix.size(1),
+camera_matrix.size(2),
+)
+)
-assert (
-R.size(1) == 3 and R.size(2) == 3
-), "Incorrect R shape: expected 3x3 but got %dx%d" % (
-R.size(1),
-R.size(2),
-)
+assert R.size(1) == 3 and R.size(2) == 3, (
+"Incorrect R shape: expected 3x3 but got %dx%d"
+% (
+R.size(1),
+R.size(2),
+)
+)
if len(tvec.size()) == 2:
tvec = tvec.unsqueeze(2)
-assert (
-tvec.size(1) == 3 and tvec.size(2) == 1
-), "Incorrect tvec shape: expected 3x1 but got %dx%d" % (
-tvec.size(1),
-tvec.size(2),
-)
+assert tvec.size(1) == 3 and tvec.size(2) == 1, (
+"Incorrect tvec shape: expected 3x1 but got %dx%d"
+% (
+tvec.size(1),
+tvec.size(2),
+)
+)
# Check batch size.
batch_size = camera_matrix.size(0)
@ -141,11 +143,12 @@ def _pulsar_from_opencv_projection(
batch_size,
R.size(0),
)
-assert (
-tvec.size(0) == batch_size
-), "Expected tvec to have batch size %d. Has size %d." % (
-batch_size,
-tvec.size(0),
-)
+assert tvec.size(0) == batch_size, (
+"Expected tvec to have batch size %d. Has size %d."
+% (
+batch_size,
+tvec.size(0),
+)
+)
# Check image sizes.
image_w = image_size_wh[0, 0]


@ -1176,7 +1176,12 @@ class PerspectiveCameras(CamerasBase):
unprojection_transform = to_camera_transform.inverse()
xy_inv_depth = torch.cat(
-(xy_depth[..., :2], 1.0 / xy_depth[..., 2:3]), dim=-1 # type: ignore
+# pyre-fixme[6]: For 1st argument expected `Union[List[Tensor],
+# tuple[Tensor, ...]]` but got `Tuple[Tensor, float]`.
+# pyre-fixme[58]: `/` is not supported for operand types `float` and
+# `Tensor`.
+(xy_depth[..., :2], 1.0 / xy_depth[..., 2:3]),
+dim=-1, # type: ignore
)
return unprojection_transform.transform_points(xy_inv_depth)


@ -281,8 +281,10 @@ class FishEyeCameras(CamerasBase):
# project from camera space to image space
N = len(self.radial_params)
if not self.check_input(points, N):
msg = "Expected points of (P, 3) with batch_size 1 or N, or shape (M, P, 3) \
msg = (
"Expected points of (P, 3) with batch_size 1 or N, or shape (M, P, 3) \
with batch_size 1; got points of shape %r and batch_size %r"
)
raise ValueError(msg % (points.shape, N))
if N == 1:


@ -67,7 +67,7 @@ class HeterogeneousRayBundle:
def ray_bundle_to_ray_points(
-ray_bundle: Union[RayBundle, HeterogeneousRayBundle]
+ray_bundle: Union[RayBundle, HeterogeneousRayBundle],
) -> torch.Tensor:
"""
Converts rays parametrized with a `ray_bundle` (an instance of the `RayBundle`


@ -12,6 +12,7 @@ Proper Python support for pytorch requires creating a torch.autograd.function
(independent of whether this is being done within the C++ module). This is done
here and a torch.nn.Module is exposed for the use in more complex models.
"""
import logging
import warnings
from typing import Optional, Tuple, Union


@ -133,8 +133,7 @@ def _get_splat_kernel_normalization(
epsilon = 0.05
normalization_constant = torch.exp(
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
--(offsets**2).sum(dim=1)
-/ (2 * sigma**2)
+-(offsets**2).sum(dim=1) / (2 * sigma**2)
).sum()
# We add an epsilon to the normalization constant to ensure the gradient will travel


@ -114,7 +114,6 @@ class TensorProperties(nn.Module):
self.device = make_device(device)
self._N = 0
if kwargs is not None:
# broadcast all inputs which are float/int/list/tuple/tensor/array
# set as attributes anything else e.g. strings, bools
args_to_broadcast = {}
@ -439,7 +438,7 @@ def ndc_to_grid_sample_coords(
def parse_image_size(
-image_size: Union[List[int], Tuple[int, int], int]
+image_size: Union[List[int], Tuple[int, int], int],
) -> Tuple[int, int]:
"""
Args:


@ -1531,7 +1531,6 @@ class Meshes:
def sample_textures(self, fragments):
if self.textures is not None:
# Check dimensions of textures match that of meshes
shape_ok = self.textures.check_shapes(self._N, self._V, self._F)
if not shape_ok:


@ -1274,7 +1274,7 @@ def join_pointclouds_as_batch(pointclouds: Sequence[Pointclouds]) -> Pointclouds
def join_pointclouds_as_scene(
-pointclouds: Union[Pointclouds, List[Pointclouds]]
+pointclouds: Union[Pointclouds, List[Pointclouds]],
) -> Pointclouds:
"""
Joins a batch of point cloud in the form of a Pointclouds object or a list of Pointclouds


@ -311,9 +311,7 @@ def plot_scene(
)
else:
msg = "Invalid number {} of viewpoint cameras were provided. Either 1 \
or {} cameras are required".format(
len(viewpoint_cameras), len(subplots)
)
or {} cameras are required".format(len(viewpoint_cameras), len(subplots))
warnings.warn(msg)
for subplot_idx in range(len(subplots)):


@ -11,7 +11,6 @@ from tests.test_ball_query import TestBallQuery
def bm_ball_query() -> None:
backends = ["cpu", "cuda:0"]
kwargs_list = []


@ -11,7 +11,6 @@ from tests.test_cameras_alignment import TestCamerasAlignment
def bm_cameras_alignment() -> None:
case_grid = {
"batch_size": [10, 100, 1000],
"mode": ["centers", "extrinsics"],


@ -11,7 +11,6 @@ from tests.test_knn import TestKNN
def bm_knn() -> None:
backends = ["cpu", "cuda:0"]
kwargs_list = []


@ -12,7 +12,6 @@ from tests.test_point_mesh_distance import TestPointMeshDistance
def bm_point_mesh_distance() -> None:
backend = ["cuda:0"]
kwargs_list = []


@ -12,7 +12,6 @@ from tests.test_points_alignment import TestCorrespondingPointsAlignment, TestIC
def bm_iterative_closest_point() -> None:
case_grid = {
"batch_size": [1, 10],
"dim": [3, 20],
@ -43,7 +42,6 @@ def bm_iterative_closest_point() -> None:
def bm_corresponding_points_alignment() -> None:
case_grid = {
"allow_reflection": [True, False],
"batch_size": [1, 10, 100],


@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.
"""Test render speed."""
import logging
import sys
from os import path


@ -11,7 +11,6 @@ from tests.test_sample_pdf import TestSamplePDF
def bm_sample_pdf() -> None:
backends = ["python_cuda", "cuda", "python_cpu", "cpu"]
kwargs_list = []


@ -13,7 +13,6 @@ from tests.test_sample_points_from_meshes import TestSamplePoints
def bm_sample_points() -> None:
backend = ["cpu"]
if torch.cuda.is_available():
backend.append("cuda:0")


@ -576,7 +576,6 @@ class TestConfig(unittest.TestCase):
a: int = 9
for Unprocessed in [UnprocessedConfigurable, UnprocessedReplaceable]:
self.assertFalse(_is_actually_dataclass(Unprocessed))
unprocessed = Unprocessed()
self.assertTrue(_is_actually_dataclass(Unprocessed))


@ -71,7 +71,6 @@ class TestEvaluation(unittest.TestCase):
for diff in 10 ** torch.linspace(-5, 0, 6):
for crop in (0, 5):
pred = gt + (torch.rand_like(gt) - 0.5) * 2 * diff
# scaled prediction test


@ -74,7 +74,6 @@ class TestGenericModel(unittest.TestCase):
eval_test: bool = True,
bw_test: bool = True,
):
R, T = look_at_view_transform(azim=torch.rand(n_train_cameras) * 360)
cameras = PerspectiveCameras(R=R, T=T, device=device)


@ -171,9 +171,7 @@ def _make_random_json_dataset_map_provider_v2_data(
Image.fromarray(
(mask * 255.0).astype(np.uint8),
mode="L",
-).convert(
-"L"
-).save(mask_path)
+).convert("L").save(mask_path)
fa = FrameAnnotation(
sequence_name=seq_name,


@ -65,7 +65,6 @@ class TestModelVisualize(unittest.TestCase):
visdom_show_preds = Visdom().check_connection()
for load_dataset_pointcloud in [True, False]:
model = _PointcloudRenderingModel(
train_dataset,
show_sequence_name,


@ -142,7 +142,6 @@ class TestViewsampling(unittest.TestCase):
expand_args_fields(ViewSampler)
for masked_sampling in (True, False):
view_sampler = ViewSampler(masked_sampling=masked_sampling)
feats_sampled, masks_sampled = view_sampler(


@ -275,7 +275,6 @@ class TestVoxelGrids(TestCaseMixin, unittest.TestCase):
return torch.cat(result)
def test_interpolation(self):
with self.subTest("1D interpolation"):
points = self.get_random_normalized_points(
n_grids=4, n_points=5, dimension=1


@ -6,6 +6,7 @@
# LICENSE file in the root directory of this source tree.
"""Create multiview data."""
import sys
from os import path


@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.
"""Test number of channels."""
import logging
import sys
import unittest


@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.
"""Test the sorting of the closest spheres."""
import logging
import os
import sys


@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.
"""Basic rendering test."""
import logging
import os
import sys


@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.
"""Test right hand/left hand system compatibility."""
import logging
import sys
import unittest


@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.
"""Tests for the orthogonal projection."""
import logging
import sys
import unittest


@ -6,6 +6,7 @@
# LICENSE file in the root directory of this source tree.
"""Test right hand/left hand system compatibility."""
import sys
import unittest
from os import path


@ -401,7 +401,10 @@ class TestBlending(TestCaseMixin, unittest.TestCase):
dists1 = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
zbuf = torch.randn(size=(N, S, S, K), requires_grad=True, device=device)
fragments = Fragments(
-pix_to_face=pix_to_face, bary_coords=empty, zbuf=zbuf, dists=dists1 # dummy
+pix_to_face=pix_to_face,
+bary_coords=empty,
+zbuf=zbuf,
+dists=dists1, # dummy
)
blend_params = BlendParams(sigma=1e-3)


@ -495,8 +495,8 @@ class TestCamerasCommon(TestCaseMixin, unittest.TestCase):
screen_cam_params["image_size"] = image_size
screen_cam_params["focal_length"] = fcl * scale
screen_cam_params["principal_point"] = (
-image_size[:, [1, 0]]
-) / 2.0 - prc * scale
+(image_size[:, [1, 0]]) / 2.0 - prc * scale
+)
screen_cam_params["in_ndc"] = False
else:
raise ValueError(str(cam_type))
@ -615,7 +615,6 @@ class TestCamerasCommon(TestCaseMixin, unittest.TestCase):
OrthographicCameras,
PerspectiveCameras,
):
# init the cameras
cameras = init_random_cameras(cam_type, batch_size)
# xyz - the ground truth point cloud


@ -17,7 +17,6 @@ from .common_testing import get_random_cuda_device, TestCaseMixin
class TestAccumulatePoints(TestCaseMixin, unittest.TestCase):
# NAIVE PYTHON IMPLEMENTATIONS (USED FOR TESTING)
@staticmethod
def accumulate_alphacomposite_python(points_idx, alphas, features):
@ -63,7 +62,6 @@ class TestAccumulatePoints(TestCaseMixin, unittest.TestCase):
for c in range(0, C):
for i in range(0, W):
for j in range(0, H):
for k in range(0, K):
n_idx = points_idx[b, k, j, i]
@ -237,16 +235,16 @@ class TestAccumulatePoints(TestCaseMixin, unittest.TestCase):
[
# fmt: off
[
[0, 0, 0, 0], # noqa: E241, E201
[0, 0, 0, 0], # noqa: E241, E201
[0, -1, -1, -1], # noqa: E241, E201
[0, 1, 1, 0], # noqa: E241, E201
[0, 0, 0, 0], # noqa: E241, E201
[0, 1, 1, 0], # noqa: E241, E201
[0, 0, 0, 0], # noqa: E241, E201
],
[
[2, 2, 2, 2], # noqa: E241, E201
[2, 3, 3, 2], # noqa: E241, E201
[2, 3, 3, 2], # noqa: E241, E201
[2, 2, -1, 2], # noqa: E241, E201
[2, 2, 2, 2], # noqa: E241, E201
[2, 3, 3, 2], # noqa: E241, E201
[2, 3, 3, 2], # noqa: E241, E201
[2, 2, -1, 2], # noqa: E241, E201
],
# fmt: on
]


@ -95,7 +95,6 @@ class TestIoU3D(TestCaseMixin, unittest.TestCase):
return ious
def _test_iou(self, overlap_fn, device):
box1 = torch.tensor(
UNIT_BOX,
dtype=torch.float32,


@ -23,7 +23,6 @@ def convert_to_local(verts, volume_dim):
class TestCubeConfiguration(TestCaseMixin, unittest.TestCase):
# Test single cubes. Each case corresponds to the corresponding
# cube vertex configuration in each case here (0-indexed):
# https://en.wikipedia.org/wiki/Marching_cubes#/media/File:MarchingCubes.svg


@ -335,7 +335,6 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
Meshes(verts=verts_padded, faces=faces_padded)
def test_simple_random_meshes(self):
# Define the test mesh object either as a list or tensor of faces/verts.
for lists_to_tensors in (False, True):
N = 10
@ -1120,7 +1119,6 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
self.assertClose(face_areas, expected_areas)
def test_compute_normals(self):
# Simple case with one mesh where normals point in either +/- ijk
verts = torch.tensor(
[


@ -1072,7 +1072,6 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
for with_normals in (True, False):
for run_padded in (True, False):
for run_packed in (True, False):
clouds = TestPointclouds.init_cloud(
3,
100,


@ -56,7 +56,6 @@ class TestICP(TestCaseMixin, unittest.TestCase):
use_pointclouds=False,
estimate_scale=False,
):
device = torch.device("cuda:0")
# initialize a ground truth point cloud
@ -433,7 +432,6 @@ class TestCorrespondingPointsAlignment(TestCaseMixin, unittest.TestCase):
reflect=False,
random_weights=False,
):
device = torch.device("cuda:0")
# initialize a ground truth point cloud


@ -258,9 +258,7 @@ class TestPointsToVolumes(TestCaseMixin, unittest.TestCase):
batch_size = 4
for volume_size in ([25, 25, 25], [30, 25, 15]):
for python, interp_mode in product([True, False], ["trilinear", "nearest"]):
(pointclouds, initial_volumes) = init_volume_boundary_pointcloud(
volume_size=volume_size,
n_points=int(1e5),


@ -7,6 +7,7 @@
"""
Sanity checks for loading R2N2.
"""
import json
import os
import unittest


@ -210,13 +210,11 @@ class TestRaysampling(TestCaseMixin, unittest.TestCase):
device = torch.device("cuda")
for n_pts_per_ray in (100, 1):
for raysampler_type in (
MonteCarloRaysampler,
MultinomialRaysampler,
NDCMultinomialRaysampler,
):
raysampler = TestRaysampling.init_raysampler(
raysampler_type=raysampler_type,
min_x=min_x,
@ -258,7 +256,6 @@ class TestRaysampling(TestCaseMixin, unittest.TestCase):
OrthographicCameras,
PerspectiveCameras,
):
# init a batch of random cameras
cameras = init_random_cameras(
cam_type, batch_size, random_z=True


@ -215,9 +215,7 @@ class TestRenderImplicit(TestCaseMixin, unittest.TestCase):
volumetric_function=spherical_volumetric_function,
sphere_centroid=sphere_centroid,
sphere_diameter=sphere_diameter,
-)[
-0
-]
+)[0]
# check that the renderer does not erase gradients
loss = images_opacities.sum()


@ -8,6 +8,7 @@
"""
Sanity checks for output images from the renderer.
"""
import os
import unittest
from collections import namedtuple


@ -12,6 +12,7 @@ behind the image plane. These faces are clipped and then rasterized.
See pytorch3d/renderer/mesh/clip.py for more details about the
clipping process.
"""
import unittest
import imageio


@ -134,7 +134,6 @@ class TestRenderMeshesMultiGPU(TestCaseMixin, unittest.TestCase):
self.renderer = self.init_render(device)
def init_render(self, device):
cameras = FoVPerspectiveCameras().to(device)
raster_settings = RasterizationSettings(
image_size=128, blur_radius=0.0, faces_per_pixel=1


@ -8,6 +8,7 @@
"""
Sanity checks for output images from the pointcloud renderer.
"""
import unittest
import warnings
from os import path
@ -220,7 +221,8 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
# znear and zfar is required in this case.
self.assertRaises(
ValueError,
-lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
+lambda renderer=renderer,
+pointclouds=pointclouds: renderer.forward(
point_clouds=pointclouds, gamma=(1e-4,)
),
)
@ -233,7 +235,8 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
# znear and zfar must be batched.
self.assertRaises(
TypeError,
-lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
+lambda renderer=renderer,
+pointclouds=pointclouds: renderer.forward(
point_clouds=pointclouds,
gamma=(1e-4,),
znear=1.0,
@ -242,7 +245,8 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
)
self.assertRaises(
TypeError,
-lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
+lambda renderer=renderer,
+pointclouds=pointclouds: renderer.forward(
point_clouds=pointclouds,
gamma=(1e-4,),
znear=(1.0,),
@ -253,7 +257,8 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
# gamma must be batched.
self.assertRaises(
TypeError,
-lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
+lambda renderer=renderer,
+pointclouds=pointclouds: renderer.forward(
point_clouds=pointclouds, gamma=1e-4
),
)
@ -262,7 +267,8 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
renderer.rasterizer.raster_settings.image_size = 0
self.assertRaises(
ValueError,
-lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
+lambda renderer=renderer,
+pointclouds=pointclouds: renderer.forward(
point_clouds=pointclouds, gamma=(1e-4,)
),
)
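
The lambda hunks in this file show that an over-long lambda now has its parameter list split across lines, the same way a long def signature is wrapped. A small sketch (hypothetical callable, not the renderer):

callback = (
    lambda renderer=print,
    payload="spheres": renderer(payload)
)
callback()  # prints: spheres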
@ -372,7 +378,6 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
self.assertClose(rgb, image_ref)
def test_compositor_background_color_rgba(self):
N, H, W, K, C, P = 1, 15, 15, 20, 4, 225
ptclds = torch.randn((C, P))
alphas = torch.rand((N, K, H, W))
@ -385,7 +390,6 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
]
for compositor_class, composite_func in compositor_funcs:
compositor = compositor_class(background_color)
# run the forward method to generate masked images
@ -423,7 +427,6 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
)
def test_compositor_background_color_rgb(self):
N, H, W, K, C, P = 1, 15, 15, 20, 3, 225
ptclds = torch.randn((C, P))
alphas = torch.rand((N, K, H, W))
@ -436,7 +439,6 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
]
for compositor_class, composite_func in compositor_funcs:
compositor = compositor_class(background_color)
# run the forward method to generate masked images


@ -308,7 +308,6 @@ class TestRenderVolumes(TestCaseMixin, unittest.TestCase):
# init the boundary volume
for shape in ("sphere", "cube"):
if not DEBUG and shape == "cube":
# do not run numeric checks for the cube as the
# differences in rendering equations make the renders incomparable
@ -515,7 +514,6 @@ class TestRenderVolumes(TestCaseMixin, unittest.TestCase):
for shape in ("sphere", "cube"):
for sample_mode in ("bilinear", "nearest"):
volumes = init_boundary_volume(
volume_size=volume_size, batch_size=n_frames, shape=shape
)[0]
@ -593,7 +591,6 @@ class TestRenderVolumes(TestCaseMixin, unittest.TestCase):
for volume_size in ([25, 25, 25],):
for sample_mode in ("bilinear", "nearest"):
volume_translation = torch.zeros(4, 3)
volume_translation.requires_grad = True
volumes, volume_voxel_size, _ = init_boundary_volume(


@ -231,7 +231,6 @@ class TestTensorProperties(TestCaseMixin, unittest.TestCase):
# check both H > W and W > H
for flip_axes in [False, True]:
# non-batched version
for xy_ndc, xy_gs in xy_ndc_gs:
xy_gs_predicted = ndc_to_grid_sample_coords(


@ -301,7 +301,6 @@ class TestSamplePoints(TestCaseMixin, unittest.TestCase):
)
def test_outputs(self):
for add_texture in (True, False):
meshes = init_meshes(device=torch.device("cuda:0"), add_texture=add_texture)
out1 = sample_points_from_meshes(meshes, num_samples=100)


@ -7,6 +7,7 @@
"""
Sanity checks for loading ShapeNetCore.
"""
import os
import unittest


@ -105,7 +105,6 @@ class TestStructUtils(TestCaseMixin, unittest.TestCase):
ndim = 2
for ndim in (2, 3, 4):
dims = [K] * ndim
x = torch.rand([N] + dims, device=device)


@ -308,9 +308,7 @@ class TestTransform(TestCaseMixin, unittest.TestCase):
t1._matrix = torch.FloatTensor(persp_proj)
points = torch.tensor(
[[0.0, 1.0, 0.0], [0.0, 0.0, 1e-5], [-1.0, 0.0, 1e-5]]
-).view(
-1, 3, 3
-) # a set of points with z-coord very close to 0
+).view(1, 3, 3) # a set of points with z-coord very close to 0
proj = t1.transform_points(points)
proj_eps = t1.transform_points(points, eps=1e-4)
@ -323,7 +321,6 @@ class TestTransform(TestCaseMixin, unittest.TestCase):
# generate a random chain of transforms
for _ in range(10): # 10 different tries
# list of transform matrices
ts = []


@ -66,7 +66,6 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
volume_size=(10, 13, 8),
dtype=torch.float32,
):
device = torch.device("cuda:0")
# make sure we have at least 3 volumes to prevent indexing crash
@ -94,7 +93,6 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
for features_, densities_ in zip(
(None, features, features_list), (densities, densities, densities_list)
):
# init the volume structure
v = Volumes(
features=features_,
@ -205,7 +203,6 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
# try for 10 sets of different random sizes/centers/voxel_sizes
for _ in range(10):
size = torch.randint(high=10, size=(3,), low=3).tolist()
densities = torch.randn(
@ -433,7 +430,6 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
# try for 10 sets of different random sizes/centers/voxel_sizes
for _ in range(10):
size = torch.randint(high=10, size=(3,), low=3).tolist()
center = torch.randn(num_volumes, 3, dtype=torch.float32, device=device)
@ -449,7 +445,6 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
num_volumes, 3, size, num_channels, device, rand_sizes=None
)[0],
):
# init the volume structure
v = Volumes(
densities=densities,
@ -794,9 +789,7 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
size=[num_volumes + 1, num_channels, *size],
device=device,
dtype=torch.float32,
-).unbind(
-0
-), # list with diff batch size
+).unbind(0), # list with diff batch size
torch.randn(
size=[num_volumes + 1, num_channels, *size],
device=device,
@ -806,9 +799,7 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
size=[num_volumes, num_channels, *diff_size],
device=device,
dtype=torch.float32,
-).unbind(
-0
-), # list with different size
+).unbind(0), # list with different size
torch.randn(
size=[num_volumes, num_channels, *diff_size],
device=device,
@ -823,9 +814,7 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
size=[num_volumes, num_channels, *size],
device=diff_device,
dtype=torch.float32,
-).unbind(
-0
-), # list with different device
+).unbind(0), # list with different device
]
# good ways to define features
@ -834,9 +823,7 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
size=[num_volumes, num_channels, *size],
device=device,
dtype=torch.float32,
-).unbind(
-0
-), # list of features of correct size
+).unbind(0), # list of features of correct size
torch.randn(
size=[num_volumes, num_channels, *size],
device=device,
@ -872,9 +859,7 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
size=[num_volumes, num_channels, *size],
device=device,
dtype=torch.float32,
-).unbind(
-0
-), # list of features
+).unbind(0), # list of features
None, # no features
]
@ -890,9 +875,7 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
), # 4 dims
torch.randn(
size=[num_volumes, *size], device=device, dtype=torch.float32
-).unbind(
-0
-), # list of 4 dim tensors
+).unbind(0), # list of 4 dim tensors
]
# all ways to define densities
@ -902,9 +885,7 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
), # padded tensor
torch.randn(
size=[num_volumes, 1, *size], device=device, dtype=torch.float32
-).unbind(
-0
-), # list of densities
+).unbind(0), # list of densities
]
# bad ways to define densities
@ -915,9 +896,7 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
), # 6-dim tensor
torch.randn(
size=[num_volumes, 1, 1, *size], device=device, dtype=torch.float32
-).unbind(
-0
-), # list of 5-dim densities
+).unbind(0), # list of 5-dim densities
]
# all possible ways to define the voxels sizes