mirror of https://github.com/facebookresearch/pytorch3d.git, synced 2026-01-16 19:30:34 +08:00
apply Black 25.11.0 style in fbcode (70/92)
Summary: Formats the covered files with pyfmt.
paintitblack
Reviewed By: itamaro
Differential Revision: D90476295
fbshipit-source-id: 5101d4aae980a9f8955a4cb10bae23997c48837f
committed by meta-codesync[bot]
parent 6be5e2da06
commit 0c3b204375
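Note: the hunks below all show the same Black 25.11.0 rewrites: blank lines between import blocks are dropped, implicitly concatenated string literals are merged, and an assert with a long condition keeps the condition on one line while the message moves into parentheses. A minimal before/after sketch of that assert change (illustrative only; the variable and message here are made up, not taken from the diff):

value = 0.5

# before: condition wrapped in parentheses, message on the closing line
assert (
    value > 0.0
), "value must be positive"

# after (Black 25.11.0): condition stays on one line, message is parenthesized
assert value > 0.0, (
    "value must be positive"
)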
@@ -19,7 +19,6 @@
#
import os
import sys

import unittest.mock as mock

from recommonmark.parser import CommonMarkParser
@@ -48,22 +48,18 @@ The outputs of the experiment are saved and logged in multiple ways:
import logging
import os
import warnings

from dataclasses import field

import hydra

import torch
from accelerate import Accelerator
from omegaconf import DictConfig, OmegaConf
from packaging import version

from pytorch3d.implicitron.dataset.data_source import (
DataSourceBase,
ImplicitronDataSource,
)
from pytorch3d.implicitron.models.base_model import ImplicitronModelBase

from pytorch3d.implicitron.models.renderer.multipass_ea import (
MultiPassEmissionAbsorptionRenderer,
)
@@ -11,7 +11,6 @@ import os
from typing import Optional

import torch.optim

from accelerate import Accelerator
from pytorch3d.implicitron.models.base_model import ImplicitronModelBase
from pytorch3d.implicitron.tools import model_io
@@ -14,9 +14,7 @@ from dataclasses import field
from typing import Any, Dict, List, Optional, Tuple

import torch.optim

from accelerate import Accelerator

from pytorch3d.implicitron.models.base_model import ImplicitronModelBase
from pytorch3d.implicitron.tools import model_io
from pytorch3d.implicitron.tools.config import (
@@ -12,7 +12,6 @@ import unittest
from pathlib import Path

import torch

from hydra import compose, initialize_config_dir
from omegaconf import OmegaConf
from projects.implicitron_trainer.impl.optimizer_factory import (
@@ -21,7 +21,6 @@ from typing import (
)

import torch

from pytorch3d.implicitron.dataset.frame_data import FrameData
from pytorch3d.implicitron.dataset.utils import GenericWorkaround

@@ -25,7 +25,6 @@ from typing import (

import numpy as np
import torch

from pytorch3d.implicitron.dataset import orm_types, types
from pytorch3d.implicitron.dataset.utils import (
adjust_camera_to_bbox_crop_,
@@ -38,7 +38,6 @@ from pytorch3d.implicitron.dataset.utils import is_known_frame_scalar
from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
from pytorch3d.renderer.camera_utils import join_cameras_as_batch
from pytorch3d.renderer.cameras import CamerasBase

from tqdm import tqdm
@@ -327,9 +326,9 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
assert os.path.normpath(
# pyre-ignore[16]
self.frame_annots[idx]["frame_annotation"].image.path
) == os.path.normpath(
path
), f"Inconsistent frame indices {seq_name, frame_no, path}."
) == os.path.normpath(path), (
f"Inconsistent frame indices {seq_name, frame_no, path}."
)
return idx

dataset_idx = [
@@ -21,7 +21,6 @@ from pytorch3d.renderer.cameras import CamerasBase

from .dataset_map_provider import DatasetMap, DatasetMapProviderBase, PathManagerFactory
from .json_index_dataset import JsonIndexDataset

from .utils import (
DATASET_TYPE_KNOWN,
DATASET_TYPE_TEST,
@@ -18,7 +18,6 @@ from typing import Dict, List, Optional, Tuple, Type, Union

import numpy as np
from iopath.common.file_io import PathManager

from omegaconf import DictConfig
from pytorch3d.implicitron.dataset.dataset_map_provider import (
DatasetMap,
@@ -31,7 +30,6 @@ from pytorch3d.implicitron.tools.config import (
registry,
run_auto_creation,
)

from pytorch3d.renderer.cameras import CamerasBase
from tqdm import tqdm
@@ -12,7 +12,6 @@ import torch
from pytorch3d.implicitron.tools.config import registry

from .load_llff import load_llff_data

from .single_sequence_dataset import (
_interpret_blender_cameras,
SingleSceneDatasetMapProviderBase,
@@ -8,7 +8,6 @@ import os
import warnings

import numpy as np

from PIL import Image


@@ -13,7 +13,6 @@ import struct
from typing import Optional, Tuple

import numpy as np

from pytorch3d.implicitron.dataset.types import (
DepthAnnotation,
ImageAnnotation,
@@ -22,7 +21,6 @@ from pytorch3d.implicitron.dataset.types import (
VideoAnnotation,
ViewpointAnnotation,
)

from sqlalchemy import LargeBinary
from sqlalchemy.orm import (
composite,
@@ -10,7 +10,6 @@ import hashlib
import json
import logging
import os

import urllib
from dataclasses import dataclass, Field, field
from typing import (
@@ -32,13 +31,11 @@ import pandas as pd
import sqlalchemy as sa
import torch
from pytorch3d.implicitron.dataset.dataset_base import DatasetBase

from pytorch3d.implicitron.dataset.frame_data import (
FrameData,
FrameDataBuilder, # noqa
FrameDataBuilderBase,
)

from pytorch3d.implicitron.tools.config import (
registry,
ReplaceableBase,
@@ -12,9 +12,7 @@ import os
from typing import List, Optional, Tuple, Type

import numpy as np

from omegaconf import DictConfig, OmegaConf

from pytorch3d.implicitron.dataset.dataset_map_provider import (
DatasetMap,
DatasetMapProviderBase,
@@ -18,7 +18,6 @@ from pytorch3d.implicitron.dataset.dataset_base import DatasetBase
from pytorch3d.implicitron.dataset.dataset_map_provider import DatasetMap
from pytorch3d.implicitron.dataset.frame_data import FrameData
from pytorch3d.implicitron.tools.config import registry, run_auto_creation

from torch.utils.data import DataLoader

logger = logging.getLogger(__name__)
@@ -15,7 +15,6 @@ from typing import List, Optional, Tuple, TypeVar, Union
import numpy as np
import torch
from PIL import Image

from pytorch3d.io import IO
from pytorch3d.renderer.cameras import PerspectiveCameras
from pytorch3d.structures.pointclouds import Pointclouds
@@ -14,7 +14,6 @@ import warnings
from typing import Any, Dict, List, Optional, Tuple

import torch

import tqdm
from pytorch3d.implicitron.evaluation import evaluate_new_view_synthesis as evaluate
from pytorch3d.implicitron.models.base_model import EvaluationMode, ImplicitronModelBase
@@ -10,7 +10,6 @@ from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional

import torch

from pytorch3d.implicitron.models.renderer.base import EvaluationMode
from pytorch3d.implicitron.tools.config import ReplaceableBase
from pytorch3d.renderer.cameras import CamerasBase
@@ -16,7 +16,6 @@ from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union

import torch
from omegaconf import DictConfig

from pytorch3d.implicitron.models.base_model import (
ImplicitronModelBase,
ImplicitronRender,
@@ -28,7 +27,6 @@ from pytorch3d.implicitron.models.metrics import (
RegularizationMetricsBase,
ViewMetricsBase,
)

from pytorch3d.implicitron.models.renderer.base import (
BaseRenderer,
EvaluationMode,
@@ -38,7 +36,6 @@ from pytorch3d.implicitron.models.renderer.base import (
RenderSamplingMode,
)
from pytorch3d.implicitron.models.renderer.ray_sampler import RaySamplerBase

from pytorch3d.implicitron.models.utils import (
apply_chunked,
chunk_generator,
@@ -53,7 +50,6 @@ from pytorch3d.implicitron.tools.config import (
registry,
run_auto_creation,
)

from pytorch3d.implicitron.tools.rasterize_mc import rasterize_sparse_ray_bundle
from pytorch3d.renderer import utils as rend_utils
from pytorch3d.renderer.cameras import CamerasBase
@@ -10,7 +10,6 @@ from abc import ABC, abstractmethod
from typing import Optional

from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle

from pytorch3d.implicitron.tools.config import ReplaceableBase
from pytorch3d.renderer.cameras import CamerasBase

@@ -16,14 +16,11 @@ This file contains

import logging
from dataclasses import field

from enum import Enum
from typing import Dict, Optional, Tuple

import torch

from omegaconf import DictConfig

from pytorch3d.implicitron.tools.config import (
Configurable,
registry,
@@ -11,7 +11,6 @@ import torch
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
from pytorch3d.implicitron.tools.config import registry
from pytorch3d.renderer.implicit import HarmonicEmbedding

from torch import nn

from .base import ImplicitFunctionBase
@@ -21,7 +21,6 @@ from pytorch3d.renderer.implicit import HarmonicEmbedding
from pytorch3d.renderer.implicit.utils import ray_bundle_to_ray_points

from .base import ImplicitFunctionBase

from .decoding_functions import ( # noqa
_xavier_init,
MLPWithInputSkips,
@@ -9,7 +9,6 @@
from typing import Callable, Optional

import torch

import torch.nn.functional as F
from pytorch3d.common.compat import prod
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
@@ -13,9 +13,7 @@ from dataclasses import fields
from typing import Callable, Dict, Optional, Tuple

import torch

from omegaconf import DictConfig

from pytorch3d.implicitron.models.implicit_function.base import ImplicitFunctionBase
from pytorch3d.implicitron.models.implicit_function.decoding_functions import (
DecoderFunctionBase,
@@ -17,7 +17,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, TYPE_CHECKING, Un

import torch
from omegaconf import DictConfig

from pytorch3d.implicitron.models.base_model import (
ImplicitronModelBase,
ImplicitronRender,
@@ -28,7 +27,6 @@ from pytorch3d.implicitron.models.metrics import (
RegularizationMetricsBase,
ViewMetricsBase,
)

from pytorch3d.implicitron.models.renderer.base import (
BaseRenderer,
EvaluationMode,
@@ -50,7 +48,6 @@ from pytorch3d.implicitron.tools.config import (
registry,
run_auto_creation,
)

from pytorch3d.implicitron.tools.rasterize_mc import rasterize_sparse_ray_bundle
from pytorch3d.renderer import utils as rend_utils
from pytorch3d.renderer.cameras import CamerasBase
@@ -11,7 +11,6 @@ import copy
import torch
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
from pytorch3d.implicitron.tools.config import Configurable, expand_args_fields

from pytorch3d.renderer.implicit.sample_pdf import sample_pdf


@@ -12,7 +12,6 @@ import torch
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
from pytorch3d.implicitron.tools.config import enable_get_default_args
from pytorch3d.renderer.implicit import HarmonicEmbedding

from torch import nn


@@ -17,11 +17,8 @@ from typing import Any, Dict, Optional, Tuple
import torch
import tqdm
from pytorch3d.common.compat import prod

from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle

from pytorch3d.implicitron.tools import image_utils

from pytorch3d.implicitron.tools.utils import cat_dataclass

@@ -83,9 +80,9 @@ def preprocess_input(

if mask_depths and fg_mask is not None and depth_map is not None:
# mask the depths
assert (
mask_threshold > 0.0
), "Depths should be masked only with thresholded masks"
assert mask_threshold > 0.0, (
"Depths should be masked only with thresholded masks"
)
warnings.warn("Masking depths!")
depth_map = depth_map * fg_mask

@@ -10,7 +10,6 @@ import math
from typing import Optional, Tuple

import pytorch3d

import torch
from pytorch3d.ops import packed_to_padded
from pytorch3d.renderer import PerspectiveCameras
@@ -499,7 +499,7 @@ class StatsJSONEncoder(json.JSONEncoder):
return enc
else:
raise TypeError(
f"Object of type {o.__class__.__name__} " f"is not JSON serializable"
f"Object of type {o.__class__.__name__} is not JSON serializable"
)


@@ -17,7 +17,6 @@ import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch

from PIL import Image

_NO_TORCHVISION = False
@@ -796,7 +796,7 @@ def save_obj(
# Create .mtl file with the material name and texture map filename
# TODO: enable material properties to also be saved.
with _open_file(mtl_path, path_manager, "w") as f_mtl:
lines = f"newmtl mesh\n" f"map_Kd {output_path.stem}.png\n"
lines = f"newmtl mesh\nmap_Kd {output_path.stem}.png\n"
f_mtl.write(lines)


@@ -8,11 +8,8 @@


from .chamfer import chamfer_distance

from .mesh_edge_loss import mesh_edge_loss

from .mesh_laplacian_smoothing import mesh_laplacian_smoothing

from .mesh_normal_consistency import mesh_normal_consistency
from .point_mesh_distance import point_mesh_edge_distance, point_mesh_face_distance

@@ -8,17 +8,14 @@

from .ball_query import ball_query
from .cameras_alignment import corresponding_cameras_alignment

from .cubify import cubify
from .graph_conv import GraphConv
from .interp_face_attrs import interpolate_face_attributes
from .iou_box3d import box3d_overlap
from .knn import knn_gather, knn_points
from .laplacian_matrices import cot_laplacian, laplacian, norm_laplacian

from .mesh_face_areas_normals import mesh_face_areas_normals
from .mesh_filtering import taubin_smoothing

from .packed_to_padded import packed_to_padded, padded_to_packed
from .perspective_n_points import efficient_pnp
from .points_alignment import corresponding_points_alignment, iterative_closest_point
@@ -30,9 +27,7 @@ from .points_to_volumes import (
add_pointclouds_to_volumes,
add_points_features_to_volume_densities_features,
)

from .sample_farthest_points import sample_farthest_points

from .sample_points_from_meshes import sample_points_from_meshes
from .subdivide_meshes import SubdivideMeshes
from .utils import (
@@ -42,7 +37,6 @@ from .utils import (
is_pointclouds,
wmean,
)

from .vert_align import vert_align
@@ -11,9 +11,7 @@ from typing import Optional

import torch
import torch.nn.functional as F

from pytorch3d.common.compat import meshgrid_ij

from pytorch3d.structures import Meshes


@@ -16,9 +16,7 @@ import sys
from typing import Tuple, Union

import torch

from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals

from pytorch3d.ops.packed_to_padded import packed_to_padded
from pytorch3d.renderer.mesh.rasterizer import Fragments as MeshFragments

@@ -69,7 +69,6 @@ from .mesh import (
TexturesUV,
TexturesVertex,
)

from .points import (
AlphaCompositor,
NormWeightedCompositor,
@@ -153,12 +153,12 @@ def _pulsar_from_opencv_projection(
# Check image sizes.
image_w = image_size_wh[0, 0]
image_h = image_size_wh[0, 1]
assert torch.all(
image_size_wh[:, 0] == image_w
), "All images in a batch must have the same width!"
assert torch.all(
image_size_wh[:, 1] == image_h
), "All images in a batch must have the same height!"
assert torch.all(image_size_wh[:, 0] == image_w), (
"All images in a batch must have the same width!"
)
assert torch.all(image_size_wh[:, 1] == image_h), (
"All images in a batch must have the same height!"
)
# Focal length.
fx = camera_matrix[:, 0, 0].unsqueeze(1)
fy = camera_matrix[:, 1, 1].unsqueeze(1)
@@ -12,7 +12,6 @@ from .clip import (
ClippedFaces,
convert_clipped_rasterization_to_original_faces,
)

from .rasterize_meshes import rasterize_meshes
from .rasterizer import MeshRasterizer, RasterizationSettings
from .renderer import MeshRenderer, MeshRendererWithFragments
@@ -14,7 +14,6 @@ import torch
from pytorch3d import _C

from ..utils import parse_image_size

from .clip import (
clip_faces,
ClipFrustum,
@@ -625,9 +625,7 @@ class TexturesAtlas(TexturesBase):
of length `k`.
"""
if len(faces_ids_list) != len(self.atlas_list()):
raise IndexError(
"faces_ids_list must be of " "the same length as atlas_list."
)
raise IndexError("faces_ids_list must be of the same length as atlas_list.")

sub_features = []
for atlas, faces_ids in zip(self.atlas_list(), faces_ids_list):
@@ -1657,7 +1655,7 @@ class TexturesUV(TexturesBase):
raise NotImplementedError("This function does not support multiple maps.")
if len(faces_ids_list) != len(self.faces_uvs_padded()):
raise IndexError(
"faces_uvs_padded must be of " "the same length as face_ids_list."
"faces_uvs_padded must be of the same length as face_ids_list."
)

sub_faces_uvs, sub_verts_uvs, sub_maps = [], [], []
@@ -1871,7 +1869,7 @@ class TexturesVertex(TexturesBase):
"""
if len(vertex_ids_list) != len(self.verts_features_list()):
raise IndexError(
"verts_features_list must be of " "the same length as vertex_ids_list."
"verts_features_list must be of the same length as vertex_ids_list."
)

sub_features = []
@@ -24,7 +24,6 @@ from typing import Any, Dict

os.environ["PYOPENGL_PLATFORM"] = "egl"
import OpenGL.EGL as egl # noqa

import pycuda.driver as cuda # noqa
from OpenGL._opaque import opaque_pointer_cls # noqa
from OpenGL.raw.EGL._errors import EGLError # noqa
@@ -17,15 +17,12 @@ import numpy as np
import OpenGL.GL as gl
import pycuda.gl
import torch

import torch.nn as nn

from pytorch3d.structures.meshes import Meshes

from ..cameras import FoVOrthographicCameras, FoVPerspectiveCameras
from ..mesh.rasterizer import Fragments, RasterizationSettings
from ..utils import parse_image_size

from .opengl_utils import _torch_to_opengl, global_device_context_store

# Shader strings, used below to compile an OpenGL program.
@@ -9,9 +9,7 @@
import torch

from .compositor import AlphaCompositor, NormWeightedCompositor

from .pulsar.unified import PulsarPointsRenderer

from .rasterize_points import rasterize_points
from .rasterizer import PointsRasterizationSettings, PointsRasterizer
from .renderer import PointsRenderer
@@ -11,7 +11,6 @@ from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from pytorch3d import _C

from pytorch3d.renderer.mesh.rasterize_meshes import pix_to_non_square_ndc

from ..utils import parse_image_size
@@ -531,9 +531,9 @@ class Meshes:
list of tensors of vertices of shape (V_n, 3).
"""
if self._verts_list is None:
assert (
self._verts_padded is not None
), "verts_padded is required to compute verts_list."
assert self._verts_padded is not None, (
"verts_padded is required to compute verts_list."
)
self._verts_list = struct_utils.padded_to_list(
self._verts_padded, self.num_verts_per_mesh().tolist()
)
@@ -547,9 +547,9 @@ class Meshes:
list of tensors of faces of shape (F_n, 3).
"""
if self._faces_list is None:
assert (
self._faces_padded is not None
), "faces_padded is required to compute faces_list."
assert self._faces_padded is not None, (
"faces_padded is required to compute faces_list."
)
self._faces_list = struct_utils.padded_to_list(
self._faces_padded, self.num_faces_per_mesh().tolist()
)
@@ -925,9 +925,9 @@ class Meshes:

verts_list = self.verts_list()
faces_list = self.faces_list()
assert (
faces_list is not None and verts_list is not None
), "faces_list and verts_list arguments are required"
assert faces_list is not None and verts_list is not None, (
"faces_list and verts_list arguments are required"
)

if self.isempty():
self._faces_padded = torch.zeros(
@@ -433,9 +433,9 @@ class Pointclouds:
list of tensors of points of shape (P_n, 3).
"""
if self._points_list is None:
assert (
self._points_padded is not None
), "points_padded is required to compute points_list."
assert self._points_padded is not None, (
"points_padded is required to compute points_list."
)
points_list = []
for i in range(self._N):
points_list.append(
@@ -12,11 +12,8 @@ from .camera_conversions import (
pulsar_from_cameras_projection,
pulsar_from_opencv_projection,
)

from .checkerboard import checkerboard

from .ico_sphere import ico_sphere

from .torus import torus


@@ -8,7 +8,6 @@ from itertools import product

import torch
from fvcore.common.benchmark import benchmark

from pytorch3d.ops.ball_query import ball_query


@@ -8,7 +8,6 @@
import unittest

import torch

from pytorch3d.implicitron.models.utils import preprocess_input, weighted_sum_losses


@@ -11,12 +11,10 @@ from dataclasses import dataclass
from itertools import product

import numpy as np

import torch
from pytorch3d.implicitron.dataset.data_loader_map_provider import (
DoublePoolBatchSampler,
)

from pytorch3d.implicitron.dataset.dataset_base import DatasetBase
from pytorch3d.implicitron.dataset.frame_data import FrameData
from pytorch3d.implicitron.dataset.scene_batch_sampler import SceneBatchSampler
@@ -7,9 +7,7 @@
import unittest

import numpy as np

import torch

from pytorch3d.implicitron.dataset.utils import (
bbox_xywh_to_xyxy,
bbox_xyxy_to_xywh,
@@ -21,7 +19,6 @@ from pytorch3d.implicitron.dataset.utils import (
rescale_bbox,
resize_image,
)

from tests.common_testing import TestCaseMixin
@@ -9,7 +9,6 @@ import os
import unittest

import torch

from pytorch3d.implicitron.dataset.data_loader_map_provider import ( # noqa
SequenceDataLoaderMapProvider,
SimpleDataLoaderMapProvider,
@@ -8,7 +8,6 @@ import os
import unittest

from pytorch3d.implicitron import eval_demo

from tests.common_testing import interactive_testing_requested

from .common_resources import CO3D_MANIFOLD_PATH
@@ -15,7 +15,6 @@ import unittest
import lpips
import numpy as np
import torch

from pytorch3d.implicitron.dataset.frame_data import FrameData
from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
from pytorch3d.implicitron.evaluation.evaluate_new_view_synthesis import eval_batch
@@ -14,7 +14,6 @@ from typing import ClassVar, Optional, Type
import pandas as pd
import pkg_resources
import sqlalchemy as sa

from pytorch3d.implicitron.dataset import types
from pytorch3d.implicitron.dataset.frame_data import FrameData, GenericFrameDataBuilder
from pytorch3d.implicitron.dataset.orm_types import (
@@ -12,7 +12,6 @@ from typing import List

import numpy as np
import torch

from pytorch3d.implicitron.dataset import types
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from pytorch3d.implicitron.dataset.frame_data import FrameDataBuilder
@@ -29,7 +28,6 @@ from pytorch3d.implicitron.dataset.utils import (
)
from pytorch3d.implicitron.tools.config import get_default_args
from pytorch3d.renderer.cameras import PerspectiveCameras

from tests.common_testing import TestCaseMixin
from tests.implicitron.common_resources import get_skateboard_data
@@ -13,7 +13,6 @@ import unittest
from typing import List

import numpy as np

import torch
import torchvision
from PIL import Image
@@ -13,7 +13,6 @@ from typing import Tuple
import torch
from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset
from pytorch3d.implicitron.dataset.visualize import get_implicitron_sequence_pointcloud

from pytorch3d.implicitron.models.visualization.render_flyaround import render_flyaround
from pytorch3d.implicitron.tools.config import expand_args_fields
from pytorch3d.implicitron.tools.point_cloud_utils import render_point_cloud_pytorch3d
@@ -8,9 +8,7 @@
import unittest

import numpy as np

import torch

from pytorch3d.implicitron.models.renderer.base import (
approximate_conical_frustum_as_gaussians,
compute_3d_diagonal_covariance_gaussian,
@@ -18,7 +16,6 @@ from pytorch3d.implicitron.models.renderer.base import (
ImplicitronRayBundle,
)
from pytorch3d.implicitron.models.renderer.ray_sampler import AbstractMaskRaySampler

from tests.common_testing import TestCaseMixin


@@ -7,7 +7,6 @@
import unittest
from itertools import product
from typing import Tuple

from unittest.mock import patch

import torch
@@ -18,7 +17,6 @@ from pytorch3d.implicitron.models.renderer.ray_sampler import (
compute_radii,
NearFarRaySampler,
)

from pytorch3d.renderer.cameras import (
CamerasBase,
FoVOrthographicCameras,
@@ -28,7 +26,6 @@ from pytorch3d.renderer.cameras import (
)
from pytorch3d.renderer.implicit.utils import HeterogeneousRayBundle
from tests.common_camera_utils import init_random_cameras

from tests.common_testing import TestCaseMixin

CAMERA_TYPES = (
@@ -7,7 +7,6 @@
import unittest

import numpy as np

from pytorch3d.implicitron.dataset.orm_types import ArrayTypeFactory, TupleTypeFactory


@@ -8,7 +8,6 @@ import unittest

import torch
from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud

from pytorch3d.renderer.cameras import PerspectiveCameras
from tests.common_testing import TestCaseMixin


@@ -8,7 +8,6 @@ import unittest
from itertools import product

import torch

from pytorch3d.implicitron.models.renderer.ray_point_refiner import (
apply_blurpool_on_weights,
RayPointRefiner,
@@ -10,9 +10,7 @@ import unittest
from collections import Counter

import pkg_resources

import torch

from pytorch3d.implicitron.dataset.sql_dataset import SqlIndexDataset

NO_BLOBS_KWARGS = {
@@ -16,7 +16,6 @@ from pytorch3d.implicitron.models.implicit_function.scene_representation_network
from pytorch3d.implicitron.models.renderer.ray_sampler import ImplicitronRayBundle
from pytorch3d.implicitron.tools.config import get_default_args
from pytorch3d.renderer import PerspectiveCameras

from tests.common_testing import TestCaseMixin

_BATCH_SIZE: int = 3
@@ -8,13 +8,11 @@
import unittest

import torch

from omegaconf import DictConfig, OmegaConf
from pytorch3d.implicitron.models.implicit_function.voxel_grid_implicit_function import (
VoxelGridImplicitFunction,
)
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle

from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
from pytorch3d.renderer import ray_bundle_to_ray_points
from tests.common_testing import TestCaseMixin
@@ -10,7 +10,6 @@ from typing import Optional, Tuple

import torch
from omegaconf import DictConfig, OmegaConf

from pytorch3d.implicitron.models.implicit_function.utils import (
interpolate_line,
interpolate_plane,
@@ -22,7 +21,6 @@ from pytorch3d.implicitron.models.implicit_function.voxel_grid import (
VMFactorizedVoxelGrid,
VoxelGridModule,
)

from pytorch3d.implicitron.tools.config import expand_args_fields, get_default_args
from tests.common_testing import TestCaseMixin

@@ -60,7 +60,6 @@ from pytorch3d.transforms.rotation_conversions import random_rotations
from pytorch3d.transforms.so3 import so3_exp_map

from .common_camera_utils import init_random_cameras

from .common_testing import TestCaseMixin
@@ -673,9 +673,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):

def test_load_simple_binary(self):
for big_endian in [True, False]:
verts = (
"0 0 0 " "0 0 1 " "0 1 1 " "0 1 0 " "1 0 0 " "1 0 1 " "1 1 1 " "1 1 0"
).split()
verts = ("0 0 0 0 0 1 0 1 1 0 1 0 1 0 0 1 0 1 1 1 1 1 1 0").split()
faces = (
"4 0 1 2 3 "
"4 7 6 5 4 "
@@ -688,7 +686,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
"3 4 5 1"
).split()
short_one = b"\00\01" if big_endian else b"\01\00"
mixed_data = b"\00\00" b"\03\03" + (short_one + b"\00\01\01\01" b"\00\02")
mixed_data = b"\00\00\03\03" + (short_one + b"\00\01\01\01\00\02")
minus_one_data = b"\xff" * 14
endian_char = ">" if big_endian else "<"
format = (
@@ -604,9 +604,9 @@ class TestRaysampling(TestCaseMixin, unittest.TestCase):
# test weather they are of the correct shape
for attr in ("origins", "directions", "lengths", "xys"):
tensor = getattr(ray_bundle, attr)
assert tensor.shape[:2] == torch.Size(
(n_rays_total, 1)
), tensor.shape
assert tensor.shape[:2] == torch.Size((n_rays_total, 1)), (
tensor.shape
)

# if two camera ids are same than origins should also be the same
# directions and xys are always different and lengths equal
@@ -12,7 +12,6 @@ Sanity checks for output images from the renderer.
import os
import unittest
from collections import namedtuple

from itertools import product

import numpy as np