mirror of https://github.com/facebookresearch/pytorch3d.git
synced 2025-12-22 07:10:34 +08:00

Compare commits

30 Commits (SHA1):
f34104cf6e
f247c86dc0
ae9d8787ce
8772fe0de8
c292c71c1a
d0d9cae9cd
1f92c4e9d2
9b981f2c7e
85eccbbf77
b80ab0caf0
1e817914b3
799c1cd21b
292acc71a3
3621a36494
3087ab7f62
e46ab49a34
8a27590c5f
06cdc313a7
94da8841af
fbc6725f03
6b8766080d
c373a84400
7606854ff7
83bacda8fb
f74fc450e8
3b4f8a4980
79b46734cb
55638f3bae
f4f2209271
f613682551
@@ -260,6 +260,48 @@ workflows:
          name: linux_conda_py38_cu121_pyt210
          python_version: '3.8'
          pytorch_version: 2.1.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt211
          python_version: '3.8'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt211
          python_version: '3.8'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt212
          python_version: '3.8'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt212
          python_version: '3.8'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt220
          python_version: '3.8'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt220
          python_version: '3.8'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
@@ -358,6 +400,48 @@ workflows:
          name: linux_conda_py39_cu121_pyt210
          python_version: '3.9'
          pytorch_version: 2.1.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt211
          python_version: '3.9'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt211
          python_version: '3.9'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt212
          python_version: '3.9'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt212
          python_version: '3.9'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt220
          python_version: '3.9'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt220
          python_version: '3.9'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
@@ -456,6 +540,48 @@ workflows:
          name: linux_conda_py310_cu121_pyt210
          python_version: '3.10'
          pytorch_version: 2.1.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt211
          python_version: '3.10'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt211
          python_version: '3.10'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt212
          python_version: '3.10'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt212
          python_version: '3.10'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt220
          python_version: '3.10'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt220
          python_version: '3.10'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
@@ -470,6 +596,62 @@ workflows:
          name: linux_conda_py311_cu121_pyt210
          python_version: '3.11'
          pytorch_version: 2.1.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt211
          python_version: '3.11'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt211
          python_version: '3.11'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt212
          python_version: '3.11'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt212
          python_version: '3.11'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt220
          python_version: '3.11'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt220
          python_version: '3.11'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py312_cu118_pyt220
          python_version: '3.12'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py312_cu121_pyt220
          python_version: '3.12'
          pytorch_version: 2.2.0
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py310_cu117_pyt201
          context: DOCKERHUB_TOKEN
@@ -26,12 +26,13 @@ CONDA_CUDA_VERSIONS = {
    "2.0.0": ["cu117", "cu118"],
    "2.0.1": ["cu117", "cu118"],
    "2.1.0": ["cu118", "cu121"],
    "2.1.1": ["cu118", "cu121"],
    "2.1.2": ["cu118", "cu121"],
    "2.2.0": ["cu118", "cu121"],
}


def conda_docker_image_for_cuda(cuda_version):
    if cuda_version in ("cu101", "cu102", "cu111"):
        return None
    if len(cuda_version) != 5:
        raise ValueError("Unknown cuda version")
    return "pytorch/conda-builder:cuda" + cuda_version[2:]
@@ -52,12 +53,18 @@ def pytorch_versions_for_python(python_version):
        for i in CONDA_CUDA_VERSIONS
        if version.Version(i) >= version.Version("2.1.0")
    ]
    if python_version == "3.12":
        return [
            i
            for i in CONDA_CUDA_VERSIONS
            if version.Version(i) >= version.Version("2.2.0")
        ]


def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
    w = []
    for btype in ["conda"]:
        for python_version in ["3.8", "3.9", "3.10", "3.11"]:
        for python_version in ["3.8", "3.9", "3.10", "3.11", "3.12"]:
            for pytorch_version in pytorch_versions_for_python(python_version):
                for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
                    w += workflow_pair(
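To make the matrix above concrete, here is a small illustrative sketch (not part of the diff) of how the (python, pytorch, cuda) combinations expand into the linux_conda_* job names in the CircleCI config; the helper is simplified and its floor versions are assumptions inferred from the snippet above.

```python
from packaging import version

# Mirrors the tail of CONDA_CUDA_VERSIONS shown above (older entries omitted).
CONDA_CUDA_VERSIONS = {
    "2.1.0": ["cu118", "cu121"],
    "2.1.1": ["cu118", "cu121"],
    "2.1.2": ["cu118", "cu121"],
    "2.2.0": ["cu118", "cu121"],
}


def pytorch_versions_for_python(python_version):
    # Assumption: Python 3.12 wheels only exist from PyTorch 2.2.0 onwards.
    floor = "2.2.0" if python_version == "3.12" else "2.1.0"
    return [
        v for v in CONDA_CUDA_VERSIONS
        if version.Version(v) >= version.Version(floor)
    ]


for py in ["3.8", "3.9", "3.10", "3.11", "3.12"]:
    for pt in pytorch_versions_for_python(py):
        for cu in CONDA_CUDA_VERSIONS[pt]:
            # e.g. linux_conda_py312_cu121_pyt220
            print(f"linux_conda_py{py.replace('.', '')}_{cu}_pyt{pt.replace('.', '')}")
```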
.flake8 (5 lines changed)
@@ -1,5 +1,8 @@
[flake8]
ignore = E203, E266, E501, W503, E221
# B028 No explicit stacklevel argument found.
# B907 'foo' is manually surrounded by quotes, consider using the `!r` conversion flag.
# B905 `zip()` without an explicit `strict=` parameter.
ignore = E203, E266, E501, W503, E221, B028, B905, B907
max-line-length = 88
max-complexity = 18
select = B,C,E,F,W,T4,B9
@@ -9,7 +9,7 @@ The core library is written in PyTorch. Several components have underlying imple

- Linux or macOS or Windows
- Python 3.8, 3.9 or 3.10
- PyTorch 1.12.0, 1.12.1, 1.13.0, 2.0.0, 2.0.1 or 2.1.0.
- PyTorch 1.12.0, 1.12.1, 1.13.0, 2.0.0, 2.0.1, 2.1.0, 2.1.1, 2.1.2 or 2.2.0.
- torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
- gcc & g++ ≥ 4.9
- [fvcore](https://github.com/facebookresearch/fvcore)
@@ -146,6 +146,12 @@ If you are using the pulsar backend for sphere-rendering (the `PulsarPointRender

Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).

**[Oct 31st 2023]:** PyTorch3D [v0.7.5](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.5) released.

**[May 10th 2023]:** PyTorch3D [v0.7.4](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.4) released.

**[Apr 5th 2023]:** PyTorch3D [v0.7.3](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.3) released.

**[Dec 19th 2022]:** PyTorch3D [v0.7.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.2) released.

**[Oct 23rd 2022]:** PyTorch3D [v0.7.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.1) released.
@@ -89,7 +89,7 @@
"except ModuleNotFoundError:\n",
"    need_pytorch3d=True\n",
"if need_pytorch3d:\n",
"    if torch.__version__.startswith(\"2.1.\") and sys.platform.startswith(\"linux\"):\n",
"    if torch.__version__.startswith(\"2.2.\") and sys.platform.startswith(\"linux\"):\n",
"        # We try to install PyTorch3D via a released wheel.\n",
"        pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
"        version_str=\"\".join([\n",

The same one-line change to the install cell ("2.1." replaced by "2.2.") recurs in the other tutorial notebooks, in hunks @@ -76,7 +76,7 @@, @@ -51,7 +51,7 @@, @@ -90,7 +90,7 @@, @@ -56,7 +56,7 @@, @@ -68,7 +68,7 @@, @@ -47,7 +47,7 @@, @@ -78,7 +78,7 @@, @@ -72,7 +72,7 @@, @@ -50,7 +50,7 @@, @@ -57,7 +57,7 @@ and @@ -73,7 +73,7 @@; the surrounding lines in each of those hunks are identical to the hunk shown above.
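For orientation, the install cells above are truncated; the following is a hedged sketch of the logic they implement after the change. The wheel index URL is taken from the published tutorials and is an assumption here, not part of this diff.

```python
import sys

import torch

# Only attempt a prebuilt PyTorch3D wheel for PyTorch 2.2.x on Linux;
# otherwise the notebooks fall back to building from source.
if torch.__version__.startswith("2.2.") and sys.platform.startswith("linux"):
    pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
    version_str = "".join([
        f"py3{sys.version_info.minor}_cu",
        torch.version.cuda.replace(".", ""),  # assumes a CUDA-enabled torch build
        f"_pyt{pyt_version_str}",
    ])
    # e.g. "py310_cu121_pyt220"; the notebooks then pip-install pytorch3d from
    # https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/<version_str>/download.html
    print(version_str)
else:
    print("Build PyTorch3D from source instead.")
```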
@@ -121,7 +121,6 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
        """
        # Get the parameters to optimize
        if hasattr(model, "_get_param_groups"):  # use the model function
            # pyre-ignore[29]
            p_groups = model._get_param_groups(self.lr, wd=self.weight_decay)
        else:
            p_groups = [
@@ -110,6 +110,8 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
    def __post_init__(self):
        run_auto_creation(self)

    # pyre-fixme[14]: `run` overrides method defined in `TrainingLoopBase`
    # inconsistently.
    def run(
        self,
        *,
@@ -391,7 +393,6 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
        ):
            prefix = f"e{stats.epoch}_it{stats.it[trainmode]}"
            if hasattr(model, "visualize"):
                # pyre-ignore [29]
                model.visualize(
                    viz,
                    visdom_env_imgs,
@@ -4,4 +4,4 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

__version__ = "0.7.5"
__version__ = "0.7.6"
@@ -495,7 +495,7 @@ __global__ void KNearestNeighborBackwardKernel(
  if ((p1_idx < num1) && (k < num2)) {
    const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k];
    // index of point in p2 corresponding to the k-th nearest neighbor
    const size_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
    const int64_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
    // If the index is the pad value of -1 then ignore it
    if (p2_idx == -1) {
      continue;
@@ -223,7 +223,7 @@ __global__ void CompactVoxelsKernel(
        compactedVoxelArray,
    const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
        voxelOccupied,
    const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
    const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
        voxelOccupiedScan,
    uint numVoxels) {
  uint id = blockIdx.x * blockDim.x + threadIdx.x;
@@ -255,7 +255,8 @@ __global__ void GenerateFacesKernel(
    at::PackedTensorAccessor<int64_t, 1, at::RestrictPtrTraits> ids,
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
        compactedVoxelArray,
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits> numVertsScanned,
    at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
        numVertsScanned,
    const uint activeVoxels,
    const at::PackedTensorAccessor32<float, 3, at::RestrictPtrTraits> vol,
    const at::PackedTensorAccessor32<int, 2, at::RestrictPtrTraits> faceTable,
@@ -471,7 +472,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
  auto d_voxelOccupiedScan_ = d_voxelOccupiedScan.index({Slice(1, None)});

  // number of active voxels
  int activeVoxels = d_voxelOccupiedScan[numVoxels].cpu().item<int>();
  int64_t activeVoxels = d_voxelOccupiedScan[numVoxels].cpu().item<int64_t>();

  const int device_id = vol.device().index();
  auto opt = at::TensorOptions().dtype(at::kInt).device(at::kCUDA, device_id);
@@ -492,7 +493,8 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
  CompactVoxelsKernel<<<grid, threads, 0, stream>>>(
      d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelOccupiedScan_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelOccupiedScan_
          .packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
      numVoxels);
  AT_CUDA_CHECK(cudaGetLastError());
  cudaDeviceSynchronize();
@@ -502,7 +504,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
  auto d_voxelVertsScan_ = d_voxelVertsScan.index({Slice(1, None)});

  // total number of vertices
  int totalVerts = d_voxelVertsScan[numVoxels].cpu().item<int>();
  int64_t totalVerts = d_voxelVertsScan[numVoxels].cpu().item<int64_t>();

  // Execute "GenerateFacesKernel" kernel
  // This runs only on the occupied voxels.
@@ -522,7 +524,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
      faces.packed_accessor<int64_t, 2, at::RestrictPtrTraits>(),
      ids.packed_accessor<int64_t, 1, at::RestrictPtrTraits>(),
      d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelVertsScan_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelVertsScan_.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
      activeVoxels,
      vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
      faceTable.packed_accessor32<int, 2, at::RestrictPtrTraits>(),
@@ -71,8 +71,8 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCpu(
      if ((j + 1) % 3 == 0 && ps[0] != ps[1] && ps[1] != ps[2] &&
          ps[2] != ps[0]) {
        for (int k = 0; k < 3; k++) {
          int v = tri[k];
          edge_id_to_v[tri.at(k)] = ps.at(k);
          int64_t v = tri.at(k);
          edge_id_to_v[v] = ps.at(k);
          if (!uniq_edge_id.count(v)) {
            uniq_edge_id[v] = verts.size();
            verts.push_back(edge_id_to_v[v]);
@@ -30,11 +30,18 @@
#define GLOBAL __global__
#define RESTRICT __restrict__
#define DEBUGBREAK()
#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
#pragma nv_diag_suppress 1866
#pragma nv_diag_suppress 2941
#pragma nv_diag_suppress 2951
#pragma nv_diag_suppress 2967
#else
#pragma diag_suppress = attribute_not_allowed
#pragma diag_suppress = 1866
#pragma diag_suppress = 2941
#pragma diag_suppress = 2951
#pragma diag_suppress = 2967
#endif
#else // __CUDACC__
#define INLINE inline
#define HOST
@@ -49,6 +56,7 @@
#pragma clang diagnostic pop
#ifdef WITH_CUDA
#include <ATen/cuda/CUDAContext.h>
#include <vector_functions.h>
#else
#ifndef cudaStream_t
typedef void* cudaStream_t;
@@ -65,8 +73,6 @@ struct float2 {
struct float3 {
  float x, y, z;
};
#endif
namespace py = pybind11;
inline float3 make_float3(const float& x, const float& y, const float& z) {
  float3 res;
  res.x = x;
@@ -74,6 +80,8 @@ inline float3 make_float3(const float& x, const float& y, const float& z) {
  res.z = z;
  return res;
}
#endif
namespace py = pybind11;

inline bool operator==(const float3& a, const float3& b) {
  return a.x == b.x && a.y == b.y && a.z == b.z;
@@ -34,11 +34,7 @@ def _minify(basedir, path_manager, factors=(), resolutions=()):

    imgdir = os.path.join(basedir, "images")
    imgs = [os.path.join(imgdir, f) for f in sorted(_ls(path_manager, imgdir))]
    imgs = [
        f
        for f in imgs
        if any([f.endswith(ex) for ex in ["JPG", "jpg", "png", "jpeg", "PNG"]])
    ]
    imgs = [f for f in imgs if f.endswith("JPG", "jpg", "png", "jpeg", "PNG")]
    imgdir_orig = imgdir

    wd = os.getcwd()
@@ -200,7 +200,7 @@ def resize_image(
    mode: str = "bilinear",
) -> Tuple[torch.Tensor, float, torch.Tensor]:

    if type(image) == np.ndarray:
    if isinstance(image, np.ndarray):
        image = torch.from_numpy(image)

    if image_height is None or image_width is None:
@@ -89,9 +89,8 @@ def get_implicitron_sequence_pointcloud(
        frame_data.image_rgb,
        frame_data.depth_map,
        (cast(torch.Tensor, frame_data.fg_probability) > 0.5).float()
        if frame_data.fg_probability is not None
        if mask_points and frame_data.fg_probability is not None
        else None,
        mask_points=mask_points,
    )

    return point_cloud, frame_data
@@ -98,6 +98,13 @@ def save_model(model, stats, fl, optimizer=None, cfg=None):
    return flstats, flmodel, flopt


def save_stats(stats, fl, cfg=None):
    flstats = get_stats_path(fl)
    logger.info("saving model stats to %s" % flstats)
    stats.save(flstats)
    return flstats


def load_model(fl, map_location: Optional[dict]):
    flstats = get_stats_path(fl)
    flmodel = get_model_path(fl)
@@ -26,7 +26,7 @@ def get_rgbd_point_cloud(
    depth_map: torch.Tensor,
    mask: Optional[torch.Tensor] = None,
    mask_thr: float = 0.5,
    mask_points: bool = True,
    *,
    euclidean: bool = False,
) -> Pointclouds:
    """
@@ -80,7 +80,9 @@ def get_rgbd_point_cloud(
        mode="bilinear",
        align_corners=False,
    )
    pts_colors = pts_colors.permute(0, 2, 3, 1).reshape(-1, 3)[pts_mask]
    pts_colors = pts_colors.permute(0, 2, 3, 1).reshape(-1, image_rgb.shape[1])[
        pts_mask
    ]

    return Pointclouds(points=pts_3d[None], features=pts_colors[None])
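A hedged usage sketch of what the reshape change enables (module path and toy shapes are assumptions, not from the diff): the colour channel count now comes from image_rgb.shape[1], so non-RGB feature images unproject as well.

```python
import torch

from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud
from pytorch3d.renderer import PerspectiveCameras

camera = PerspectiveCameras()
image = torch.rand(1, 1, 8, 8)  # (B, C, H, W) with a single feature channel
depth = torch.ones(1, 1, 8, 8)
pcl = get_rgbd_point_cloud(camera, image, depth)
print(pcl.features_packed().shape)  # (P, 1) rather than requiring C == 3
```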
@@ -112,6 +112,7 @@ class VideoWriter:
            resize = im.size
        # make sure size is divisible by 2
        resize = tuple([resize[i] + resize[i] % 2 for i in (0, 1)])
        # pyre-fixme[16]: Module `Image` has no attribute `ANTIALIAS`.
        im = im.resize(resize, Image.ANTIALIAS)
        im.save(outfile)
@@ -750,7 +750,7 @@ def save_obj(
    if path_manager is None:
        path_manager = PathManager()

    save_texture = all([t is not None for t in [faces_uvs, verts_uvs, texture_map]])
    save_texture = all(t is not None for t in [faces_uvs, verts_uvs, texture_map])
    output_path = Path(f)

    # Save the .obj file
@@ -10,6 +10,7 @@ This module implements utility functions for loading and saving
meshes and point clouds as PLY files.
"""
import itertools
import os
import struct
import sys
import warnings
@@ -21,8 +22,14 @@ from typing import List, Optional, Tuple
import numpy as np
import torch
from iopath.common.file_io import PathManager
from pytorch3d.io.utils import _check_faces_indices, _make_tensor, _open_file, PathOrStr
from pytorch3d.renderer import TexturesVertex
from pytorch3d.io.utils import (
    _check_faces_indices,
    _make_tensor,
    _open_file,
    _read_image,
    PathOrStr,
)
from pytorch3d.renderer import TexturesUV, TexturesVertex
from pytorch3d.structures import Meshes, Pointclouds

from .pluggable_formats import (
@@ -804,6 +811,7 @@ class _VertsColumnIndices:
    color_idxs: Optional[List[int]]
    color_scale: float
    normal_idxs: Optional[List[int]]
    texture_uv_idxs: Optional[List[int]]


def _get_verts_column_indices(
@@ -827,6 +835,8 @@ def _get_verts_column_indices(
        property uchar red
        property uchar green
        property uchar blue
        property double texture_u
        property double texture_v

    then the return value will be ([0,1,2], [6,7,8], 1.0/255, [3,4,5])

@@ -839,6 +849,7 @@ def _get_verts_column_indices(
    point_idxs: List[Optional[int]] = [None, None, None]
    color_idxs: List[Optional[int]] = [None, None, None]
    normal_idxs: List[Optional[int]] = [None, None, None]
    texture_uv_idxs: List[Optional[int]] = [None, None]
    for i, prop in enumerate(vertex_head.properties):
        if prop.list_size_type is not None:
            raise ValueError("Invalid vertices in file: did not expect list.")
@@ -851,6 +862,9 @@ def _get_verts_column_indices(
        for j, name in enumerate(["nx", "ny", "nz"]):
            if prop.name == name:
                normal_idxs[j] = i
        for j, name in enumerate(["texture_u", "texture_v"]):
            if prop.name == name:
                texture_uv_idxs[j] = i
    if None in point_idxs:
        raise ValueError("Invalid vertices in file.")
    color_scale = 1.0
@@ -864,6 +878,7 @@ def _get_verts_column_indices(
        color_idxs=None if None in color_idxs else color_idxs,
        color_scale=color_scale,
        normal_idxs=None if None in normal_idxs else normal_idxs,
        texture_uv_idxs=None if None in texture_uv_idxs else texture_uv_idxs,
    )


@@ -880,6 +895,7 @@ class _VertsData:
    verts: torch.Tensor
    verts_colors: Optional[torch.Tensor] = None
    verts_normals: Optional[torch.Tensor] = None
    verts_texture_uvs: Optional[torch.Tensor] = None


def _get_verts(header: _PlyHeader, elements: dict) -> _VertsData:
@@ -922,6 +938,7 @@ def _get_verts(header: _PlyHeader, elements: dict) -> _VertsData:

    vertex_colors = None
    vertex_normals = None
    vertex_texture_uvs = None

    if len(vertex) == 1:
        # This is the case where the whole vertex element has one type,
@@ -935,6 +952,10 @@ def _get_verts(header: _PlyHeader, elements: dict) -> _VertsData:
            vertex_normals = torch.tensor(
                vertex[0][:, column_idxs.normal_idxs], dtype=torch.float32
            )
        if column_idxs.texture_uv_idxs is not None:
            vertex_texture_uvs = torch.tensor(
                vertex[0][:, column_idxs.texture_uv_idxs], dtype=torch.float32
            )
    else:
        # The vertex element is heterogeneous. It was read as several arrays,
        # part by part, where a part is a set of properties with the same type.
@@ -973,11 +994,19 @@ def _get_verts(header: _PlyHeader, elements: dict) -> _VertsData:
            for axis in range(3):
                partnum, col = prop_to_partnum_col[column_idxs.normal_idxs[axis]]
                vertex_normals.numpy()[:, axis] = vertex[partnum][:, col]

        if column_idxs.texture_uv_idxs is not None:
            vertex_texture_uvs = torch.empty(
                size=(vertex_head.count, 2),
                dtype=torch.float32,
            )
            for axis in range(2):
                partnum, col = prop_to_partnum_col[column_idxs.texture_uv_idxs[axis]]
                vertex_texture_uvs.numpy()[:, axis] = vertex[partnum][:, col]
    return _VertsData(
        verts=verts,
        verts_colors=vertex_colors,
        verts_normals=vertex_normals,
        verts_texture_uvs=vertex_texture_uvs,
    )


@@ -998,6 +1027,7 @@ class _PlyData:
    faces: Optional[torch.Tensor]
    verts_colors: Optional[torch.Tensor]
    verts_normals: Optional[torch.Tensor]
    verts_texture_uvs: Optional[torch.Tensor]


def _load_ply(f, *, path_manager: PathManager) -> _PlyData:
@@ -1358,8 +1388,27 @@ class MeshPlyFormat(MeshFormatInterpreter):
            faces = torch.zeros(0, 3, dtype=torch.int64)

        texture = None
        if include_textures and data.verts_colors is not None:
            texture = TexturesVertex([data.verts_colors.to(device)])
        if include_textures:
            if data.verts_colors is not None:
                texture = TexturesVertex([data.verts_colors.to(device)])
            elif data.verts_texture_uvs is not None:
                texture_file_path = None
                for comment in data.header.comments:
                    if "TextureFile" in comment:
                        given_texture_file = comment.split(" ")[-1]
                        texture_file_path = os.path.join(
                            os.path.dirname(str(path)), given_texture_file
                        )
                if texture_file_path is not None:
                    texture_map = _read_image(
                        texture_file_path, path_manager, format="RGB"
                    )
                    texture_map = torch.tensor(texture_map, dtype=torch.float32) / 255.0
                    texture = TexturesUV(
                        [texture_map.to(device)],
                        [faces.to(device)],
                        [data.verts_texture_uvs.to(device)],
                    )

        verts_normals = None
        if data.verts_normals is not None:
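Taken together, the PLY changes above read texture_u/texture_v vertex columns and an optional "comment TextureFile <image>" header line into a TexturesUV. A minimal, hedged loading sketch follows; the path points at the new test asset listed at the end of this diff, and the referenced texture image is assumed to exist next to it.

```python
from pytorch3d.io import IO

# If the referenced texture image is found, the mesh comes back with a
# TexturesUV texture; otherwise it is loaded without a texture.
mesh = IO().load_mesh("tests/data/uvs.ply", device="cpu")
print(type(mesh.textures).__name__)
```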
@@ -5,9 +5,13 @@
# LICENSE file in the root directory of this source tree.


from typing import Optional

import torch
import torch.nn.functional as F

from pytorch3d.common.compat import meshgrid_ij

from pytorch3d.structures import Meshes


@@ -50,7 +54,14 @@ def ravel_index(idx, dims) -> torch.Tensor:


@torch.no_grad()
def cubify(voxels, thresh, device=None, align: str = "topleft") -> Meshes:
def cubify(
    voxels: torch.Tensor,
    thresh: float,
    *,
    feats: Optional[torch.Tensor] = None,
    device=None,
    align: str = "topleft"
) -> Meshes:
    r"""
    Converts a voxel to a mesh by replacing each occupied voxel with a cube
    consisting of 12 faces and 8 vertices. Shared vertices are merged, and
@@ -59,6 +70,9 @@ def cubify(voxels, thresh, device=None, align: str = "topleft") -> Meshes:
        voxels: A FloatTensor of shape (N, D, H, W) containing occupancy probabilities.
        thresh: A scalar threshold. If a voxel occupancy is larger than
            thresh, the voxel is considered occupied.
        feats: A FloatTensor of shape (N, K, D, H, W) containing the color information
            of each voxel. K is the number of channels. This is supported only when
            align == "center"
        device: The device of the output meshes
        align: Defines the alignment of the mesh vertices and the grid locations.
            Has to be one of {"topleft", "corner", "center"}. See below for explanation.
@@ -177,6 +191,7 @@ def cubify(voxels, thresh, device=None, align: str = "topleft") -> Meshes:
    # boolean to linear index
    # NF x 2
    linind = torch.nonzero(faces_idx, as_tuple=False)

    # NF x 4
    nyxz = unravel_index(linind[:, 0], (N, H, W, D))

@@ -238,6 +253,21 @@ def cubify(voxels, thresh, device=None, align: str = "topleft") -> Meshes:
        grid_verts.index_select(0, (idleverts[n] == 0).nonzero(as_tuple=False)[:, 0])
        for n in range(N)
    ]
    faces_list = [nface - idlenum[n][nface] for n, nface in enumerate(faces_list)]

    return Meshes(verts=verts_list, faces=faces_list)
    textures_list = None
    if feats is not None and align == "center":
        # We return a TexturesAtlas containing one color for each face
        # N x K x D x H x W -> N x H x W x D x K
        feats = feats.permute(0, 3, 4, 2, 1)

        # (NHWD) x K
        feats = feats.reshape(-1, feats.size(4))
        feats = torch.index_select(feats, 0, linind[:, 0])
        feats = feats.reshape(-1, 1, 1, feats.size(1))
        feats_list = list(torch.split(feats, split_size.tolist(), 0))
        from pytorch3d.renderer.mesh.textures import TexturesAtlas

        textures_list = TexturesAtlas(feats_list)

    faces_list = [nface - idlenum[n][nface] for n, nface in enumerate(faces_list)]
    return Meshes(verts=verts_list, faces=faces_list, textures=textures_list)
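A short, hedged example of the new feats argument (the toy tensors are invented for illustration); per the docstring above it is only supported with align="center" and yields a TexturesAtlas with one colour per face.

```python
import torch

from pytorch3d.ops import cubify

voxels = torch.tensor(  # (N, D, H, W) occupancy probabilities
    [[[[0.9, 0.1], [0.1, 0.1]], [[0.1, 0.1], [0.1, 0.9]]]]
)
feats = torch.rand(1, 3, 2, 2, 2)  # (N, K, D, H, W), K = 3 colour channels
meshes = cubify(voxels, thresh=0.5, feats=feats, align="center")
print(meshes.verts_packed().shape, type(meshes.textures).__name__)
```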
@@ -291,6 +291,7 @@ def add_pointclouds_to_volumes(
        mask=mask,
        mode=mode,
        rescale_features=rescale_features,
        align_corners=initial_volumes.get_align_corners(),
        _python=_python,
    )

@@ -310,6 +311,7 @@ def add_points_features_to_volume_densities_features(
    grid_sizes: Optional[torch.LongTensor] = None,
    rescale_features: bool = True,
    _python: bool = False,
    align_corners: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Convert a batch of point clouds represented with tensors of per-point
@@ -356,6 +358,7 @@ def add_points_features_to_volume_densities_features(
            output densities are just summed without rescaling, so
            you may need to rescale them afterwards.
        _python: Set to True to use a pure Python implementation.
        align_corners: as for grid_sample.
    Returns:
        volume_features: Output volume of shape `(minibatch, feature_dim, D, H, W)`
        volume_densities: Occupancy volume of shape `(minibatch, 1, D, H, W)`
@@ -409,7 +412,7 @@ def add_points_features_to_volume_densities_features(
            grid_sizes,
            1.0,  # point_weight
            mask,
            True,  # align_corners
            align_corners,  # align_corners
            splat,
        )
@@ -153,6 +153,7 @@ def sample_farthest_points_naive(
        )

        # Select a random point index and save it as the starting point
        # pyre-fixme[6]: For 2nd argument expected `int` but got `Tensor`.
        selected_idx = randint(0, lengths[n] - 1) if random_start_point else 0
        sample_idx_batch[0] = selected_idx
@@ -382,9 +382,9 @@ class VolumeSampler(torch.nn.Module):
        rays_densities = torch.nn.functional.grid_sample(
            volumes_densities,
            rays_points_local_flat,
            align_corners=True,
            mode=self._sample_mode,
            padding_mode=self._padding_mode,
            align_corners=self._volumes.get_align_corners(),
        )

        # permute the dimensions & reshape densities after sampling
@@ -400,9 +400,9 @@ class VolumeSampler(torch.nn.Module):
        rays_features = torch.nn.functional.grid_sample(
            volumes_features,
            rays_points_local_flat,
            align_corners=True,
            mode=self._sample_mode,
            padding_mode=self._padding_mode,
            align_corners=self._volumes.get_align_corners(),
        )

        # permute the dimensions & reshape features after sampling
@@ -549,6 +549,33 @@ class TexturesAtlas(TexturesBase):

        return texels

    def submeshes(
        self,
        vertex_ids_list: List[List[torch.LongTensor]],
        faces_ids_list: List[List[torch.LongTensor]],
    ) -> "TexturesAtlas":
        """
        Extract a sub-texture for use in a submesh.

        If the meshes batch corresponding to this TextureAtlas contains
        `n = len(faces_ids_list)` meshes, then self.atlas_list()
        will be of length n. After submeshing, we obtain a batch of
        `k = sum(len(v) for v in atlas_list` submeshes (see Meshes.submeshes). This
        function creates a corresponding TexturesAtlas object with `atlas_list`
        of length `k`.
        """
        if len(faces_ids_list) != len(self.atlas_list()):
            raise IndexError(
                "faces_ids_list must be of " "the same length as atlas_list."
            )

        sub_features = []
        for atlas, faces_ids in zip(self.atlas_list(), faces_ids_list):
            for faces_ids_submesh in faces_ids:
                sub_features.append(atlas[faces_ids_submesh])

        return self.__class__(sub_features)

    def faces_verts_textures_packed(self) -> torch.Tensor:
        """
        Samples texture from each vertex for each face in the mesh.
@@ -995,9 +1022,13 @@ class TexturesUV(TexturesBase):
        # is the left-top pixel of input, and values x = 1, y = 1 is the
        # right-bottom pixel of input.

        pixel_uvs = pixel_uvs * 2.0 - 1.0
        # map to a range of [-1, 1] and flip the y axis
        pixel_uvs = torch.lerp(
            pixel_uvs.new_tensor([-1.0, 1.0]),
            pixel_uvs.new_tensor([1.0, -1.0]),
            pixel_uvs,
        )

        texture_maps = torch.flip(texture_maps, [2])  # flip y axis of the texture map
        if texture_maps.device != pixel_uvs.device:
            texture_maps = texture_maps.to(pixel_uvs.device)
        texels = F.grid_sample(
@@ -1035,8 +1066,12 @@ class TexturesUV(TexturesBase):
        texture_maps = self.maps_padded()  # NxHxWxC
        texture_maps = texture_maps.permute(0, 3, 1, 2)  # NxCxHxW

        faces_verts_uvs = faces_verts_uvs * 2.0 - 1.0
        texture_maps = torch.flip(texture_maps, [2])  # flip y axis of the texture map
        # map to a range of [-1, 1] and flip the y axis
        faces_verts_uvs = torch.lerp(
            faces_verts_uvs.new_tensor([-1.0, 1.0]),
            faces_verts_uvs.new_tensor([1.0, -1.0]),
            faces_verts_uvs,
        )

        textures = F.grid_sample(
            texture_maps,
@@ -1324,6 +1359,60 @@ class TexturesUV(TexturesBase):
            self.verts_uvs_padded().shape[0] == batch_size
        )

    def submeshes(
        self,
        vertex_ids_list: List[List[torch.LongTensor]],
        faces_ids_list: List[List[torch.LongTensor]],
    ) -> "TexturesUV":
        """
        Extract a sub-texture for use in a submesh.

        If the meshes batch corresponding to this TexturesUV contains
        `n = len(faces_ids_list)` meshes, then self.faces_uvs_padded()
        will be of length n. After submeshing, we obtain a batch of
        `k = sum(len(f) for f in faces_ids_list` submeshes (see Meshes.submeshes). This
        function creates a corresponding TexturesUV object with `faces_uvs_padded`
        of length `k`.

        Args:
            vertex_ids_list: Not used when submeshing TexturesUV.

            face_ids_list: A list of length equal to self.faces_uvs_padded. Each
                element is a LongTensor listing the face ids that the submesh keeps in
                each respective mesh.


        Returns:
            A "TexturesUV in which faces_uvs_padded, verts_uvs_padded, and maps_padded
            have length sum(len(faces) for faces in faces_ids_list)
        """

        if len(faces_ids_list) != len(self.faces_uvs_padded()):
            raise IndexError(
                "faces_uvs_padded must be of " "the same length as face_ids_list."
            )

        sub_faces_uvs, sub_verts_uvs, sub_maps = [], [], []
        for faces_ids, faces_uvs, verts_uvs, map_ in zip(
            faces_ids_list,
            self.faces_uvs_padded(),
            self.verts_uvs_padded(),
            self.maps_padded(),
        ):
            for faces_ids_submesh in faces_ids:
                sub_faces_uvs.append(faces_uvs[faces_ids_submesh])
                sub_verts_uvs.append(verts_uvs)
                sub_maps.append(map_)

        return self.__class__(
            sub_maps,
            sub_faces_uvs,
            sub_verts_uvs,
            self.padding_mode,
            self.align_corners,
            self.sampling_mode,
        )


class TexturesVertex(TexturesBase):
    def __init__(
@@ -16,8 +16,30 @@ from pytorch3d.structures import Pointclouds
from .rasterize_points import rasterize_points


# Class to store the outputs of point rasterization
class PointFragments(NamedTuple):
    """
    Class to store the outputs of point rasterization

    Members:
        idx: int32 Tensor of shape (N, image_size, image_size, points_per_pixel)
            giving the indices of the nearest points at each pixel, in ascending
            z-order. Concretely `idx[n, y, x, k] = p` means that `points[p]` is the kth
            closest point (along the z-direction) to pixel (y, x) - note that points
            represents the packed points of shape (P, 3).
            Pixels that are hit by fewer than points_per_pixel are padded with -1.
        zbuf: Tensor of shape (N, image_size, image_size, points_per_pixel)
            giving the z-coordinates of the nearest points at each pixel, sorted in
            z-order. Concretely, if `idx[n, y, x, k] = p` then
            `zbuf[n, y, x, k] = points[n, p, 2]`. Pixels hit by fewer than
            points_per_pixel are padded with -1.
        dists: Tensor of shape (N, image_size, image_size, points_per_pixel)
            giving the squared Euclidean distance (in NDC units) in the x/y plane
            for each point closest to the pixel. Concretely if `idx[n, y, x, k] = p`
            then `dists[n, y, x, k]` is the squared distance between the pixel (y, x)
            and the point `(points[n, p, 0], points[n, p, 1])`. Pixels hit with fewer
            than points_per_pixel are padded with -1.
    """

    idx: torch.Tensor
    zbuf: torch.Tensor
    dists: torch.Tensor
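To show where these fields come from, a small illustrative sketch (the camera and rasterization settings are arbitrary choices, not from the diff):

```python
import torch

from pytorch3d.renderer import (
    FoVOrthographicCameras,
    PointsRasterizationSettings,
    PointsRasterizer,
)
from pytorch3d.structures import Pointclouds

verts = torch.rand(500, 3) * 2.0 - 1.0
verts[:, 2] += 2.0  # keep the points in front of the camera
points = Pointclouds(points=[verts])
rasterizer = PointsRasterizer(
    cameras=FoVOrthographicCameras(),
    raster_settings=PointsRasterizationSettings(
        image_size=64, radius=0.05, points_per_pixel=4
    ),
)
fragments = rasterizer(points)
# idx / zbuf / dists as documented above, each (N, H, W, points_per_pixel)
print(fragments.idx.shape, fragments.zbuf.shape, fragments.dists.shape)
```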
@@ -30,6 +30,12 @@ class PointsRenderer(nn.Module):
    A class for rendering a batch of points. The class should
    be initialized with a rasterizer and compositor class which each have a forward
    function.

    The points are rendered with with varying alpha (weights) values depending on
    the distance of the pixel center to the true point in the xy plane. The purpose
    of this is to soften the hard decision boundary, for differentiability.
    See Section 3.2 of "SynSin: End-to-end View Synthesis from a Single Image"
    (https://arxiv.org/pdf/1912.08804.pdf) for more details.
    """

    def __init__(self, rasterizer, compositor) -> None:
@@ -453,6 +453,6 @@ def parse_image_size(
        raise ValueError("Image size can only be a tuple/list of (H, W)")
    if not all(i > 0 for i in image_size):
        raise ValueError("Image sizes must be greater than 0; got %d, %d" % image_size)
    if not all(type(i) == int for i in image_size):
    if not all(isinstance(i, int) for i in image_size):
        raise ValueError("Image sizes must be integers; got %f, %f" % image_size)
    return tuple(image_size)
@@ -1576,8 +1576,6 @@ class Meshes:
        Returns:
            Meshes object of length `sum(len(ids) for ids in face_indices)`.

        Submeshing only works with no textures or with the TexturesVertex texture.

        Example 1:

        If `meshes` has batch size 1, and `face_indices` is a 1D LongTensor,
@@ -1616,16 +1614,13 @@ class Meshes:
        sub_verts = []
        sub_verts_ids = []
        sub_faces = []
        sub_face_ids = []

        for face_ids_per_mesh, faces, verts in zip(
            face_indices, self.faces_list(), self.verts_list()
        ):
            sub_verts_ids.append([])
            sub_face_ids.append([])
            for submesh_face_ids in face_ids_per_mesh:
                faces_to_keep = faces[submesh_face_ids]
                sub_face_ids[-1].append(faces_to_keep)

                # Say we are keeping two faces from a mesh with six vertices:
                # faces_to_keep = [[0, 6, 4],
@@ -1652,7 +1647,7 @@ class Meshes:
            verts=sub_verts,
            faces=sub_faces,
            textures=(
                self.textures.submeshes(sub_verts_ids, sub_face_ids)
                self.textures.submeshes(sub_verts_ids, face_indices)
                if self.textures
                else None
            ),
@@ -1698,7 +1693,7 @@ def join_meshes_as_batch(meshes: List[Meshes], include_textures: bool = True) ->
    # Now we know there are multiple meshes and they have textures to merge.
    all_textures = [mesh.textures for mesh in meshes]
    first = all_textures[0]
    tex_types_same = all(type(tex) == type(first) for tex in all_textures)
    tex_types_same = all(type(tex) == type(first) for tex in all_textures)  # noqa: E721

    if not tex_types_same:
        raise ValueError("All meshes in the batch must have the same type of texture.")
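With the TexturesVertex-only restriction removed above, submeshing can carry UV or atlas textures through to the sub-meshes. A hedged sketch follows; the OBJ path is a stand-in, not part of the diff.

```python
import torch

from pytorch3d.io import load_objs_as_meshes

meshes = load_objs_as_meshes(["cow.obj"])  # batch of one mesh with a TexturesUV
keep = torch.arange(100)  # keep the first 100 faces of the only mesh
sub = meshes.submeshes([[keep]])
print(len(sub), type(sub.textures).__name__)  # 1, TexturesUV
```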
@@ -85,7 +85,7 @@ class Volumes:
        are linearly interpolated over the spatial dimensions of the volume.
    - Note that the convention is the same as for the 5D version of the
        `torch.nn.functional.grid_sample` function called with
        `align_corners==True`.
        the same value of `align_corners` argument.
    - Note that the local coordinate convention of `Volumes`
        (+X = left to right, +Y = top to bottom, +Z = away from the user)
        is *different* from the world coordinate convention of the
@@ -143,7 +143,7 @@ class Volumes:
        torch.nn.functional.grid_sample(
            v.densities(),
            v.get_coord_grid(world_coordinates=False),
            align_corners=True,
            align_corners=align_corners,
        ) == v.densities(),

    i.e. sampling the volume at trivial local coordinates
@@ -157,6 +157,7 @@ class Volumes:
        features: Optional[_TensorBatch] = None,
        voxel_size: _VoxelSize = 1.0,
        volume_translation: _Translation = (0.0, 0.0, 0.0),
        align_corners: bool = True,
    ) -> None:
        """
        Args:
@@ -186,6 +187,10 @@ class Volumes:
                b) a Tensor of shape (3,)
                c) a Tensor of shape (minibatch, 3)
                d) a Tensor of shape (1,) (square voxels)
            **align_corners**: If set (default), the coordinates of the corner voxels are
                exactly −1 or +1 in the local coordinate system. Otherwise, the coordinates
                correspond to the centers of the corner voxels. Cf. the namesake argument to
                `torch.nn.functional.grid_sample`.
        """

        # handle densities
@@ -206,6 +211,7 @@ class Volumes:
            voxel_size=voxel_size,
            volume_translation=volume_translation,
            device=self.device,
            align_corners=align_corners,
        )

        # handle features
@@ -336,6 +342,13 @@ class Volumes:
            return None
        return self._features_densities_list(features_)

    def get_align_corners(self) -> bool:
        """
        Return whether the corners of the voxels should be aligned with the
        image pixels.
        """
        return self.locator._align_corners

    def _features_densities_list(self, x: torch.Tensor) -> List[torch.Tensor]:
        """
        Retrieve the list representation of features/densities.
@@ -576,7 +589,7 @@ class VolumeLocator:
        are linearly interpolated over the spatial dimensions of the volume.
    - Note that the convention is the same as for the 5D version of the
        `torch.nn.functional.grid_sample` function called with
        `align_corners==True`.
        the same value of `align_corners` argument.
    - Note that the local coordinate convention of `VolumeLocator`
        (+X = left to right, +Y = top to bottom, +Z = away from the user)
        is *different* from the world coordinate convention of the
@@ -634,7 +647,7 @@ class VolumeLocator:
        torch.nn.functional.grid_sample(
            v.densities(),
            v.get_coord_grid(world_coordinates=False),
            align_corners=True,
            align_corners=align_corners,
        ) == v.densities(),

    i.e. sampling the volume at trivial local coordinates
@@ -651,6 +664,7 @@ class VolumeLocator:
        device: torch.device,
        voxel_size: _VoxelSize = 1.0,
        volume_translation: _Translation = (0.0, 0.0, 0.0),
        align_corners: bool = True,
    ):
        """
        **batch_size** : Batch size of the underlying grids
@@ -674,15 +688,21 @@ class VolumeLocator:
                b) a Tensor of shape (3,)
                c) a Tensor of shape (minibatch, 3)
                d) a Tensor of shape (1,) (square voxels)
        **align_corners**: If set (default), the coordinates of the corner voxels are
            exactly −1 or +1 in the local coordinate system. Otherwise, the coordinates
            correspond to the centers of the corner voxels. Cf. the namesake argument to
            `torch.nn.functional.grid_sample`.
        """
        self.device = device
        self._batch_size = batch_size
        self._grid_sizes = self._convert_grid_sizes2tensor(grid_sizes)
        self._resolution = tuple(torch.max(self._grid_sizes.cpu(), dim=0).values)
        self._align_corners = align_corners

        # set the local_to_world transform
        self._set_local_to_world_transform(
            voxel_size=voxel_size, volume_translation=volume_translation
            voxel_size=voxel_size,
            volume_translation=volume_translation,
        )

    def _convert_grid_sizes2tensor(
@@ -806,8 +826,17 @@ class VolumeLocator:
        grid_sizes = self.get_grid_sizes()

        # generate coordinate axes
        def corner_coord_adjustment(r):
            return 0.0 if self._align_corners else 1.0 / r

        vol_axes = [
            torch.linspace(-1.0, 1.0, r, dtype=torch.float32, device=self.device)
            torch.linspace(
                -1.0 + corner_coord_adjustment(r),
                1.0 - corner_coord_adjustment(r),
                r,
                dtype=torch.float32,
                device=self.device,
            )
            for r in (de, he, wi)
        ]
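A brief, hedged sketch of the new constructor flag and accessor (toy shapes invented for illustration); VolumeSampler and add_pointclouds_to_volumes above now follow whatever the volume reports.

```python
import torch

from pytorch3d.structures import Volumes

densities = torch.zeros(2, 1, 8, 8, 8)
features = torch.zeros(2, 3, 8, 8, 8)
vols = Volumes(densities=densities, features=features, align_corners=False)
print(vols.get_align_corners())  # False
```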
@@ -155,10 +155,10 @@ def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:

    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
    # forall i; we pick the best-conditioned one (with the largest denominator)

    return quat_candidates[
    out = quat_candidates[
        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
    ].reshape(batch_dim + (4,))
    return standardize_quaternion(out)


def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:
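The practical effect of routing the result through standardize_quaternion is that the returned real part is never negative; a quick hedged check:

```python
import torch

from pytorch3d.transforms import matrix_to_quaternion, random_rotations

R = random_rotations(4)
q = matrix_to_quaternion(R)  # (4, 4), real part (w) first
assert torch.all(q[:, 0] >= 0)
```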
@@ -8,6 +8,7 @@ import warnings
|
||||
from typing import Tuple
|
||||
|
||||
import torch
|
||||
from pytorch3d.transforms import rotation_conversions
|
||||
|
||||
from ..transforms import acos_linear_extrapolation
|
||||
|
||||
@@ -160,19 +161,10 @@ def _so3_exp_map(
|
||||
nrms = (log_rot * log_rot).sum(1)
|
||||
# phis ... rotation angles
|
||||
rot_angles = torch.clamp(nrms, eps).sqrt()
|
||||
# pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
|
||||
rot_angles_inv = 1.0 / rot_angles
|
||||
fac1 = rot_angles_inv * rot_angles.sin()
|
||||
fac2 = rot_angles_inv * rot_angles_inv * (1.0 - rot_angles.cos())
|
||||
skews = hat(log_rot)
|
||||
skews_square = torch.bmm(skews, skews)
|
||||
|
||||
R = (
|
||||
fac1[:, None, None] * skews
|
||||
# pyre-fixme[16]: `float` has no attribute `__getitem__`.
|
||||
+ fac2[:, None, None] * skews_square
|
||||
+ torch.eye(3, dtype=log_rot.dtype, device=log_rot.device)[None]
|
||||
)
|
||||
R = rotation_conversions.axis_angle_to_matrix(log_rot)
|
||||
|
||||
return R, rot_angles, skews, skews_square
|
||||
|
||||
@@ -183,49 +175,23 @@ def so3_log_map(

    """
    Convert a batch of 3x3 rotation matrices `R`
    to a batch of 3-dimensional matrix logarithms of rotation matrices
    The conversion has a singularity around `(R=I)` which is handled
    by clamping controlled with the `eps` and `cos_bound` arguments.
    The conversion has a singularity around `(R=I)`.

    Args:
        R: batch of rotation matrices of shape `(minibatch, 3, 3)`.
        eps: A float constant handling the conversion singularity.
        cos_bound: Clamps the cosine of the rotation angle to
            [-1 + cos_bound, 1 - cos_bound] to avoid non-finite outputs/gradients
            of the `acos` call when computing `so3_rotation_angle`.
            Note that the non-finite outputs/gradients are returned when
            the rotation angle is close to 0 or π.
        eps: (unused, for backward compatibility)
        cos_bound: (unused, for backward compatibility)

    Returns:
        Batch of logarithms of input rotation matrices
        of shape `(minibatch, 3)`.

    Raises:
        ValueError if `R` is of incorrect shape.
        ValueError if `R` has an unexpected trace.
    """

    N, dim1, dim2 = R.shape
    if dim1 != 3 or dim2 != 3:
        raise ValueError("Input has to be a batch of 3x3 Tensors.")

    phi = so3_rotation_angle(R, cos_bound=cos_bound, eps=eps)

    phi_sin = torch.sin(phi)

    # We want to avoid a tiny denominator of phi_factor = phi / (2.0 * phi_sin).
    # Hence, for phi_sin.abs() <= 0.5 * eps, we approximate phi_factor with
    # 2nd order Taylor expansion: phi_factor = 0.5 + (1.0 / 12) * phi**2
    phi_factor = torch.empty_like(phi)
    ok_denom = phi_sin.abs() > (0.5 * eps)
    # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
    phi_factor[~ok_denom] = 0.5 + (phi[~ok_denom] ** 2) * (1.0 / 12)
    phi_factor[ok_denom] = phi[ok_denom] / (2.0 * phi_sin[ok_denom])

    log_rot_hat = phi_factor[:, None, None] * (R - R.permute(0, 2, 1))

    log_rot = hat_inv(log_rot_hat)

    return log_rot
    return rotation_conversions.matrix_to_axis_angle(R)


def hat_inv(h: torch.Tensor) -> torch.Tensor:
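After these two hunks, so3_log_map is a thin wrapper over rotation_conversions.matrix_to_axis_angle, and _so3_exp_map builds the rotation via axis_angle_to_matrix instead of the explicit Rodrigues formula. A minimal round-trip check of that equivalence, assuming the two conversions remain mutually inverse away from the R = I singularity:

import torch
from pytorch3d.transforms import (
    axis_angle_to_matrix,
    matrix_to_axis_angle,
    so3_exp_map,
    so3_log_map,
)

log_rot = torch.randn(8, 3) * 0.5               # small random axis-angle vectors
R = so3_exp_map(log_rot)                        # same result as axis_angle_to_matrix after the change
assert torch.allclose(R, axis_angle_to_matrix(log_rot), atol=1e-6)
assert torch.allclose(so3_log_map(R), matrix_to_axis_angle(R), atol=1e-6)
assert torch.allclose(so3_log_map(R), log_rot, atol=1e-5)   # round trip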
@@ -440,22 +440,22 @@ class Transform3d:

    def translate(self, *args, **kwargs) -> "Transform3d":
        return self.compose(
            Translate(device=self.device, dtype=self.dtype, *args, **kwargs)
            Translate(*args, device=self.device, dtype=self.dtype, **kwargs)
        )

    def scale(self, *args, **kwargs) -> "Transform3d":
        return self.compose(
            Scale(device=self.device, dtype=self.dtype, *args, **kwargs)
            Scale(*args, device=self.device, dtype=self.dtype, **kwargs)
        )

    def rotate(self, *args, **kwargs) -> "Transform3d":
        return self.compose(
            Rotate(device=self.device, dtype=self.dtype, *args, **kwargs)
            Rotate(*args, device=self.device, dtype=self.dtype, **kwargs)
        )

    def rotate_axis_angle(self, *args, **kwargs) -> "Transform3d":
        return self.compose(
            RotateAxisAngle(device=self.device, dtype=self.dtype, *args, **kwargs)
            RotateAxisAngle(*args, device=self.device, dtype=self.dtype, **kwargs)
        )

    def clone(self) -> "Transform3d":
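The reordering above only touches the call sites: the positional *args are now unpacked ahead of the explicit device/dtype keywords, which reads in parameter order and avoids star-arg unpacking after a keyword argument (the pattern linters such as flake8-bugbear discourage). A small sketch with a hypothetical constructor standing in for Translate/Scale/Rotate:

def make(x, y=0.0, z=0.0, device="cpu", dtype="float32"):
    return (x, y, z, device, dtype)

args = (1.0, 2.0, 3.0)
# Old style: make(device="cuda:0", *args) is legal but unpacks positionals after a
# keyword, which is easy to misread. New style keeps positionals first.
print(make(*args, device="cuda:0"))  # (1.0, 2.0, 3.0, 'cuda:0', 'float32')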
@@ -12,7 +12,7 @@ import sys

from os.path import dirname, isfile, join


if __name__ == "__main__":
def main() -> None:
    # pyre-ignore[16]
    if len(sys.argv) > 1:
        # Parse from flags.
@@ -36,3 +36,7 @@ if __name__ == "__main__":
        os.environ["PYTHONPATH"] = ":".join(sys.path)
    for file_name in file_names:
        subprocess.check_call([sys.executable, file_name])


if __name__ == "__main__":
    main()  # pragma: no cover
@@ -24,6 +24,13 @@ def interactive_testing_requested() -> bool:

    return os.environ.get("PYTORCH3D_INTERACTIVE_TESTING", "") == "1"


def skip_opengl_requested() -> bool:
    return os.environ.get("PYTORCH3D_NO_TEST_OPENGL", "") == "1"


usesOpengl = unittest.skipIf(skip_opengl_requested(), "uses opengl")


def get_tests_dir() -> Path:
    """
    Returns Path for the directory containing this file.
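The new helpers gate OpenGL-dependent tests on an environment variable; the later hunks in this comparison apply @usesOpengl to the EGL/OpenGL test classes. A hedged usage sketch (the test class below is hypothetical, not from the repo):

import os
import unittest

def skip_opengl_requested() -> bool:
    # Same convention as the helper above.
    return os.environ.get("PYTORCH3D_NO_TEST_OPENGL", "") == "1"

usesOpengl = unittest.skipIf(skip_opengl_requested(), "uses opengl")

@usesOpengl
class HypotheticalOpenGLTest(unittest.TestCase):
    def test_something_with_egl(self):
        self.assertTrue(True)

# Run with PYTORCH3D_NO_TEST_OPENGL=1 to skip the decorated class entirely.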
28 tests/data/uvs.ply (new file)
@@ -0,0 +1,28 @@
ply
format ascii 1.0
comment made by Greg Turk
comment this file is a cube
comment TextureFile test_nd_sphere.png
element vertex 8
property float x
property float y
property float z
property float texture_u
property float texture_v
element face 6
property list uchar int vertex_index
end_header
0 0 0 0 0
0 0 1 0.2 0.3
0 1 1 0.2 0.3
0 1 0 0.2 0.3
1 0 0 0.2 0.3
1 0 1 0.2 0.3
1 1 1 0.2 0.3
1 1 0 0.4 0.5
4 0 1 2 3
4 7 6 5 4
4 0 4 5 1
4 1 5 6 2
4 2 6 7 3
4 3 7 4 0
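This fixture gives every vertex texture_u/texture_v properties, which the PLY loader exposes as per-vertex UVs together with the TextureFile comment (see test_load_uvs further down in this comparison). A minimal sketch of reading it; the relative path is an assumption for illustration, the test itself resolves it via get_tests_dir():

from pytorch3d.io import IO

mesh = IO().load_mesh("tests/data/uvs.ply")
print(mesh.textures.verts_uvs_padded().shape)   # expected (1, 8, 2): one UV pair per vertex
print(mesh.textures.faces_uvs_padded().shape)   # expected to match mesh.faces_padded().shape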
@@ -15,15 +15,14 @@ from pytorch3d.implicitron.models.utils import preprocess_input, weighted_sum_lo

class TestUtils(unittest.TestCase):
    def test_prepare_inputs_wrong_num_dim(self):
        img = torch.randn(3, 3, 3)
        with self.assertRaises(ValueError) as context:
        text = (
            "Model received unbatched inputs. "
            + "Perhaps they came from a FrameData which had not been collated."
        )
        with self.assertRaisesRegex(ValueError, text):
            img, fg_prob, depth_map = preprocess_input(
                img, None, None, True, True, 0.5, (0.0, 0.0, 0.0)
            )
        self.assertEqual(
            "Model received unbatched inputs. "
            + "Perhaps they came from a FrameData which had not been collated.",
            context.exception,
        )

    def test_prepare_inputs_mask_image_true(self):
        batch, channels, height, width = 2, 3, 10, 10
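The rewrites in this and the following test files replace assertRaises plus a post-hoc message check with assertRaisesRegex. The old form compared the exception object itself against a string, so the message was never really verified; the new form checks the message as part of the context manager, and related edits move setup statements out of the block so only the call expected to raise sits inside it. A small sketch of the difference:

import unittest

class Demo(unittest.TestCase):
    def test_message_checked(self):
        # Verifies both the exception type and its message.
        with self.assertRaisesRegex(ValueError, "unbatched inputs"):
            raise ValueError("Model received unbatched inputs.")

    def test_old_style_pitfall(self):
        # The old pattern only works if the comparison uses str(cm.exception);
        # comparing a string with the exception object always fails.
        with self.assertRaises(ValueError) as cm:
            raise ValueError("Model received unbatched inputs.")
        self.assertEqual(str(cm.exception), "Model received unbatched inputs.")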
@@ -224,6 +224,7 @@ class TestFrameDataBuilder(TestCaseMixin, unittest.TestCase):
|
||||
|
||||
def test_load_mask(self):
|
||||
path = os.path.join(self.dataset_root, self.frame_annotation.mask.path)
|
||||
path = self.path_manager.get_local_path(path)
|
||||
mask = load_mask(path)
|
||||
self.assertEqual(mask.dtype, np.float32)
|
||||
self.assertLessEqual(np.max(mask), 1.0)
|
||||
@@ -231,12 +232,14 @@ class TestFrameDataBuilder(TestCaseMixin, unittest.TestCase):
|
||||
|
||||
def test_load_depth(self):
|
||||
path = os.path.join(self.dataset_root, self.frame_annotation.depth.path)
|
||||
path = self.path_manager.get_local_path(path)
|
||||
depth_map = load_depth(path, self.frame_annotation.depth.scale_adjustment)
|
||||
self.assertEqual(depth_map.dtype, np.float32)
|
||||
self.assertEqual(len(depth_map.shape), 3)
|
||||
|
||||
def test_load_16big_png_depth(self):
|
||||
path = os.path.join(self.dataset_root, self.frame_annotation.depth.path)
|
||||
path = self.path_manager.get_local_path(path)
|
||||
depth_map = load_16big_png_depth(path)
|
||||
self.assertEqual(depth_map.dtype, np.float32)
|
||||
self.assertEqual(len(depth_map.shape), 2)
|
||||
@@ -245,6 +248,7 @@ class TestFrameDataBuilder(TestCaseMixin, unittest.TestCase):
|
||||
mask_path = os.path.join(
|
||||
self.dataset_root, self.frame_annotation.depth.mask_path
|
||||
)
|
||||
mask_path = self.path_manager.get_local_path(mask_path)
|
||||
mask = load_1bit_png_mask(mask_path)
|
||||
self.assertEqual(mask.dtype, np.float32)
|
||||
self.assertEqual(len(mask.shape), 2)
|
||||
@@ -253,6 +257,7 @@ class TestFrameDataBuilder(TestCaseMixin, unittest.TestCase):
|
||||
mask_path = os.path.join(
|
||||
self.dataset_root, self.frame_annotation.depth.mask_path
|
||||
)
|
||||
mask_path = self.path_manager.get_local_path(mask_path)
|
||||
mask = load_depth_mask(mask_path)
|
||||
self.assertEqual(mask.dtype, np.float32)
|
||||
self.assertEqual(len(mask.shape), 3)
|
||||
|
||||
@@ -38,22 +38,23 @@ class TestRendererBase(TestCaseMixin, unittest.TestCase):
|
||||
def test_implicitron_raise_value_error_bins_is_set_and_try_to_set_lengths(
|
||||
self,
|
||||
) -> None:
|
||||
with self.assertRaises(ValueError) as context:
|
||||
ray_bundle = ImplicitronRayBundle(
|
||||
origins=torch.rand(2, 3, 4, 3),
|
||||
directions=torch.rand(2, 3, 4, 3),
|
||||
lengths=None,
|
||||
xys=torch.rand(2, 3, 4, 2),
|
||||
bins=torch.rand(2, 3, 4, 1),
|
||||
)
|
||||
ray_bundle = ImplicitronRayBundle(
|
||||
origins=torch.rand(2, 3, 4, 3),
|
||||
directions=torch.rand(2, 3, 4, 3),
|
||||
lengths=None,
|
||||
xys=torch.rand(2, 3, 4, 2),
|
||||
bins=torch.rand(2, 3, 4, 14),
|
||||
)
|
||||
with self.assertRaisesRegex(
|
||||
ValueError,
|
||||
"If the bins attribute is not None you cannot set the lengths attribute.",
|
||||
):
|
||||
ray_bundle.lengths = torch.empty(2)
|
||||
self.assertEqual(
|
||||
str(context.exception),
|
||||
"If the bins attribute is not None you cannot set the lengths attribute.",
|
||||
)
|
||||
|
||||
def test_implicitron_raise_value_error_if_bins_dim_equal_1(self) -> None:
|
||||
with self.assertRaises(ValueError) as context:
|
||||
with self.assertRaisesRegex(
|
||||
ValueError, "The last dim of bins must be at least superior or equal to 2."
|
||||
):
|
||||
ImplicitronRayBundle(
|
||||
origins=torch.rand(2, 3, 4, 3),
|
||||
directions=torch.rand(2, 3, 4, 3),
|
||||
@@ -61,15 +62,14 @@ class TestRendererBase(TestCaseMixin, unittest.TestCase):
|
||||
xys=torch.rand(2, 3, 4, 2),
|
||||
bins=torch.rand(2, 3, 4, 1),
|
||||
)
|
||||
self.assertEqual(
|
||||
str(context.exception),
|
||||
"The last dim of bins must be at least superior or equal to 2.",
|
||||
)
|
||||
|
||||
def test_implicitron_raise_value_error_if_neither_bins_or_lengths_provided(
|
||||
self,
|
||||
) -> None:
|
||||
with self.assertRaises(ValueError) as context:
|
||||
with self.assertRaisesRegex(
|
||||
ValueError,
|
||||
"Please set either bins or lengths to initialize an ImplicitronRayBundle.",
|
||||
):
|
||||
ImplicitronRayBundle(
|
||||
origins=torch.rand(2, 3, 4, 3),
|
||||
directions=torch.rand(2, 3, 4, 3),
|
||||
@@ -77,10 +77,6 @@ class TestRendererBase(TestCaseMixin, unittest.TestCase):
|
||||
xys=torch.rand(2, 3, 4, 2),
|
||||
bins=None,
|
||||
)
|
||||
self.assertEqual(
|
||||
str(context.exception),
|
||||
"Please set either bins or lengths to initialize an ImplicitronRayBundle.",
|
||||
)
|
||||
|
||||
def test_conical_frustum_to_gaussian(self) -> None:
|
||||
origins = torch.zeros(3, 3, 3)
|
||||
@@ -266,8 +262,6 @@ class TestRendererBase(TestCaseMixin, unittest.TestCase):
|
||||
ray = ImplicitronRayBundle(
|
||||
origins=origins, directions=directions, lengths=lengths, xys=None
|
||||
)
|
||||
with self.assertRaises(ValueError) as context:
|
||||
_ = conical_frustum_to_gaussian(ray)
|
||||
|
||||
expected_error_message = (
|
||||
"RayBundle pixel_radii_2d or bins have not been provided."
|
||||
@@ -276,7 +270,8 @@ class TestRendererBase(TestCaseMixin, unittest.TestCase):
|
||||
"`cast_ray_bundle_as_cone` to True?"
|
||||
)
|
||||
|
||||
self.assertEqual(expected_error_message, str(context.exception))
|
||||
with self.assertRaisesRegex(ValueError, expected_error_message):
|
||||
_ = conical_frustum_to_gaussian(ray)
|
||||
|
||||
# Ensure message is coherent with AbstractMaskRaySampler
|
||||
class FakeRaySampler(AbstractMaskRaySampler):
|
||||
|
||||
@@ -62,3 +62,11 @@ class TestPointCloudUtils(TestCaseMixin, unittest.TestCase):
|
||||
)
|
||||
[points] = cloud.points_list()
|
||||
self.assertConstant(torch.norm(points, dim=1), depth, atol=1e-5)
|
||||
|
||||
# 3. four channels
|
||||
get_rgbd_point_cloud(
|
||||
camera,
|
||||
image_rgb=image[None],
|
||||
depth_map=image[3:][None],
|
||||
euclidean=True,
|
||||
)
|
||||
|
||||
@@ -964,8 +964,8 @@ class TestFoVPerspectiveProjection(TestCaseMixin, unittest.TestCase):
|
||||
with self.assertRaisesRegex(IndexError, "out of bounds"):
|
||||
cam[N_CAMERAS]
|
||||
|
||||
index = torch.tensor([1, 0, 1], dtype=torch.bool)
|
||||
with self.assertRaisesRegex(ValueError, "does not match cameras"):
|
||||
index = torch.tensor([1, 0, 1], dtype=torch.bool)
|
||||
cam[index]
|
||||
|
||||
with self.assertRaisesRegex(ValueError, "Invalid index type"):
|
||||
@@ -974,8 +974,8 @@ class TestFoVPerspectiveProjection(TestCaseMixin, unittest.TestCase):
|
||||
with self.assertRaisesRegex(ValueError, "Invalid index type"):
|
||||
cam[[True, False]]
|
||||
|
||||
index = torch.tensor(SLICE, dtype=torch.float32)
|
||||
with self.assertRaisesRegex(ValueError, "Invalid index type"):
|
||||
index = torch.tensor(SLICE, dtype=torch.float32)
|
||||
cam[index]
|
||||
|
||||
def test_get_full_transform(self):
|
||||
|
||||
@@ -8,6 +8,7 @@ import unittest

import torch
from pytorch3d.ops import cubify
from pytorch3d.renderer.mesh.textures import TexturesAtlas

from .common_testing import TestCaseMixin

@@ -313,3 +314,42 @@ class TestCubify(TestCaseMixin, unittest.TestCase):
            torch.cuda.synchronize()

        return convert

    def test_cubify_with_feats(self):
        N, V = 3, 2
        device = torch.device("cuda:0")
        voxels = torch.zeros((N, V, V, V), dtype=torch.float32, device=device)
        feats = torch.zeros((N, 3, V, V, V), dtype=torch.float32, device=device)
        # fill the feats with red color
        feats[:, 0, :, :, :] = 255

        # 1st example: (top left corner, znear) is on
        voxels[0, 0, 0, 0] = 1.0
        # the color is set to green
        feats[0, :, 0, 0, 0] = torch.Tensor([0, 255, 0])
        # 2nd example: all are on
        voxels[1] = 1.0

        # 3rd example
        voxels[2, :, :, 1] = 1.0
        voxels[2, 1, 1, 0] = 1.0
        # the color is set to yellow and blue respectively
        feats[2, 1, :, :, 1] = 255
        feats[2, :, 1, 1, 0] = torch.Tensor([0, 0, 255])
        meshes = cubify(voxels, 0.5, feats=feats, align="center")
        textures = meshes.textures
        self.assertTrue(textures is not None)
        self.assertTrue(isinstance(textures, TexturesAtlas))
        faces_textures = textures.faces_verts_textures_packed()
        red = faces_textures.new_tensor([255.0, 0.0, 0.0])
        green = faces_textures.new_tensor([0.0, 255.0, 0.0])
        blue = faces_textures.new_tensor([0.0, 0.0, 255.0])
        yellow = faces_textures.new_tensor([255.0, 255.0, 0.0])

        self.assertEqual(faces_textures.shape, (100, 3, 3))
        faces_textures_ = faces_textures.flatten(end_dim=1)
        self.assertClose(faces_textures_[:36], green.expand(36, -1))
        self.assertClose(faces_textures_[36:180], red.expand(144, -1))
        self.assertClose(faces_textures_[180:228], yellow.expand(48, -1))
        self.assertClose(faces_textures_[228:258], blue.expand(30, -1))
        self.assertClose(faces_textures_[258:300], yellow.expand(42, -1))
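The new test exercises cubify with per-voxel features, which come back as a TexturesAtlas on the generated mesh. A pared-down sketch of the call, following the layout used in the test above; running it on CPU and with a single occupied voxel are assumptions made for brevity:

import torch
from pytorch3d.ops import cubify
from pytorch3d.renderer.mesh.textures import TexturesAtlas

V = 2
voxels = torch.zeros((1, V, V, V))
feats = torch.zeros((1, 3, V, V, V))
voxels[0, 0, 0, 0] = 1.0                                  # one occupied voxel
feats[0, :, 0, 0, 0] = torch.tensor([0.0, 255.0, 0.0])    # colored green

meshes = cubify(voxels, 0.5, feats=feats, align="center")
assert isinstance(meshes.textures, TexturesAtlas)
print(meshes.textures.faces_verts_textures_packed().shape)  # (num_faces, 3, 3)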
@@ -422,9 +422,9 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
|
||||
|
||||
def test_save_obj_invalid_shapes(self):
|
||||
# Invalid vertices shape
|
||||
verts = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4]]) # (V, 4)
|
||||
faces = torch.LongTensor([[0, 1, 2]])
|
||||
with self.assertRaises(ValueError) as error:
|
||||
verts = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4]]) # (V, 4)
|
||||
faces = torch.LongTensor([[0, 1, 2]])
|
||||
with NamedTemporaryFile(mode="w", suffix=".obj") as f:
|
||||
save_obj(Path(f.name), verts, faces)
|
||||
expected_message = (
|
||||
@@ -433,9 +433,9 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
|
||||
self.assertTrue(expected_message, error.exception)
|
||||
|
||||
# Invalid faces shape
|
||||
verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
|
||||
faces = torch.LongTensor([[0, 1, 2, 3]]) # (F, 4)
|
||||
with self.assertRaises(ValueError) as error:
|
||||
verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
|
||||
faces = torch.LongTensor([[0, 1, 2, 3]]) # (F, 4)
|
||||
with NamedTemporaryFile(mode="w", suffix=".obj") as f:
|
||||
save_obj(Path(f.name), verts, faces)
|
||||
expected_message = (
|
||||
|
||||
@@ -20,10 +20,11 @@ from pytorch3d.renderer.mesh import TexturesVertex
|
||||
from pytorch3d.structures import Meshes, Pointclouds
|
||||
from pytorch3d.utils import torus
|
||||
|
||||
from .common_testing import TestCaseMixin
|
||||
from .common_testing import get_tests_dir, TestCaseMixin
|
||||
|
||||
|
||||
global_path_manager = PathManager()
|
||||
DATA_DIR = get_tests_dir() / "data"
|
||||
|
||||
|
||||
def _load_ply_raw(stream):
|
||||
@@ -307,9 +308,9 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
|
||||
|
||||
def test_save_ply_invalid_shapes(self):
|
||||
# Invalid vertices shape
|
||||
verts = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4]]) # (V, 4)
|
||||
faces = torch.LongTensor([[0, 1, 2]])
|
||||
with self.assertRaises(ValueError) as error:
|
||||
verts = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4]]) # (V, 4)
|
||||
faces = torch.LongTensor([[0, 1, 2]])
|
||||
save_ply(BytesIO(), verts, faces)
|
||||
expected_message = (
|
||||
"Argument 'verts' should either be empty or of shape (num_verts, 3)."
|
||||
@@ -317,9 +318,9 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
|
||||
self.assertTrue(expected_message, error.exception)
|
||||
|
||||
# Invalid faces shape
|
||||
verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
|
||||
faces = torch.LongTensor([[0, 1, 2, 3]]) # (F, 4)
|
||||
with self.assertRaises(ValueError) as error:
|
||||
verts = torch.FloatTensor([[0.1, 0.2, 0.3]])
|
||||
faces = torch.LongTensor([[0, 1, 2, 3]]) # (F, 4)
|
||||
save_ply(BytesIO(), verts, faces)
|
||||
expected_message = (
|
||||
"Argument 'faces' should either be empty or of shape (num_faces, 3)."
|
||||
@@ -778,6 +779,19 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
|
||||
data["minus_ones"], [-1, 255, -1, 65535, -1, 4294967295]
|
||||
)
|
||||
|
||||
def test_load_uvs(self):
|
||||
io = IO()
|
||||
mesh = io.load_mesh(DATA_DIR / "uvs.ply")
|
||||
self.assertEqual(mesh.textures.verts_uvs_padded().shape, (1, 8, 2))
|
||||
self.assertClose(
|
||||
mesh.textures.verts_uvs_padded()[0],
|
||||
torch.tensor([[0, 0]] + [[0.2, 0.3]] * 6 + [[0.4, 0.5]]),
|
||||
)
|
||||
self.assertEqual(
|
||||
mesh.textures.faces_uvs_padded().shape, mesh.faces_padded().shape
|
||||
)
|
||||
self.assertEqual(mesh.textures.maps_padded().shape, (1, 512, 512, 3))
|
||||
|
||||
def test_bad_ply_syntax(self):
|
||||
"""Some syntactically bad ply files."""
|
||||
lines = [
|
||||
|
||||
@@ -854,6 +854,18 @@ class TestMarchingCubes(TestCaseMixin, unittest.TestCase):
|
||||
self.assertClose(verts2[0], expected_verts)
|
||||
self.assertClose(faces2[0], expected_faces)
|
||||
|
||||
def test_single_large_ellipsoid(self):
|
||||
if USE_SCIKIT:
|
||||
from skimage.draw import ellipsoid
|
||||
|
||||
ellip_base = ellipsoid(50, 60, 16, levelset=True)
|
||||
volume = torch.Tensor(ellip_base).unsqueeze(0).cpu()
|
||||
verts, faces = marching_cubes_naive(volume, 0)
|
||||
verts2, faces2 = marching_cubes(volume, 0)
|
||||
|
||||
self.assertClose(verts[0], verts2[0], atol=1e-6)
|
||||
self.assertClose(faces[0], faces2[0], atol=1e-6)
|
||||
|
||||
def test_cube_surface_area(self):
|
||||
if USE_SCIKIT:
|
||||
from skimage.measure import marching_cubes_classic, mesh_surface_area
|
||||
@@ -939,8 +951,11 @@ class TestMarchingCubes(TestCaseMixin, unittest.TestCase):
|
||||
u = u[None].float()
|
||||
verts, faces = marching_cubes_naive(u, 0, return_local_coords=False)
|
||||
verts2, faces2 = marching_cubes(u, 0, return_local_coords=False)
|
||||
self.assertClose(verts[0], verts2[0])
|
||||
self.assertClose(faces[0], faces2[0])
|
||||
self.assertClose(verts2[0], verts[0])
|
||||
self.assertClose(faces2[0], faces[0])
|
||||
verts3, faces3 = marching_cubes(u.cuda(), 0, return_local_coords=False)
|
||||
self.assertEqual(len(verts3), len(verts))
|
||||
self.assertEqual(len(faces3), len(faces))
|
||||
|
||||
@staticmethod
|
||||
def marching_cubes_with_init(algo_type: str, batch_size: int, V: int, device: str):
|
||||
|
||||
@@ -324,17 +324,15 @@ class TestMeshes(TestCaseMixin, unittest.TestCase):
|
||||
]
|
||||
faces_list = mesh.faces_list()
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "same device"):
|
||||
Meshes(verts=verts_list, faces=faces_list)
|
||||
self.assertTrue("same device" in cm.msg)
|
||||
|
||||
verts_padded = mesh.verts_padded() # on cpu
|
||||
verts_padded = verts_padded.to("cuda:0")
|
||||
faces_padded = mesh.faces_padded()
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "same device"):
|
||||
Meshes(verts=verts_padded, faces=faces_padded)
|
||||
self.assertTrue("same device" in cm.msg)
|
||||
|
||||
def test_simple_random_meshes(self):
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ from pytorch3d.renderer.opengl.opengl_utils import ( # noqa
|
||||
global_device_context_store,
|
||||
)
|
||||
|
||||
from .common_testing import TestCaseMixin # noqa
|
||||
from .common_testing import TestCaseMixin, usesOpengl # noqa
|
||||
|
||||
MAX_EGL_HEIGHT = global_device_context_store.max_egl_height
|
||||
MAX_EGL_WIDTH = global_device_context_store.max_egl_width
|
||||
@@ -82,6 +82,7 @@ def _draw_squares_with_context_store(
|
||||
result[thread_id] = data
|
||||
|
||||
|
||||
@usesOpengl
|
||||
class TestDeviceContextStore(TestCaseMixin, unittest.TestCase):
|
||||
def test_cuda_context(self):
|
||||
cuda_context_1 = global_device_context_store.get_cuda_context(
|
||||
@@ -118,6 +119,7 @@ class TestDeviceContextStore(TestCaseMixin, unittest.TestCase):
|
||||
self.assertIsNot(egl_context_1, egl_context_3)
|
||||
|
||||
|
||||
@usesOpengl
|
||||
class TestUtils(TestCaseMixin, unittest.TestCase):
|
||||
def test_load_extensions(self):
|
||||
# This should work
|
||||
@@ -145,6 +147,7 @@ class TestUtils(TestCaseMixin, unittest.TestCase):
|
||||
self.assertEqual(attribute_array[2], egl.EGL_NONE)
|
||||
|
||||
|
||||
@usesOpengl
|
||||
class TestOpenGLSingleThreaded(TestCaseMixin, unittest.TestCase):
|
||||
def test_draw_square(self):
|
||||
context = EGLContext(width=MAX_EGL_WIDTH, height=MAX_EGL_HEIGHT)
|
||||
@@ -184,6 +187,7 @@ class TestOpenGLSingleThreaded(TestCaseMixin, unittest.TestCase):
|
||||
)
|
||||
|
||||
|
||||
@usesOpengl
|
||||
class TestOpenGLMultiThreaded(TestCaseMixin, unittest.TestCase):
|
||||
def test_multiple_renders_single_gpu_single_context(self):
|
||||
_draw_squares_with_context()
|
||||
@@ -321,6 +325,7 @@ class TestOpenGLMultiThreaded(TestCaseMixin, unittest.TestCase):
|
||||
thread.join()
|
||||
|
||||
|
||||
@usesOpengl
|
||||
class TestOpenGLUtils(TestCaseMixin, unittest.TestCase):
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
|
||||
@@ -148,31 +148,28 @@ class TestPointclouds(TestCaseMixin, unittest.TestCase):
|
||||
features_list = clouds.features_list()
|
||||
normals_list = clouds.normals_list()
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "same device"):
|
||||
Pointclouds(
|
||||
points=points_list, features=features_list, normals=normals_list
|
||||
)
|
||||
self.assertTrue("same device" in cm.msg)
|
||||
|
||||
points_list = clouds.points_list()
|
||||
features_list = [
|
||||
f.to("cpu") if random.uniform(0, 1) > 0.2 else f for f in features_list
|
||||
]
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "same device"):
|
||||
Pointclouds(
|
||||
points=points_list, features=features_list, normals=normals_list
|
||||
)
|
||||
self.assertTrue("same device" in cm.msg)
|
||||
|
||||
points_padded = clouds.points_padded() # on cuda:0
|
||||
features_padded = clouds.features_padded().to("cpu")
|
||||
normals_padded = clouds.normals_padded()
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "same device"):
|
||||
Pointclouds(
|
||||
points=points_padded, features=features_padded, normals=normals_padded
|
||||
)
|
||||
self.assertTrue("same device" in cm.msg)
|
||||
|
||||
def test_all_constructions(self):
|
||||
public_getters = [
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import re
|
||||
import unittest
|
||||
from itertools import product
|
||||
|
||||
@@ -50,6 +51,7 @@ from .common_testing import (
|
||||
get_tests_dir,
|
||||
load_rgb_image,
|
||||
TestCaseMixin,
|
||||
usesOpengl,
|
||||
)
|
||||
|
||||
|
||||
@@ -102,62 +104,56 @@ class TestRasterizeRectangleImagesErrors(TestCaseMixin, unittest.TestCase):
|
||||
def test_mesh_image_size_arg(self):
|
||||
meshes = Meshes(verts=[verts0], faces=[faces0])
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, re.escape("tuple/list of (H, W)")):
|
||||
rasterize_meshes(
|
||||
meshes,
|
||||
(100, 200, 3),
|
||||
0.0001,
|
||||
faces_per_pixel=1,
|
||||
)
|
||||
self.assertTrue("tuple/list of (H, W)" in cm.msg)
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "sizes must be greater than 0"):
|
||||
rasterize_meshes(
|
||||
meshes,
|
||||
(0, 10),
|
||||
0.0001,
|
||||
faces_per_pixel=1,
|
||||
)
|
||||
self.assertTrue("sizes must be positive" in cm.msg)
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "sizes must be integers"):
|
||||
rasterize_meshes(
|
||||
meshes,
|
||||
(100.5, 120.5),
|
||||
0.0001,
|
||||
faces_per_pixel=1,
|
||||
)
|
||||
self.assertTrue("sizes must be integers" in cm.msg)
|
||||
|
||||
def test_points_image_size_arg(self):
|
||||
points = Pointclouds([verts0])
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, re.escape("tuple/list of (H, W)")):
|
||||
rasterize_points(
|
||||
points,
|
||||
(100, 200, 3),
|
||||
0.0001,
|
||||
points_per_pixel=1,
|
||||
)
|
||||
self.assertTrue("tuple/list of (H, W)" in cm.msg)
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "sizes must be greater than 0"):
|
||||
rasterize_points(
|
||||
points,
|
||||
(0, 10),
|
||||
0.0001,
|
||||
points_per_pixel=1,
|
||||
)
|
||||
self.assertTrue("sizes must be positive" in cm.msg)
|
||||
|
||||
with self.assertRaises(ValueError) as cm:
|
||||
with self.assertRaisesRegex(ValueError, "sizes must be integers"):
|
||||
rasterize_points(
|
||||
points,
|
||||
(100.5, 120.5),
|
||||
0.0001,
|
||||
points_per_pixel=1,
|
||||
)
|
||||
self.assertTrue("sizes must be integers" in cm.msg)
|
||||
|
||||
|
||||
class TestRasterizeRectangleImagesMeshes(TestCaseMixin, unittest.TestCase):
|
||||
@@ -459,6 +455,7 @@ class TestRasterizeRectangleImagesMeshes(TestCaseMixin, unittest.TestCase):
|
||||
def test_render_cow(self):
|
||||
self._render_cow(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_render_cow_opengl(self):
|
||||
self._render_cow(MeshRasterizerOpenGL)
|
||||
|
||||
|
||||
@@ -33,7 +33,12 @@ from pytorch3d.structures import Pointclouds
|
||||
from pytorch3d.structures.meshes import Meshes
|
||||
from pytorch3d.utils.ico_sphere import ico_sphere
|
||||
|
||||
from .common_testing import get_tests_dir, TestCaseMixin
|
||||
from .common_testing import (
|
||||
get_tests_dir,
|
||||
skip_opengl_requested,
|
||||
TestCaseMixin,
|
||||
usesOpengl,
|
||||
)
|
||||
|
||||
|
||||
DATA_DIR = get_tests_dir() / "data"
|
||||
@@ -55,6 +60,7 @@ class TestMeshRasterizer(unittest.TestCase):
|
||||
def test_simple_sphere_fisheye(self):
|
||||
self._simple_sphere_fisheye_against_perspective(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_simple_sphere_opengl(self):
|
||||
self._simple_sphere(MeshRasterizerOpenGL)
|
||||
|
||||
@@ -250,9 +256,11 @@ class TestMeshRasterizer(unittest.TestCase):
|
||||
rasterizer = MeshRasterizer()
|
||||
rasterizer.to(device)
|
||||
|
||||
rasterizer = MeshRasterizerOpenGL()
|
||||
rasterizer.to(device)
|
||||
if not skip_opengl_requested():
|
||||
rasterizer = MeshRasterizerOpenGL()
|
||||
rasterizer.to(device)
|
||||
|
||||
@usesOpengl
|
||||
def test_compare_rasterizers(self):
|
||||
device = torch.device("cuda:0")
|
||||
|
||||
@@ -321,6 +329,7 @@ class TestMeshRasterizer(unittest.TestCase):
|
||||
)
|
||||
|
||||
|
||||
@usesOpengl
|
||||
class TestMeshRasterizerOpenGLUtils(TestCaseMixin, unittest.TestCase):
|
||||
def setUp(self):
|
||||
verts = torch.tensor(
|
||||
@@ -419,16 +428,16 @@ class TestMeshRasterizerOpenGLUtils(TestCaseMixin, unittest.TestCase):
|
||||
fragments = rasterizer(self.meshes_world, raster_settings=raster_settings)
|
||||
self.assertEqual(fragments.pix_to_face.shape, torch.Size([1, 10, 2047, 1]))
|
||||
|
||||
raster_settings.image_size = (2049, 512)
|
||||
with self.assertRaisesRegex(ValueError, "Max rasterization size is"):
|
||||
raster_settings.image_size = (2049, 512)
|
||||
rasterizer(self.meshes_world, raster_settings=raster_settings)
|
||||
|
||||
raster_settings.image_size = (512, 2049)
|
||||
with self.assertRaisesRegex(ValueError, "Max rasterization size is"):
|
||||
raster_settings.image_size = (512, 2049)
|
||||
rasterizer(self.meshes_world, raster_settings=raster_settings)
|
||||
|
||||
raster_settings.image_size = (2049, 2049)
|
||||
with self.assertRaisesRegex(ValueError, "Max rasterization size is"):
|
||||
raster_settings.image_size = (2049, 2049)
|
||||
rasterizer(self.meshes_world, raster_settings=raster_settings)
|
||||
|
||||
|
||||
|
||||
@@ -59,7 +59,9 @@ from .common_testing import (
|
||||
get_pytorch3d_dir,
|
||||
get_tests_dir,
|
||||
load_rgb_image,
|
||||
skip_opengl_requested,
|
||||
TestCaseMixin,
|
||||
usesOpengl,
|
||||
)
|
||||
|
||||
# If DEBUG=True, save out images generated in the tests for debugging.
|
||||
@@ -159,13 +161,16 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
MeshRasterizer, HardGouraudShader, "gouraud", "hard_gouraud"
|
||||
),
|
||||
RasterizerTest(MeshRasterizer, HardFlatShader, "flat", "hard_flat"),
|
||||
RasterizerTest(
|
||||
MeshRasterizerOpenGL,
|
||||
SplatterPhongShader,
|
||||
"splatter",
|
||||
"splatter_phong",
|
||||
),
|
||||
]
|
||||
if not skip_opengl_requested():
|
||||
rasterizer_tests.append(
|
||||
RasterizerTest(
|
||||
MeshRasterizerOpenGL,
|
||||
SplatterPhongShader,
|
||||
"splatter",
|
||||
"splatter_phong",
|
||||
)
|
||||
)
|
||||
for test in rasterizer_tests:
|
||||
shader = test.shader(
|
||||
lights=lights,
|
||||
@@ -400,13 +405,16 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
MeshRasterizer, HardGouraudShader, "gouraud", "hard_gouraud"
|
||||
),
|
||||
RasterizerTest(MeshRasterizer, HardFlatShader, "flat", "hard_flat"),
|
||||
RasterizerTest(
|
||||
MeshRasterizerOpenGL,
|
||||
SplatterPhongShader,
|
||||
"splatter",
|
||||
"splatter_phong",
|
||||
),
|
||||
]
|
||||
if not skip_opengl_requested():
|
||||
rasterizer_tests.append(
|
||||
RasterizerTest(
|
||||
MeshRasterizerOpenGL,
|
||||
SplatterPhongShader,
|
||||
"splatter",
|
||||
"splatter_phong",
|
||||
)
|
||||
)
|
||||
for test in rasterizer_tests:
|
||||
reference_name = test.reference_name
|
||||
debug_name = test.debug_name
|
||||
@@ -518,6 +526,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
"""
|
||||
self._texture_map_per_rasterizer(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_texture_map_opengl(self):
|
||||
"""
|
||||
Test a mesh with a texture map is loaded and rendered correctly.
|
||||
@@ -694,6 +703,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
def test_batch_uvs(self):
|
||||
self._batch_uvs(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_batch_uvs_opengl(self):
|
||||
self._batch_uvs(MeshRasterizer)
|
||||
|
||||
@@ -786,6 +796,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
def test_join_uvs(self):
|
||||
self._join_uvs(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_join_uvs_opengl(self):
|
||||
self._join_uvs(MeshRasterizerOpenGL)
|
||||
|
||||
@@ -932,7 +943,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
)
|
||||
).save(DATA_DIR / f"DEBUG_test_joinuvs{i}_map3.png")
|
||||
|
||||
self.assertClose(output, merged)
|
||||
self.assertClose(output, merged, atol=0.005)
|
||||
self.assertClose(output, image_ref, atol=0.005)
|
||||
self.assertClose(mesh.textures.maps_padded()[0].cpu(), map_ref, atol=0.05)
|
||||
|
||||
@@ -975,6 +986,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
def test_join_verts(self):
|
||||
self._join_verts(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_join_verts_opengl(self):
|
||||
self._join_verts(MeshRasterizerOpenGL)
|
||||
|
||||
@@ -1051,6 +1063,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
def test_join_atlas(self):
|
||||
self._join_atlas(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_join_atlas_opengl(self):
|
||||
self._join_atlas(MeshRasterizerOpenGL)
|
||||
|
||||
@@ -1151,6 +1164,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
def test_joined_spheres(self):
|
||||
self._joined_spheres(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_joined_spheres_opengl(self):
|
||||
self._joined_spheres(MeshRasterizerOpenGL)
|
||||
|
||||
@@ -1233,6 +1247,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
def test_texture_map_atlas(self):
|
||||
self._texture_map_atlas(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_texture_map_atlas_opengl(self):
|
||||
self._texture_map_atlas(MeshRasterizerOpenGL)
|
||||
|
||||
@@ -1351,6 +1366,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
def test_simple_sphere_outside_zfar(self):
|
||||
self._simple_sphere_outside_zfar(MeshRasterizer)
|
||||
|
||||
@usesOpengl
|
||||
def test_simple_sphere_outside_zfar_opengl(self):
|
||||
self._simple_sphere_outside_zfar(MeshRasterizerOpenGL)
|
||||
|
||||
@@ -1445,13 +1461,16 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
|
||||
# No elevation or azimuth rotation
|
||||
rasterizer_tests = [
|
||||
RasterizerTest(MeshRasterizer, HardPhongShader, "phong", "hard_phong"),
|
||||
RasterizerTest(
|
||||
MeshRasterizerOpenGL,
|
||||
SplatterPhongShader,
|
||||
"splatter",
|
||||
"splatter_phong",
|
||||
),
|
||||
]
|
||||
if not skip_opengl_requested():
|
||||
rasterizer_tests.append(
|
||||
RasterizerTest(
|
||||
MeshRasterizerOpenGL,
|
||||
SplatterPhongShader,
|
||||
"splatter",
|
||||
"splatter_phong",
|
||||
)
|
||||
)
|
||||
R, T = look_at_view_transform(2.7, 0.0, 0.0)
|
||||
for cam_type in (
|
||||
FoVPerspectiveCameras,
|
||||
|
||||
@@ -29,7 +29,7 @@ from pytorch3d.renderer.opengl import MeshRasterizerOpenGL
|
||||
from pytorch3d.structures import Meshes, Pointclouds
|
||||
from pytorch3d.utils.ico_sphere import ico_sphere
|
||||
|
||||
from .common_testing import TestCaseMixin
|
||||
from .common_testing import TestCaseMixin, usesOpengl
|
||||
|
||||
|
||||
# Set the number of GPUS you want to test with
|
||||
@@ -119,6 +119,7 @@ class TestRenderMeshesMultiGPU(TestCaseMixin, unittest.TestCase):
|
||||
def test_mesh_renderer_to(self):
|
||||
self._mesh_renderer_to(MeshRasterizer, SoftPhongShader)
|
||||
|
||||
@usesOpengl
|
||||
def test_mesh_renderer_opengl_to(self):
|
||||
self._mesh_renderer_to(MeshRasterizerOpenGL, SplatterPhongShader)
|
||||
|
||||
@@ -177,6 +178,7 @@ class TestRenderMeshesMultiGPU(TestCaseMixin, unittest.TestCase):
|
||||
self._render_meshes(MeshRasterizer, HardGouraudShader)
|
||||
|
||||
# @unittest.skip("Multi-GPU OpenGL training is currently not supported.")
|
||||
@usesOpengl
|
||||
def test_render_meshes_opengl(self):
|
||||
self._render_meshes(MeshRasterizerOpenGL, SplatterPhongShader)
|
||||
|
||||
|
||||
@@ -220,7 +220,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
        # znear and zfar is required in this case.
        self.assertRaises(
            ValueError,
            lambda: renderer.forward(
            lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
                point_clouds=pointclouds, gamma=(1e-4,)
            ),
        )
@@ -233,7 +233,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
        # znear and zfar must be batched.
        self.assertRaises(
            TypeError,
            lambda: renderer.forward(
            lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
                point_clouds=pointclouds,
                gamma=(1e-4,),
                znear=1.0,
@@ -242,7 +242,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
        )
        self.assertRaises(
            TypeError,
            lambda: renderer.forward(
            lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
                point_clouds=pointclouds,
                gamma=(1e-4,),
                znear=(1.0,),
@@ -253,7 +253,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
        # gamma must be batched.
        self.assertRaises(
            TypeError,
            lambda: renderer.forward(
            lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
                point_clouds=pointclouds, gamma=1e-4
            ),
        )
@@ -262,7 +262,7 @@ class TestRenderPoints(TestCaseMixin, unittest.TestCase):
        renderer.rasterizer.raster_settings.image_size = 0
        self.assertRaises(
            ValueError,
            lambda: renderer.forward(
            lambda renderer=renderer, pointclouds=pointclouds: renderer.forward(
                point_clouds=pointclouds, gamma=(1e-4,)
            ),
        )
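Binding renderer and pointclouds as default arguments makes each lambda's dependencies explicit and captures them at definition time, instead of relying on the enclosing names still pointing at the right objects when the lambda runs (the usual late-binding closure surprise, and what linters flag for functions defined near reassigned variables). A generic sketch of the difference:

# Late binding: every closure sees the final value of i.
late = [lambda: i for i in range(3)]
print([f() for f in late])        # [2, 2, 2]

# Default-argument binding captures the value at definition time.
bound = [lambda i=i: i for i in range(3)]
print([f() for f in bound])       # [0, 1, 2]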
@@ -97,20 +97,6 @@ class TestSO3(TestCaseMixin, unittest.TestCase):
|
||||
so3_log_map(rot)
|
||||
self.assertTrue("Input has to be a batch of 3x3 Tensors." in str(err.exception))
|
||||
|
||||
# trace of rot definitely bigger than 3 or smaller than -1
|
||||
rot = torch.cat(
|
||||
(
|
||||
torch.rand(size=[5, 3, 3], device=device) + 4.0,
|
||||
torch.rand(size=[5, 3, 3], device=device) - 3.0,
|
||||
)
|
||||
)
|
||||
with self.assertRaises(ValueError) as err:
|
||||
so3_log_map(rot)
|
||||
self.assertTrue(
|
||||
"A matrix has trace outside valid range [-1-eps,3+eps]."
|
||||
in str(err.exception)
|
||||
)
|
||||
|
||||
def test_so3_exp_singularity(self, batch_size: int = 100):
|
||||
"""
|
||||
Tests whether the `so3_exp_map` is robust to the input vectors
|
||||
|
||||
@@ -80,8 +80,8 @@ class TestStructUtils(TestCaseMixin, unittest.TestCase):
|
||||
self.assertClose(x_padded, torch.stack(x, 0))
|
||||
|
||||
# catch ValueError for invalid dimensions
|
||||
pad_size = [K] * (ndim + 1)
|
||||
with self.assertRaisesRegex(ValueError, "Pad size must"):
|
||||
pad_size = [K] * (ndim + 1)
|
||||
struct_utils.list_to_padded(
|
||||
x, pad_size=pad_size, pad_value=0.0, equisized=False
|
||||
)
|
||||
@@ -196,9 +196,9 @@ class TestStructUtils(TestCaseMixin, unittest.TestCase):
|
||||
|
||||
# Case 6: Input has more than 3 dims.
|
||||
# Raise an error.
|
||||
x = torch.rand((N, K, K, K, K), device=device)
|
||||
split_size = torch.randint(1, K, size=(N,)).tolist()
|
||||
with self.assertRaisesRegex(ValueError, "Supports only"):
|
||||
x = torch.rand((N, K, K, K, K), device=device)
|
||||
split_size = torch.randint(1, K, size=(N,)).tolist()
|
||||
struct_utils.padded_to_packed(x, split_size=split_size)
|
||||
|
||||
def test_list_to_packed(self):
|
||||
|
||||
@@ -576,6 +576,39 @@ class TestTexturesAtlas(TestCaseMixin, unittest.TestCase):
|
||||
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
||||
meshes.sample_textures(None)
|
||||
|
||||
def test_submeshes(self):
|
||||
N = 2
|
||||
V = 5
|
||||
F = 5
|
||||
tex = TexturesAtlas(
|
||||
atlas=torch.arange(N * F * 4 * 4 * 3, dtype=torch.float32).reshape(
|
||||
N, F, 4, 4, 3
|
||||
)
|
||||
)
|
||||
|
||||
verts = torch.rand(size=(N, V, 3))
|
||||
faces = torch.randint(size=(N, F, 3), high=V)
|
||||
mesh = Meshes(verts=verts, faces=faces, textures=tex)
|
||||
|
||||
sub_faces = [
|
||||
[torch.tensor([0, 2]), torch.tensor([1, 2])],
|
||||
[],
|
||||
]
|
||||
subtex = mesh.submeshes(sub_faces).textures
|
||||
subtex_faces = subtex.atlas_list()
|
||||
|
||||
self.assertEqual(len(subtex_faces), 2)
|
||||
self.assertClose(
|
||||
subtex_faces[0].flatten().msort(),
|
||||
torch.cat(
|
||||
(
|
||||
torch.arange(4 * 4 * 3, dtype=torch.float32),
|
||||
torch.arange(96, 96 + 4 * 4 * 3, dtype=torch.float32),
|
||||
),
|
||||
0,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
class TestTexturesUV(TestCaseMixin, unittest.TestCase):
|
||||
def setUp(self) -> None:
|
||||
@@ -1002,6 +1035,49 @@ class TestTexturesUV(TestCaseMixin, unittest.TestCase):
|
||||
with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
|
||||
meshes.sample_textures(None)
|
||||
|
||||
def test_submeshes(self):
|
||||
N = 2
|
||||
faces_uvs_list = [
|
||||
torch.LongTensor([[0, 1, 2], [3, 5, 4], [7, 6, 8]]),
|
||||
torch.LongTensor([[0, 1, 2], [3, 4, 5]]),
|
||||
]
|
||||
verts_uvs_list = [
|
||||
torch.arange(18, dtype=torch.float32).reshape(9, 2),
|
||||
torch.ones(6, 2),
|
||||
]
|
||||
tex = TexturesUV(
|
||||
maps=torch.rand((N, 16, 16, 3)),
|
||||
faces_uvs=faces_uvs_list,
|
||||
verts_uvs=verts_uvs_list,
|
||||
)
|
||||
|
||||
sub_faces = [
|
||||
[torch.tensor([0, 1]), torch.tensor([1, 2])],
|
||||
[],
|
||||
]
|
||||
|
||||
mesh = Meshes(
|
||||
verts=[torch.rand(9, 3), torch.rand(6, 3)],
|
||||
faces=faces_uvs_list,
|
||||
textures=tex,
|
||||
)
|
||||
subtex = mesh.submeshes(sub_faces).textures
|
||||
subtex_faces = subtex.faces_uvs_padded()
|
||||
self.assertEqual(len(subtex_faces), 2)
|
||||
self.assertClose(
|
||||
subtex_faces[0],
|
||||
torch.tensor([[0, 1, 2], [3, 5, 4]]),
|
||||
)
|
||||
self.assertClose(
|
||||
subtex.verts_uvs_list()[0][subtex.faces_uvs_list()[0].flatten()]
|
||||
.flatten()
|
||||
.msort(),
|
||||
torch.arange(12, dtype=torch.float32),
|
||||
)
|
||||
self.assertClose(
|
||||
subtex.maps_padded(), tex.maps_padded()[:1].expand(2, -1, -1, -1)
|
||||
)
|
||||
|
||||
|
||||
class TestRectanglePacking(TestCaseMixin, unittest.TestCase):
|
||||
def setUp(self) -> None:
|
||||
@@ -1055,7 +1131,7 @@ class TestRectanglePacking(TestCaseMixin, unittest.TestCase):

    def test_simple(self):
        self.assert_bb([(3, 4), (4, 3)], {6, 4})
        self.assert_bb([(2, 2), (2, 4), (2, 2)], {4, 4})
        self.assert_bb([(2, 2), (2, 4), (2, 2)], {4})

        # many squares
        self.assert_bb([(2, 2)] * 9, {2, 18})

@@ -936,8 +936,8 @@ class TestTransformBroadcast(unittest.TestCase):
        y = torch.tensor([0.3] * M)
        z = torch.tensor([0.4] * M)
        tM = Translate(x, y, z)
        t = tN.compose(tM)
        with self.assertRaises(ValueError):
            t = tN.compose(tM)
            t.get_matrix()

    def test_multiple_broadcast_compose(self):
@@ -312,6 +312,49 @@ class TestVolumes(TestCaseMixin, unittest.TestCase):
            ).permute(0, 2, 3, 4, 1)
            self.assertClose(grid_world_resampled, grid_world, atol=1e-7)

        for align_corners in [True, False]:
            v_trivial = Volumes(densities=densities, align_corners=align_corners)

            # check the case with x_world=(0,0,0)
            pts_world = torch.zeros(
                num_volumes, 1, 3, device=device, dtype=torch.float32
            )
            pts_local = v_trivial.world_to_local_coords(pts_world)
            pts_local_expected = torch.zeros_like(pts_local)
            self.assertClose(pts_local, pts_local_expected)

            # check the case with x_world=(-2, 3, -2)
            pts_world_tuple = [-2, 3, -2]
            pts_world = torch.tensor(
                pts_world_tuple, device=device, dtype=torch.float32
            )[None, None].repeat(num_volumes, 1, 1)
            pts_local = v_trivial.world_to_local_coords(pts_world)
            pts_local_expected = torch.tensor(
                [-1, 1, -1], device=device, dtype=torch.float32
            )[None, None].repeat(num_volumes, 1, 1)
            self.assertClose(pts_local, pts_local_expected)

            # # check that the central voxel has coords x_world=(0, 0, 0) and x_local(0, 0, 0)
            grid_world = v_trivial.get_coord_grid(world_coordinates=True)
            grid_local = v_trivial.get_coord_grid(world_coordinates=False)
            for grid in (grid_world, grid_local):
                x0 = grid[0, :, :, 2, 0]
                y0 = grid[0, :, 3, :, 1]
                z0 = grid[0, 2, :, :, 2]
                for coord_line in (x0, y0, z0):
                    self.assertClose(
                        coord_line, torch.zeros_like(coord_line), atol=1e-7
                    )

            # resample grid_world using grid_sampler with local coords
            # -> make sure the resampled version is the same as original
            grid_world_resampled = torch.nn.functional.grid_sample(
                grid_world.permute(0, 4, 1, 2, 3),
                grid_local,
                align_corners=align_corners,
            ).permute(0, 2, 3, 4, 1)
            self.assertClose(grid_world_resampled, grid_world, atol=1e-7)

    def test_coord_grid_convention_heterogeneous(
        self, num_channels=4, dtype=torch.float32
    ):
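The loop above repeats the coordinate-convention checks for both align_corners settings, and grid_sample is called with the matching flag so the world-coordinate grid reproduces itself when resampled at the local coordinates. A compact sketch of that invariant with toy sizes; the (N, C, D, H, W) densities layout and the tolerance are assumptions made for illustration:

import torch
from pytorch3d.structures import Volumes

densities = torch.zeros(1, 1, 3, 5, 4)        # (N, C, D, H, W), toy volume
for align_corners in (True, False):
    v = Volumes(densities=densities, align_corners=align_corners)
    grid_world = v.get_coord_grid(world_coordinates=True)
    grid_local = v.get_coord_grid(world_coordinates=False)
    resampled = torch.nn.functional.grid_sample(
        grid_world.permute(0, 4, 1, 2, 3),
        grid_local,
        align_corners=align_corners,
    ).permute(0, 2, 3, 4, 1)
    assert torch.allclose(resampled, grid_world, atol=1e-6)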