Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-12-23 07:40:34 +08:00)

Compare commits: V0.7.8 ... export-D58 (1 commit, SHA1 0eac8299d4)
@@ -162,6 +162,90 @@ workflows:
     jobs:
       # - main:
       #     context: DOCKERHUB_TOKEN
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda113
+          context: DOCKERHUB_TOKEN
+          cu_version: cu113
+          name: linux_conda_py38_cu113_pyt1120
+          python_version: '3.8'
+          pytorch_version: 1.12.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py38_cu116_pyt1120
+          python_version: '3.8'
+          pytorch_version: 1.12.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda113
+          context: DOCKERHUB_TOKEN
+          cu_version: cu113
+          name: linux_conda_py38_cu113_pyt1121
+          python_version: '3.8'
+          pytorch_version: 1.12.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py38_cu116_pyt1121
+          python_version: '3.8'
+          pytorch_version: 1.12.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py38_cu116_pyt1130
+          python_version: '3.8'
+          pytorch_version: 1.13.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py38_cu117_pyt1130
+          python_version: '3.8'
+          pytorch_version: 1.13.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py38_cu116_pyt1131
+          python_version: '3.8'
+          pytorch_version: 1.13.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py38_cu117_pyt1131
+          python_version: '3.8'
+          pytorch_version: 1.13.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py38_cu117_pyt200
+          python_version: '3.8'
+          pytorch_version: 2.0.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda118
+          context: DOCKERHUB_TOKEN
+          cu_version: cu118
+          name: linux_conda_py38_cu118_pyt200
+          python_version: '3.8'
+          pytorch_version: 2.0.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py38_cu117_pyt201
+          python_version: '3.8'
+          pytorch_version: 2.0.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda118
+          context: DOCKERHUB_TOKEN
+          cu_version: cu118
+          name: linux_conda_py38_cu118_pyt201
+          python_version: '3.8'
+          pytorch_version: 2.0.1
       - binary_linux_conda:
           conda_docker_image: pytorch/conda-builder:cuda118
           context: DOCKERHUB_TOKEN
@@ -247,33 +331,89 @@ workflows:
           python_version: '3.8'
           pytorch_version: 2.3.1
       - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda118
+          conda_docker_image: pytorch/conda-builder:cuda113
           context: DOCKERHUB_TOKEN
-          cu_version: cu118
-          name: linux_conda_py38_cu118_pyt240
-          python_version: '3.8'
-          pytorch_version: 2.4.0
+          cu_version: cu113
+          name: linux_conda_py39_cu113_pyt1120
+          python_version: '3.9'
+          pytorch_version: 1.12.0
       - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
+          conda_docker_image: pytorch/conda-builder:cuda116
           context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py38_cu121_pyt240
-          python_version: '3.8'
-          pytorch_version: 2.4.0
+          cu_version: cu116
+          name: linux_conda_py39_cu116_pyt1120
+          python_version: '3.9'
+          pytorch_version: 1.12.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda113
+          context: DOCKERHUB_TOKEN
+          cu_version: cu113
+          name: linux_conda_py39_cu113_pyt1121
+          python_version: '3.9'
+          pytorch_version: 1.12.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py39_cu116_pyt1121
+          python_version: '3.9'
+          pytorch_version: 1.12.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py39_cu116_pyt1130
+          python_version: '3.9'
+          pytorch_version: 1.13.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py39_cu117_pyt1130
+          python_version: '3.9'
+          pytorch_version: 1.13.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py39_cu116_pyt1131
+          python_version: '3.9'
+          pytorch_version: 1.13.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py39_cu117_pyt1131
+          python_version: '3.9'
+          pytorch_version: 1.13.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py39_cu117_pyt200
+          python_version: '3.9'
+          pytorch_version: 2.0.0
       - binary_linux_conda:
           conda_docker_image: pytorch/conda-builder:cuda118
           context: DOCKERHUB_TOKEN
           cu_version: cu118
-          name: linux_conda_py38_cu118_pyt241
-          python_version: '3.8'
-          pytorch_version: 2.4.1
+          name: linux_conda_py39_cu118_pyt200
+          python_version: '3.9'
+          pytorch_version: 2.0.0
       - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
+          conda_docker_image: pytorch/conda-builder:cuda117
           context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py38_cu121_pyt241
-          python_version: '3.8'
-          pytorch_version: 2.4.1
+          cu_version: cu117
+          name: linux_conda_py39_cu117_pyt201
+          python_version: '3.9'
+          pytorch_version: 2.0.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda118
+          context: DOCKERHUB_TOKEN
+          cu_version: cu118
+          name: linux_conda_py39_cu118_pyt201
+          python_version: '3.9'
+          pytorch_version: 2.0.1
       - binary_linux_conda:
           conda_docker_image: pytorch/conda-builder:cuda118
           context: DOCKERHUB_TOKEN
@@ -359,33 +499,89 @@ workflows:
           python_version: '3.9'
           pytorch_version: 2.3.1
       - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda118
+          conda_docker_image: pytorch/conda-builder:cuda113
           context: DOCKERHUB_TOKEN
-          cu_version: cu118
-          name: linux_conda_py39_cu118_pyt240
-          python_version: '3.9'
-          pytorch_version: 2.4.0
+          cu_version: cu113
+          name: linux_conda_py310_cu113_pyt1120
+          python_version: '3.10'
+          pytorch_version: 1.12.0
       - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
+          conda_docker_image: pytorch/conda-builder:cuda116
           context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py39_cu121_pyt240
-          python_version: '3.9'
-          pytorch_version: 2.4.0
+          cu_version: cu116
+          name: linux_conda_py310_cu116_pyt1120
+          python_version: '3.10'
+          pytorch_version: 1.12.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda113
+          context: DOCKERHUB_TOKEN
+          cu_version: cu113
+          name: linux_conda_py310_cu113_pyt1121
+          python_version: '3.10'
+          pytorch_version: 1.12.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py310_cu116_pyt1121
+          python_version: '3.10'
+          pytorch_version: 1.12.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py310_cu116_pyt1130
+          python_version: '3.10'
+          pytorch_version: 1.13.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py310_cu117_pyt1130
+          python_version: '3.10'
+          pytorch_version: 1.13.0
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda116
+          context: DOCKERHUB_TOKEN
+          cu_version: cu116
+          name: linux_conda_py310_cu116_pyt1131
+          python_version: '3.10'
+          pytorch_version: 1.13.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py310_cu117_pyt1131
+          python_version: '3.10'
+          pytorch_version: 1.13.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda117
+          context: DOCKERHUB_TOKEN
+          cu_version: cu117
+          name: linux_conda_py310_cu117_pyt200
+          python_version: '3.10'
+          pytorch_version: 2.0.0
       - binary_linux_conda:
           conda_docker_image: pytorch/conda-builder:cuda118
           context: DOCKERHUB_TOKEN
           cu_version: cu118
-          name: linux_conda_py39_cu118_pyt241
-          python_version: '3.9'
-          pytorch_version: 2.4.1
+          name: linux_conda_py310_cu118_pyt200
+          python_version: '3.10'
+          pytorch_version: 2.0.0
       - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
+          conda_docker_image: pytorch/conda-builder:cuda117
           context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py39_cu121_pyt241
-          python_version: '3.9'
-          pytorch_version: 2.4.1
+          cu_version: cu117
+          name: linux_conda_py310_cu117_pyt201
+          python_version: '3.10'
+          pytorch_version: 2.0.1
+      - binary_linux_conda:
+          conda_docker_image: pytorch/conda-builder:cuda118
+          context: DOCKERHUB_TOKEN
+          cu_version: cu118
+          name: linux_conda_py310_cu118_pyt201
+          python_version: '3.10'
+          pytorch_version: 2.0.1
       - binary_linux_conda:
           conda_docker_image: pytorch/conda-builder:cuda118
           context: DOCKERHUB_TOKEN
@@ -470,34 +666,6 @@ workflows:
           name: linux_conda_py310_cu121_pyt231
           python_version: '3.10'
          pytorch_version: 2.3.1
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda118
-          context: DOCKERHUB_TOKEN
-          cu_version: cu118
-          name: linux_conda_py310_cu118_pyt240
-          python_version: '3.10'
-          pytorch_version: 2.4.0
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
-          context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py310_cu121_pyt240
-          python_version: '3.10'
-          pytorch_version: 2.4.0
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda118
-          context: DOCKERHUB_TOKEN
-          cu_version: cu118
-          name: linux_conda_py310_cu118_pyt241
-          python_version: '3.10'
-          pytorch_version: 2.4.1
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
-          context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py310_cu121_pyt241
-          python_version: '3.10'
-          pytorch_version: 2.4.1
       - binary_linux_conda:
           conda_docker_image: pytorch/conda-builder:cuda118
           context: DOCKERHUB_TOKEN
@@ -582,34 +750,6 @@ workflows:
           name: linux_conda_py311_cu121_pyt231
           python_version: '3.11'
           pytorch_version: 2.3.1
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda118
-          context: DOCKERHUB_TOKEN
-          cu_version: cu118
-          name: linux_conda_py311_cu118_pyt240
-          python_version: '3.11'
-          pytorch_version: 2.4.0
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
-          context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py311_cu121_pyt240
-          python_version: '3.11'
-          pytorch_version: 2.4.0
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda118
-          context: DOCKERHUB_TOKEN
-          cu_version: cu118
-          name: linux_conda_py311_cu118_pyt241
-          python_version: '3.11'
-          pytorch_version: 2.4.1
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
-          context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py311_cu121_pyt241
-          python_version: '3.11'
-          pytorch_version: 2.4.1
       - binary_linux_conda:
           conda_docker_image: pytorch/conda-builder:cuda118
           context: DOCKERHUB_TOKEN
@@ -652,34 +792,6 @@ workflows:
           name: linux_conda_py312_cu121_pyt231
           python_version: '3.12'
           pytorch_version: 2.3.1
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda118
-          context: DOCKERHUB_TOKEN
-          cu_version: cu118
-          name: linux_conda_py312_cu118_pyt240
-          python_version: '3.12'
-          pytorch_version: 2.4.0
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
-          context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py312_cu121_pyt240
-          python_version: '3.12'
-          pytorch_version: 2.4.0
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda118
-          context: DOCKERHUB_TOKEN
-          cu_version: cu118
-          name: linux_conda_py312_cu118_pyt241
-          python_version: '3.12'
-          pytorch_version: 2.4.1
-      - binary_linux_conda:
-          conda_docker_image: pytorch/conda-builder:cuda121
-          context: DOCKERHUB_TOKEN
-          cu_version: cu121
-          name: linux_conda_py312_cu121_pyt241
-          python_version: '3.12'
-          pytorch_version: 2.4.1
       - binary_linux_conda_cuda:
           name: testrun_conda_cuda_py310_cu117_pyt201
           context: DOCKERHUB_TOKEN
@@ -19,14 +19,18 @@ from packaging import version
 # The CUDA versions which have pytorch conda packages available for linux for each
 # version of pytorch.
 CONDA_CUDA_VERSIONS = {
+    "1.12.0": ["cu113", "cu116"],
+    "1.12.1": ["cu113", "cu116"],
+    "1.13.0": ["cu116", "cu117"],
+    "1.13.1": ["cu116", "cu117"],
+    "2.0.0": ["cu117", "cu118"],
+    "2.0.1": ["cu117", "cu118"],
     "2.1.0": ["cu118", "cu121"],
     "2.1.1": ["cu118", "cu121"],
     "2.1.2": ["cu118", "cu121"],
     "2.2.0": ["cu118", "cu121"],
     "2.2.2": ["cu118", "cu121"],
     "2.3.1": ["cu118", "cu121"],
-    "2.4.0": ["cu118", "cu121"],
-    "2.4.1": ["cu118", "cu121"],
 }


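The hunk above edits what appears to be the table that drives CI generation: a mapping from PyTorch version to the CUDA builds that have conda packages. As a rough illustration of how such a table can expand into the `binary_linux_conda` job entries seen in the config hunks, here is a minimal Python sketch. It is not the repository's actual generator; the function name `build_workflow` and the exact formatting rules are assumptions.

```python
# Hypothetical sketch: expand a pytorch-version -> CUDA-versions mapping into
# CircleCI job dictionaries shaped like the entries added in the diff above.
CONDA_CUDA_VERSIONS = {
    "1.12.0": ["cu113", "cu116"],
    "2.0.1": ["cu117", "cu118"],
}

def build_workflow(python_versions=("3.8", "3.9", "3.10")):
    jobs = []
    for pytorch_version, cu_versions in CONDA_CUDA_VERSIONS.items():
        for cu_version in cu_versions:
            for python_version in python_versions:
                pyt_tag = pytorch_version.replace(".", "")
                py_tag = python_version.replace(".", "")
                jobs.append({
                    "binary_linux_conda": {
                        "conda_docker_image": f"pytorch/conda-builder:cuda{cu_version[2:]}",
                        "context": "DOCKERHUB_TOKEN",
                        "cu_version": cu_version,
                        "name": f"linux_conda_py{py_tag}_{cu_version}_pyt{pyt_tag}",
                        "python_version": python_version,
                        "pytorch_version": pytorch_version,
                    }
                })
    return jobs

if __name__ == "__main__":
    for job in build_workflow():
        print(job["binary_linux_conda"]["name"])  # e.g. linux_conda_py38_cu113_pyt1120
```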
INSTALL.md (11 changed lines)

@@ -8,10 +8,11 @@
 The core library is written in PyTorch. Several components have underlying implementation in CUDA for improved performance. A subset of these components have CPU implementations in C++/PyTorch. It is advised to use PyTorch3D with GPU support in order to use all the features.

 - Linux or macOS or Windows
-- Python
-- PyTorch 2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1, 2.2.2, 2.3.0, 2.3.1, 2.4.0 or 2.4.1.
+- Python 3.8, 3.9 or 3.10
+- PyTorch 1.12.0, 1.12.1, 1.13.0, 2.0.0, 2.0.1, 2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1, 2.2.2, 2.3.0 or 2.3.1.
 - torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
 - gcc & g++ ≥ 4.9
+- [fvcore](https://github.com/facebookresearch/fvcore)
 - [ioPath](https://github.com/facebookresearch/iopath)
 - If CUDA is to be used, use a version which is supported by the corresponding pytorch version and at least version 9.2.
 - If CUDA older than 11.7 is to be used and you are building from source, the CUB library must be available. We recommend version 1.10.0.
@@ -21,7 +22,7 @@ The runtime dependencies can be installed by running:
 conda create -n pytorch3d python=3.9
 conda activate pytorch3d
 conda install pytorch=1.13.0 torchvision pytorch-cuda=11.6 -c pytorch -c nvidia
-conda install -c iopath iopath
+conda install -c fvcore -c iopath -c conda-forge fvcore iopath
 ```

 For the CUB build time dependency, which you only need if you have CUDA older than 11.7, if you are using conda, you can continue with
@@ -48,7 +49,6 @@ For developing on top of PyTorch3D or contributing, you will need to run the lin
 - tdqm
 - jupyter
 - imageio
-- fvcore
 - plotly
 - opencv-python

@@ -59,7 +59,6 @@ conda install jupyter
 pip install scikit-image matplotlib imageio plotly opencv-python

 # Tests/Linting
-conda install -c fvcore -c conda-forge fvcore
 pip install black usort flake8 flake8-bugbear flake8-comprehensions
 ```

@@ -98,7 +97,7 @@ version_str="".join([
     torch.version.cuda.replace(".",""),
     f"_pyt{pyt_version_str}"
 ])
-!pip install iopath
+!pip install fvcore iopath
 !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
 ```

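The last INSTALL.md hunk above sits inside the Colab/prebuilt-wheel instructions, where a version string selects the wheel index to install from. A self-contained sketch of that selection logic follows, assembled from the fragments visible in the diff; the surrounding lines are an assumption rather than a verbatim quote of the file, and it presumes a CUDA build of torch is installed.

```python
# Sketch of the wheel-selection logic referenced above (assumed context):
# build a tag such as "py38_cu113_pyt1120" from the running interpreter and
# the installed torch, then point pip at the matching prebuilt wheel index.
import sys
import torch

pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
version_str = "".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".", ""),  # assumes a CUDA-enabled torch build
    f"_pyt{pyt_version_str}",
])
print(
    "pip install --no-index --no-cache-dir pytorch3d "
    f"-f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html"
)
```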
@@ -23,7 +23,7 @@ conda init bash
 source ~/.bashrc
 conda create -y -n myenv python=3.8 matplotlib ipython ipywidgets nbconvert
 conda activate myenv
-conda install -y -c iopath iopath
+conda install -y -c fvcore -c iopath -c conda-forge fvcore iopath
 conda install -y -c pytorch pytorch=1.6.0 cudatoolkit=10.1 torchvision
 conda install -y -c pytorch3d-nightly pytorch3d
 pip install plotly scikit-image
@@ -5,6 +5,7 @@ sphinx_rtd_theme
 sphinx_markdown_tables
 numpy
 iopath
+fvcore
 https://download.pytorch.org/whl/cpu/torchvision-0.15.2%2Bcpu-cp311-cp311-linux_x86_64.whl
 https://download.pytorch.org/whl/cpu/torch-2.0.1%2Bcpu-cp311-cp311-linux_x86_64.whl
 omegaconf
@@ -96,7 +96,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -83,7 +83,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -58,7 +58,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -97,7 +97,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -63,7 +63,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -75,7 +75,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -54,7 +54,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -85,7 +85,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -79,7 +79,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -57,7 +57,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -64,7 +64,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -80,7 +80,7 @@
 " torch.version.cuda.replace(\".\",\"\"),\n",
 " f\"_pyt{pyt_version_str}\"\n",
 " ])\n",
-" !pip install iopath\n",
+" !pip install fvcore iopath\n",
 " if sys.platform.startswith(\"linux\"):\n",
 " print(\"Trying to install wheel for PyTorch3D\")\n",
 " !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
@@ -123,7 +123,7 @@ def do_build(start_args: List[str]):
     if test_flag is not None:
         args.append(test_flag)

-    args.extend(["-c", "bottler", "-c", "iopath", "-c", "conda-forge"])
+    args.extend(["-c", "bottler", "-c", "fvcore", "-c", "iopath", "-c", "conda-forge"])
     args.append("--no-anaconda-upload")
     args.extend(["--python", os.environ["PYTHON_VERSION"]])
     args.append("packaging/pytorch3d")
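For readers unfamiliar with this build helper: the hunk adds the `fvcore` channel to the argument list that is presumably handed to `conda build`. The sketch below shows how such an argument list is typically assembled and executed; it is an illustration only, and the subprocess invocation is an assumption rather than the repository's actual `do_build` code.

```python
# Illustrative sketch (not the repository's do_build): collect conda-build
# arguments like the hunk above and run the build as a subprocess.
import os
import subprocess
from typing import List

def run_conda_build(extra_args: List[str]) -> None:
    args = list(extra_args)
    # Channels searched for dependencies such as fvcore and iopath.
    args.extend(["-c", "bottler", "-c", "fvcore", "-c", "iopath", "-c", "conda-forge"])
    args.append("--no-anaconda-upload")
    args.extend(["--python", os.environ.get("PYTHON_VERSION", "3.10")])
    args.append("packaging/pytorch3d")
    subprocess.check_call(["conda", "build", *args])
```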
@@ -26,6 +26,6 @@ version_str="".join([
     torch.version.cuda.replace(".",""),
     f"_pyt{pyt_version_str}"
 ])
-!pip install iopath
+!pip install fvcore iopath
 !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
 ```
@@ -144,7 +144,7 @@ do
 conda activate "$tag"
 # shellcheck disable=SC2086
 conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "$cudatools=$CUDA_TAG"
-pip install iopath
+pip install fvcore iopath
 echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"

 rm -rf dist
@@ -22,6 +22,7 @@ requirements:
     - python
     - numpy >=1.11
    - torchvision >=0.5
+    - fvcore
     - iopath
     {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }}
     {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
@@ -99,7 +99,7 @@ except ModuleNotFoundError:
 no_accelerate = os.environ.get("PYTORCH3D_NO_ACCELERATE") is not None


-class Experiment(Configurable):
+class Experiment(Configurable):  # pyre-ignore: 13
     """
     This class is at the top level of Implicitron's config hierarchy. Its
     members are high-level components necessary for training an implicit rende-
@@ -120,16 +120,12 @@ class Experiment(Configurable):
         will be saved here.
     """

-    # pyre-fixme[13]: Attribute `data_source` is never initialized.
     data_source: DataSourceBase
     data_source_class_type: str = "ImplicitronDataSource"
-    # pyre-fixme[13]: Attribute `model_factory` is never initialized.
     model_factory: ModelFactoryBase
     model_factory_class_type: str = "ImplicitronModelFactory"
-    # pyre-fixme[13]: Attribute `optimizer_factory` is never initialized.
     optimizer_factory: OptimizerFactoryBase
     optimizer_factory_class_type: str = "ImplicitronOptimizerFactory"
-    # pyre-fixme[13]: Attribute `training_loop` is never initialized.
     training_loop: TrainingLoopBase
     training_loop_class_type: str = "ImplicitronTrainingLoop"

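This hunk, and several of the Implicitron hunks below, trade per-attribute `# pyre-fixme[13]` comments for a single class-level `# pyre-ignore` marker. For readers unfamiliar with Pyre's error 13 ("attribute is never initialized"), the toy sketch below contrasts the two styles; the classes are invented for illustration and are not from the repository.

```python
# Hypothetical example: two ways to silence Pyre's error [13] for attributes
# that a framework fills in after construction rather than in __init__.

# Style 1: annotate every uninitialized attribute individually.
class ConfigA:
    # pyre-fixme[13]: Attribute `data_source` is never initialized.
    data_source: str
    data_source_class_type: str = "ImplicitronDataSource"

# Style 2: suppress the whole class with one marker.
class ConfigB:  # pyre-ignore: 13
    data_source: str
    data_source_class_type: str = "ImplicitronDataSource"

# At runtime the two are identical; the comments only affect the static analyzer.
print(ConfigB.data_source_class_type)
```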
@@ -45,7 +45,7 @@ class ModelFactoryBase(ReplaceableBase):


 @registry.register
-class ImplicitronModelFactory(ModelFactoryBase):
+class ImplicitronModelFactory(ModelFactoryBase):  # pyre-ignore [13]
     """
     A factory class that initializes an implicit rendering model.

@@ -61,7 +61,6 @@ class ImplicitronModelFactory(ModelFactoryBase):

     """

-    # pyre-fixme[13]: Attribute `model` is never initialized.
     model: ImplicitronModelBase
     model_class_type: str = "GenericModel"
     resume: bool = True
@@ -30,13 +30,13 @@ from .utils import seed_all_random_engines
 logger = logging.getLogger(__name__)


+# pyre-fixme[13]: Attribute `evaluator` is never initialized.
 class TrainingLoopBase(ReplaceableBase):
     """
     Members:
         evaluator: An EvaluatorBase instance, used to evaluate training results.
     """

-    # pyre-fixme[13]: Attribute `evaluator` is never initialized.
     evaluator: Optional[EvaluatorBase]
     evaluator_class_type: Optional[str] = "ImplicitronEvaluator"

@@ -6,4 +6,4 @@

 # pyre-unsafe

-__version__ = "0.7.8"
+__version__ = "0.7.6"
@@ -7,15 +7,11 @@
  */

 // clang-format off
-#if !defined(USE_ROCM)
 #include "./pulsar/global.h" // Include before <torch/extension.h>.
-#endif
 #include <torch/extension.h>
 // clang-format on
-#if !defined(USE_ROCM)
 #include "./pulsar/pytorch/renderer.h"
 #include "./pulsar/pytorch/tensor_util.h"
-#endif
 #include "ball_query/ball_query.h"
 #include "blending/sigmoid_alpha_blend.h"
 #include "compositing/alpha_composite.h"
@@ -103,8 +99,6 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
   m.def("marching_cubes", &MarchingCubes);

   // Pulsar.
-  // Pulsar not enabled on AMD.
-#if !defined(USE_ROCM)
 #ifdef PULSAR_LOGGING_ENABLED
   c10::ShowLogInfoToStderr();
 #endif
@@ -189,5 +183,4 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
   m.attr("MAX_UINT") = py::int_(MAX_UINT);
   m.attr("MAX_USHORT") = py::int_(MAX_USHORT);
   m.attr("PULSAR_MAX_GRAD_SPHERES") = py::int_(MAX_GRAD_SPHERES);
-#endif
 }
@@ -144,7 +144,7 @@ __device__ void CheckPixelInsideFace(
   const bool zero_face_area =
       (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);

-  if (zmax < 0 || (cull_backfaces && back_face) || outside_bbox ||
+  if (zmax < 0 || cull_backfaces && back_face || outside_bbox ||
       zero_face_area) {
     return;
   }
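The only change in this hunk is dropping the parentheses around `cull_backfaces && back_face`. Because `&&` binds tighter than `||`, both forms evaluate identically; the parentheses mainly exist to silence compiler warnings about mixing the two operators. The same precedence rule holds for Python's `and`/`or`, which the short check below demonstrates exhaustively over all input combinations.

```python
# Quick check that `and` binds tighter than `or` (mirroring C/C++ && vs ||),
# so the explicit parentheses in the old line do not change the result.
from itertools import product

for zmax_neg, cull, back, outside in product([False, True], repeat=4):
    with_parens = zmax_neg or (cull and back) or outside
    without_parens = zmax_neg or cull and back or outside
    assert with_parens == without_parens
print("identical for all 16 input combinations")
```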
@@ -18,8 +18,6 @@ const auto vEpsilon = 1e-8;

 // Common functions and operators for float2.

-// Complex arithmetic is already defined for AMD.
-#if !defined(USE_ROCM)
 __device__ inline float2 operator-(const float2& a, const float2& b) {
   return make_float2(a.x - b.x, a.y - b.y);
 }
@@ -43,7 +41,6 @@ __device__ inline float2 operator*(const float2& a, const float2& b) {
 __device__ inline float2 operator*(const float a, const float2& b) {
   return make_float2(a * b.x, a * b.y);
 }
-#endif

 __device__ inline float FloatMin3(const float a, const float b, const float c) {
   return fminf(a, fminf(b, c));
@@ -23,51 +23,37 @@ WarpReduceMin(scalar_t* min_dists, int64_t* min_idxs, const size_t tid) {
     min_idxs[tid] = min_idxs[tid + 32];
     min_dists[tid] = min_dists[tid + 32];
   }
-  // AMD does not use explicit syncwarp and instead automatically inserts memory
-  // fences during compilation.
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   // s = 16
   if (min_dists[tid] > min_dists[tid + 16]) {
     min_idxs[tid] = min_idxs[tid + 16];
     min_dists[tid] = min_dists[tid + 16];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   // s = 8
   if (min_dists[tid] > min_dists[tid + 8]) {
     min_idxs[tid] = min_idxs[tid + 8];
     min_dists[tid] = min_dists[tid + 8];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   // s = 4
   if (min_dists[tid] > min_dists[tid + 4]) {
     min_idxs[tid] = min_idxs[tid + 4];
     min_dists[tid] = min_dists[tid + 4];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   // s = 2
   if (min_dists[tid] > min_dists[tid + 2]) {
     min_idxs[tid] = min_idxs[tid + 2];
     min_dists[tid] = min_dists[tid + 2];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   // s = 1
   if (min_dists[tid] > min_dists[tid + 1]) {
     min_idxs[tid] = min_idxs[tid + 1];
     min_dists[tid] = min_dists[tid + 1];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
 }

 template <typename scalar_t>
@@ -79,42 +65,30 @@ __device__ void WarpReduceMax(
     dists[tid] = dists[tid + 32];
     dists_idx[tid] = dists_idx[tid + 32];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   if (dists[tid] < dists[tid + 16]) {
     dists[tid] = dists[tid + 16];
     dists_idx[tid] = dists_idx[tid + 16];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   if (dists[tid] < dists[tid + 8]) {
     dists[tid] = dists[tid + 8];
     dists_idx[tid] = dists_idx[tid + 8];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   if (dists[tid] < dists[tid + 4]) {
     dists[tid] = dists[tid + 4];
     dists_idx[tid] = dists_idx[tid + 4];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   if (dists[tid] < dists[tid + 2]) {
     dists[tid] = dists[tid + 2];
     dists_idx[tid] = dists_idx[tid + 2];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
   if (dists[tid] < dists[tid + 1]) {
     dists[tid] = dists[tid + 1];
     dists_idx[tid] = dists_idx[tid + 1];
   }
-#if !defined(USE_ROCM)
   __syncwarp();
-#endif
 }
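The two warp-reduction hunks above only touch the `USE_ROCM` guards around `__syncwarp()`; the underlying algorithm is an unrolled tree reduction over a shared-memory buffer. The plain-Python sketch below illustrates just that indexing pattern: it is an emulation for readability, not CUDA, and it ignores warp-lane lockstep and synchronization.

```python
# Plain-Python emulation of the tree reduction in WarpReduceMin: 32 "lanes"
# cooperatively fold a 64-element buffer so that slot 0 ends up holding the
# minimum distance and the index it came from.
orig = [float((7 * i + 5) % 64) for i in range(64)]
min_dists = list(orig)
min_idxs = list(range(64))

for s in (32, 16, 8, 4, 2, 1):      # strides, mirroring the unrolled kernel
    for tid in range(s):            # one iteration per active lane
        if min_dists[tid] > min_dists[tid + s]:
            min_dists[tid] = min_dists[tid + s]
            min_idxs[tid] = min_idxs[tid + s]

assert min_dists[0] == min(orig)
print(min_dists[0], min_idxs[0])    # smallest value and its original slot
```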
@@ -41,7 +41,7 @@ class DataSourceBase(ReplaceableBase):


 @registry.register
-class ImplicitronDataSource(DataSourceBase):
+class ImplicitronDataSource(DataSourceBase):  # pyre-ignore[13]
     """
     Represents the data used in Implicitron. This is the only implementation
     of DataSourceBase provided.
@@ -52,11 +52,8 @@ class ImplicitronDataSource(DataSourceBase):
         data_loader_map_provider_class_type: identifies type for data_loader_map_provider.
     """

-    # pyre-fixme[13]: Attribute `dataset_map_provider` is never initialized.
     dataset_map_provider: DatasetMapProviderBase
-    # pyre-fixme[13]: Attribute `dataset_map_provider_class_type` is never initialized.
     dataset_map_provider_class_type: str
-    # pyre-fixme[13]: Attribute `data_loader_map_provider` is never initialized.
     data_loader_map_provider: DataLoaderMapProviderBase
     data_loader_map_provider_class_type: str = "SequenceDataLoaderMapProvider"

@@ -276,7 +276,6 @@ class FrameData(Mapping[str, Any]):
                 image_size_hw=tuple(self.effective_image_size_hw),  # pyre-ignore
             )
             crop_bbox_xywh = bbox_xyxy_to_xywh(clamp_bbox_xyxy)
-            self.crop_bbox_xywh = crop_bbox_xywh

             if self.fg_probability is not None:
                 self.fg_probability = crop_around_box(
@@ -435,7 +434,7 @@ class FrameData(Mapping[str, Any]):
             # TODO: don't store K; enforce working in NDC space
             return join_cameras_as_batch(batch)
         else:
-            return torch.utils.data.dataloader.default_collate(batch)
+            return torch.utils.data._utils.collate.default_collate(batch)


 FrameDataSubtype = TypeVar("FrameDataSubtype", bound=FrameData)
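Both sides of the collate change call the same PyTorch helper, just through different module paths: `torch.utils.data.dataloader.default_collate` re-exports the function defined in `torch.utils.data._utils.collate`. The small standalone example below shows what that helper does with a list of per-frame dictionaries, which is roughly how it is used here.

```python
# Standalone illustration of PyTorch's default_collate, the helper invoked by
# both the old and the new line (only the import path differs between them).
import torch
from torch.utils.data.dataloader import default_collate

batch = [
    {"image": torch.zeros(3, 4, 4), "frame_number": 0},
    {"image": torch.ones(3, 4, 4), "frame_number": 1},
]
collated = default_collate(batch)
print(collated["image"].shape)   # torch.Size([2, 3, 4, 4])
print(collated["frame_number"])  # tensor([0, 1])
```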
@@ -66,7 +66,7 @@ _NEED_CONTROL: Tuple[str, ...] = (


 @registry.register
-class JsonIndexDatasetMapProvider(DatasetMapProviderBase):
+class JsonIndexDatasetMapProvider(DatasetMapProviderBase):  # pyre-ignore [13]
     """
     Generates the training / validation and testing dataset objects for
     a dataset laid out on disk like Co3D, with annotations in json files.
@@ -95,7 +95,6 @@ class JsonIndexDatasetMapProvider(DatasetMapProviderBase):
         path_manager_factory_class_type: The class type of `path_manager_factory`.
     """

-    # pyre-fixme[13]: Attribute `category` is never initialized.
     category: str
     task_str: str = "singlesequence"
     dataset_root: str = _CO3D_DATASET_ROOT
@@ -105,10 +104,8 @@ class JsonIndexDatasetMapProvider(DatasetMapProviderBase):
     test_restrict_sequence_id: int = -1
     assert_single_seq: bool = False
     only_test_set: bool = False
-    # pyre-fixme[13]: Attribute `dataset` is never initialized.
     dataset: JsonIndexDataset
     dataset_class_type: str = "JsonIndexDataset"
-    # pyre-fixme[13]: Attribute `path_manager_factory` is never initialized.
     path_manager_factory: PathManagerFactory
     path_manager_factory_class_type: str = "PathManagerFactory"

@@ -56,7 +56,7 @@ logger = logging.getLogger(__name__)


 @registry.register
-class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase):
+class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase):  # pyre-ignore [13]
     """
     Generates the training, validation, and testing dataset objects for
     a dataset laid out on disk like CO3Dv2, with annotations in gzipped json files.
@@ -171,9 +171,7 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase):
         path_manager_factory_class_type: The class type of `path_manager_factory`.
     """

-    # pyre-fixme[13]: Attribute `category` is never initialized.
     category: str
-    # pyre-fixme[13]: Attribute `subset_name` is never initialized.
     subset_name: str
     dataset_root: str = _CO3DV2_DATASET_ROOT

@@ -185,10 +183,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase):
     n_known_frames_for_test: int = 0

     dataset_class_type: str = "JsonIndexDataset"
-    # pyre-fixme[13]: Attribute `dataset` is never initialized.
     dataset: JsonIndexDataset

-    # pyre-fixme[13]: Attribute `path_manager_factory` is never initialized.
     path_manager_factory: PathManagerFactory
     path_manager_factory_class_type: str = "PathManagerFactory"

@@ -32,7 +32,7 @@ from .utils import DATASET_TYPE_KNOWN


 @registry.register
-class RenderedMeshDatasetMapProvider(DatasetMapProviderBase):
+class RenderedMeshDatasetMapProvider(DatasetMapProviderBase):  # pyre-ignore [13]
     """
     A simple single-scene dataset based on PyTorch3D renders of a mesh.
     Provides `num_views` renders of the mesh as train, with no val
@@ -76,7 +76,6 @@ class RenderedMeshDatasetMapProvider(DatasetMapProviderBase):
     resolution: int = 128
     use_point_light: bool = True
     gpu_idx: Optional[int] = 0
-    # pyre-fixme[13]: Attribute `path_manager_factory` is never initialized.
     path_manager_factory: PathManagerFactory
     path_manager_factory_class_type: str = "PathManagerFactory"

@@ -83,6 +83,7 @@ class SingleSceneDataset(DatasetBase, Configurable):
         return self.eval_batches


+# pyre-fixme[13]: Uninitialized attribute
 class SingleSceneDatasetMapProviderBase(DatasetMapProviderBase):
     """
     Base for provider of data for one scene from LLFF or blender datasets.
@@ -99,11 +100,8 @@ class SingleSceneDatasetMapProviderBase(DatasetMapProviderBase):
             testing frame.
     """

-    # pyre-fixme[13]: Attribute `base_dir` is never initialized.
     base_dir: str
-    # pyre-fixme[13]: Attribute `object_name` is never initialized.
     object_name: str
-    # pyre-fixme[13]: Attribute `path_manager_factory` is never initialized.
     path_manager_factory: PathManagerFactory
     path_manager_factory_class_type: str = "PathManagerFactory"
     n_known_frames_for_test: Optional[int] = None
@@ -348,7 +348,6 @@ def adjust_camera_to_image_scale_(
|
|||||||
camera: PerspectiveCameras,
|
camera: PerspectiveCameras,
|
||||||
original_size_wh: torch.Tensor,
|
original_size_wh: torch.Tensor,
|
||||||
new_size_wh: torch.LongTensor,
|
new_size_wh: torch.LongTensor,
|
||||||
# pyre-fixme[7]: Expected `PerspectiveCameras` but got implicit return value of `None`.
|
|
||||||
) -> PerspectiveCameras:
|
) -> PerspectiveCameras:
|
||||||
focal_length_px, principal_point_px = _convert_ndc_to_pixels(
|
focal_length_px, principal_point_px = _convert_ndc_to_pixels(
|
||||||
camera.focal_length[0],
|
camera.focal_length[0],
|
||||||
@@ -368,7 +367,7 @@ def adjust_camera_to_image_scale_(
|
|||||||
image_size_wh_output,
|
image_size_wh_output,
|
||||||
)
|
)
|
||||||
camera.focal_length = focal_length_scaled[None]
|
camera.focal_length = focal_length_scaled[None]
|
||||||
camera.principal_point = principal_point_scaled[None]
|
camera.principal_point = principal_point_scaled[None] # pyre-ignore
|
||||||
|
|
||||||
|
|
||||||
# NOTE this cache is per-worker; they are implemented as processes.
|
# NOTE this cache is per-worker; they are implemented as processes.
|
||||||
|
|||||||
@@ -65,7 +65,7 @@ logger = logging.getLogger(__name__)
 
 
 @registry.register
-class GenericModel(ImplicitronModelBase):
+class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
     """
     GenericModel is a wrapper for the neural implicit
     rendering and reconstruction pipeline which consists
@@ -226,42 +226,34 @@ class GenericModel(ImplicitronModelBase):
 
     # ---- global encoder settings
     global_encoder_class_type: Optional[str] = None
-    # pyre-fixme[13]: Attribute `global_encoder` is never initialized.
     global_encoder: Optional[GlobalEncoderBase]
 
     # ---- raysampler
     raysampler_class_type: str = "AdaptiveRaySampler"
-    # pyre-fixme[13]: Attribute `raysampler` is never initialized.
     raysampler: RaySamplerBase
 
     # ---- renderer configs
     renderer_class_type: str = "MultiPassEmissionAbsorptionRenderer"
-    # pyre-fixme[13]: Attribute `renderer` is never initialized.
     renderer: BaseRenderer
 
     # ---- image feature extractor settings
     # (This is only created if view_pooler is enabled)
-    # pyre-fixme[13]: Attribute `image_feature_extractor` is never initialized.
     image_feature_extractor: Optional[FeatureExtractorBase]
     image_feature_extractor_class_type: Optional[str] = None
     # ---- view pooler settings
     view_pooler_enabled: bool = False
-    # pyre-fixme[13]: Attribute `view_pooler` is never initialized.
     view_pooler: Optional[ViewPooler]
 
     # ---- implicit function settings
     implicit_function_class_type: str = "NeuralRadianceFieldImplicitFunction"
     # This is just a model, never constructed.
     # The actual implicit functions live in self._implicit_functions
-    # pyre-fixme[13]: Attribute `implicit_function` is never initialized.
     implicit_function: ImplicitFunctionBase
 
     # ----- metrics
-    # pyre-fixme[13]: Attribute `view_metrics` is never initialized.
     view_metrics: ViewMetricsBase
     view_metrics_class_type: str = "ViewMetrics"
 
-    # pyre-fixme[13]: Attribute `regularization_metrics` is never initialized.
     regularization_metrics: RegularizationMetricsBase
     regularization_metrics_class_type: str = "RegularizationMetrics"
 
@@ -59,13 +59,12 @@ class GlobalEncoderBase(ReplaceableBase):
 
 # TODO: probabilistic embeddings?
 @registry.register
-class SequenceAutodecoder(GlobalEncoderBase, torch.nn.Module):
+class SequenceAutodecoder(GlobalEncoderBase, torch.nn.Module):  # pyre-ignore: 13
     """
     A global encoder implementation which provides an autodecoder encoding
     of the frame's sequence identifier.
     """
 
-    # pyre-fixme[13]: Attribute `autodecoder` is never initialized.
     autodecoder: Autodecoder
 
     def __post_init__(self):
@@ -244,6 +244,7 @@ class MLPWithInputSkips(Configurable, torch.nn.Module):
 
 
 @registry.register
+# pyre-fixme[13]: Attribute `network` is never initialized.
 class MLPDecoder(DecoderFunctionBase):
     """
     Decoding function which uses `MLPWithIputSkips` to convert the embedding to output.
@@ -271,7 +272,6 @@ class MLPDecoder(DecoderFunctionBase):
 
     input_dim: int = 3
     param_groups: Dict[str, str] = field(default_factory=lambda: {})
-    # pyre-fixme[13]: Attribute `network` is never initialized.
     network: MLPWithInputSkips
 
     def __post_init__(self):
@@ -318,11 +318,10 @@ class SRNRaymarchHyperNet(Configurable, torch.nn.Module):
 
 
 @registry.register
+# pyre-fixme[13]: Uninitialized attribute
 class SRNImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
     latent_dim: int = 0
-    # pyre-fixme[13]: Attribute `raymarch_function` is never initialized.
     raymarch_function: SRNRaymarchFunction
-    # pyre-fixme[13]: Attribute `pixel_generator` is never initialized.
     pixel_generator: SRNPixelGenerator
 
     def __post_init__(self):
@@ -367,6 +366,7 @@ class SRNImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
 
 
 @registry.register
+# pyre-fixme[13]: Uninitialized attribute
 class SRNHyperNetImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
     """
     This implicit function uses a hypernetwork to generate the
@@ -377,9 +377,7 @@ class SRNHyperNetImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
 
     latent_dim_hypernet: int = 0
     latent_dim: int = 0
-    # pyre-fixme[13]: Attribute `hypernet` is never initialized.
     hypernet: SRNRaymarchHyperNet
-    # pyre-fixme[13]: Attribute `pixel_generator` is never initialized.
     pixel_generator: SRNPixelGenerator
 
     def __post_init__(self):
@@ -805,6 +805,7 @@ class VMFactorizedVoxelGrid(VoxelGridBase):
         )
 
 
+# pyre-fixme[13]: Attribute `voxel_grid` is never initialized.
 class VoxelGridModule(Configurable, torch.nn.Module):
     """
     A wrapper torch.nn.Module for the VoxelGrid classes, which
@@ -844,7 +845,6 @@ class VoxelGridModule(Configurable, torch.nn.Module):
     """
 
     voxel_grid_class_type: str = "FullResolutionVoxelGrid"
-    # pyre-fixme[13]: Attribute `voxel_grid` is never initialized.
     voxel_grid: VoxelGridBase
 
     extents: Tuple[float, float, float] = (2.0, 2.0, 2.0)
@@ -39,6 +39,7 @@ enable_get_default_args(HarmonicEmbedding)
 
 
 @registry.register
+# pyre-ignore[13]
 class VoxelGridImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
     """
     This implicit function consists of two streams, one for the density calculation and one
@@ -144,11 +145,9 @@ class VoxelGridImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
     """
 
     # ---- voxel grid for density
-    # pyre-fixme[13]: Attribute `voxel_grid_density` is never initialized.
     voxel_grid_density: VoxelGridModule
 
     # ---- voxel grid for color
-    # pyre-fixme[13]: Attribute `voxel_grid_color` is never initialized.
     voxel_grid_color: VoxelGridModule
 
     # ---- harmonic embeddings density
@@ -164,12 +163,10 @@ class VoxelGridImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
 
     # ---- decoder function for density
     decoder_density_class_type: str = "MLPDecoder"
-    # pyre-fixme[13]: Attribute `decoder_density` is never initialized.
     decoder_density: DecoderFunctionBase
 
     # ---- decoder function for color
     decoder_color_class_type: str = "MLPDecoder"
-    # pyre-fixme[13]: Attribute `decoder_color` is never initialized.
     decoder_color: DecoderFunctionBase
 
     # ---- cuda streams
@@ -69,7 +69,7 @@ IMPLICIT_FUNCTION_ARGS_TO_REMOVE: List[str] = [
 
 
 @registry.register
-class OverfitModel(ImplicitronModelBase):
+class OverfitModel(ImplicitronModelBase):  # pyre-ignore: 13
     """
     OverfitModel is a wrapper for the neural implicit
     rendering and reconstruction pipeline which consists
@@ -198,34 +198,27 @@ class OverfitModel(ImplicitronModelBase):
 
     # ---- global encoder settings
     global_encoder_class_type: Optional[str] = None
-    # pyre-fixme[13]: Attribute `global_encoder` is never initialized.
     global_encoder: Optional[GlobalEncoderBase]
 
     # ---- raysampler
     raysampler_class_type: str = "AdaptiveRaySampler"
-    # pyre-fixme[13]: Attribute `raysampler` is never initialized.
     raysampler: RaySamplerBase
 
     # ---- renderer configs
     renderer_class_type: str = "MultiPassEmissionAbsorptionRenderer"
-    # pyre-fixme[13]: Attribute `renderer` is never initialized.
     renderer: BaseRenderer
 
     # ---- implicit function settings
     share_implicit_function_across_passes: bool = False
     implicit_function_class_type: str = "NeuralRadianceFieldImplicitFunction"
-    # pyre-fixme[13]: Attribute `implicit_function` is never initialized.
     implicit_function: ImplicitFunctionBase
     coarse_implicit_function_class_type: Optional[str] = None
-    # pyre-fixme[13]: Attribute `coarse_implicit_function` is never initialized.
     coarse_implicit_function: Optional[ImplicitFunctionBase]
 
     # ----- metrics
-    # pyre-fixme[13]: Attribute `view_metrics` is never initialized.
     view_metrics: ViewMetricsBase
     view_metrics_class_type: str = "ViewMetrics"
 
-    # pyre-fixme[13]: Attribute `regularization_metrics` is never initialized.
     regularization_metrics: RegularizationMetricsBase
     regularization_metrics_class_type: str = "RegularizationMetrics"
 
@@ -18,7 +18,9 @@ from .raymarcher import RaymarcherBase
 
 
 @registry.register
-class MultiPassEmissionAbsorptionRenderer(BaseRenderer, torch.nn.Module):
+class MultiPassEmissionAbsorptionRenderer(  # pyre-ignore: 13
+    BaseRenderer, torch.nn.Module
+):
     """
     Implements the multi-pass rendering function, in particular,
     with emission-absorption ray marching used in NeRF [1]. First, it evaluates
@@ -84,7 +86,6 @@ class MultiPassEmissionAbsorptionRenderer(BaseRenderer, torch.nn.Module):
     """
 
     raymarcher_class_type: str = "EmissionAbsorptionRaymarcher"
-    # pyre-fixme[13]: Attribute `raymarcher` is never initialized.
     raymarcher: RaymarcherBase
 
     n_pts_per_ray_fine_training: int = 64
@@ -16,6 +16,8 @@ from pytorch3d.renderer.implicit.sample_pdf import sample_pdf
 
 
 @expand_args_fields
+# pyre-fixme[13]: Attribute `n_pts_per_ray` is never initialized.
+# pyre-fixme[13]: Attribute `random_sampling` is never initialized.
 class RayPointRefiner(Configurable, torch.nn.Module):
     """
     Implements the importance sampling of points along rays.
@@ -43,9 +45,7 @@ class RayPointRefiner(Configurable, torch.nn.Module):
         for Anti-Aliasing Neural Radiance Fields." ICCV 2021.
     """
 
-    # pyre-fixme[13]: Attribute `n_pts_per_ray` is never initialized.
     n_pts_per_ray: int
-    # pyre-fixme[13]: Attribute `random_sampling` is never initialized.
     random_sampling: bool
     add_input_samples: bool = True
     blurpool_weights: bool = False
@@ -24,10 +24,9 @@ from .rgb_net import RayNormalColoringNetwork
 
 
 @registry.register
-class SignedDistanceFunctionRenderer(BaseRenderer, torch.nn.Module):
+class SignedDistanceFunctionRenderer(BaseRenderer, torch.nn.Module):  # pyre-ignore[13]
     render_features_dimensions: int = 3
     object_bounding_sphere: float = 1.0
-    # pyre-fixme[13]: Attribute `ray_tracer` is never initialized.
     ray_tracer: RayTracing
     ray_normal_coloring_network_args: DictConfig = get_default_args_field(
         RayNormalColoringNetwork
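The `RayPointRefiner` hunks above are again only suppression moves, but the class they touch ("importance sampling of points along rays") is worth a sketch. The real implementation delegates to the `sample_pdf` import visible in the hunk header; the following is a hedged, torch-only illustration of the same inverse-CDF idea, with made-up names and shapes:

# Hedged sketch: draw new depth samples with probability proportional to
# per-bin weights (RayPointRefiner delegates this to pytorch3d's sample_pdf).
import torch

def resample_depths(bin_edges: torch.Tensor, weights: torch.Tensor, n: int) -> torch.Tensor:
    # bin_edges: (num_bins + 1,) increasing depths; weights: (num_bins,)
    pdf = weights / weights.sum().clamp(min=1e-8)
    cdf = torch.cat([torch.zeros(1), torch.cumsum(pdf, dim=0)])
    u = torch.rand(n)                                   # uniform samples in [0, 1)
    idx = torch.searchsorted(cdf, u, right=True) - 1    # bin containing each u
    idx = idx.clamp(0, weights.numel() - 1)
    left, right = cdf[idx], cdf[idx + 1]
    frac = (u - left) / (right - left).clamp(min=1e-8)  # position within the bin
    return bin_edges[idx] + frac * (bin_edges[idx + 1] - bin_edges[idx])

depths = resample_depths(torch.linspace(0.1, 4.0, 65), torch.rand(64), n=32)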
@@ -121,6 +121,7 @@ def weighted_sum_losses(
         return None
     loss = sum(losses_weighted)
     assert torch.is_tensor(loss)
+    # pyre-fixme[7]: Expected `Optional[Tensor]` but got `int`.
     return loss
 
 
@@ -16,6 +16,7 @@ from .feature_aggregator import FeatureAggregatorBase
 from .view_sampler import ViewSampler
 
 
+# pyre-ignore: 13
 class ViewPooler(Configurable, torch.nn.Module):
     """
     Implements sampling of image-based features at the 2d projections of a set
@@ -34,10 +35,8 @@ class ViewPooler(Configurable, torch.nn.Module):
         from a set of source images. FeatureAggregator executes step (4) above.
     """
 
-    # pyre-fixme[13]: Attribute `view_sampler` is never initialized.
     view_sampler: ViewSampler
     feature_aggregator_class_type: str = "AngleWeightedReductionFeatureAggregator"
-    # pyre-fixme[13]: Attribute `feature_aggregator` is never initialized.
     feature_aggregator: FeatureAggregatorBase
 
     def __post_init__(self):
@@ -156,6 +156,7 @@ def render_point_cloud_pytorch3d(
     cumprod = torch.cat((torch.ones_like(cumprod[..., :1]), cumprod[..., :-1]), dim=-1)
     depths = (weights * cumprod * fragments.zbuf).sum(dim=-1)
     # add the rendering mask
+    # pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
     render_mask = -torch.prod(1.0 - weights, dim=-1) + 1.0
 
     # cat depths and render mask
@@ -409,7 +409,6 @@ def _parse_mtl(
     texture_files = {}
     material_name = ""
 
-    # pyre-fixme[9]: f has type `str`; used as `IO[typing.Any]`.
     with _open_file(f, path_manager, "r") as f:
         for line in f:
             tokens = line.strip().split()
@@ -649,7 +649,8 @@ def _load_obj(
         # Create an array of strings of material names for each face.
         # If faces_materials_idx == -1 then that face doesn't have a material.
         idx = faces_materials_idx.cpu().numpy()
-        face_material_names = np.array([""] + material_names)[idx + 1]  # (F,)
+        face_material_names = np.array(material_names)[idx]  # (F,)
+        face_material_names[idx == -1] = ""
 
         # Construct the atlas.
         texture_atlas = make_mesh_texture_atlas(
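The `_load_obj` hunk just above swaps a prepend-and-shift lookup for direct indexing followed by a mask assignment. Both map `faces_materials_idx == -1` to an empty material name; a small self-contained check with illustrative values only:

# Illustrative only: mimics the two indexing styles from the hunk above.
import numpy as np

material_names = ["mat_a", "mat_b"]
idx = np.array([0, 1, -1, 0])  # -1 marks faces without a material

# One side: prepend "" and shift indices, so idx == -1 maps to the empty string.
names_shifted = np.array([""] + material_names)[idx + 1]

# Other side: index directly, then blank out the unmatched faces.
names_direct = np.array(material_names)[idx]
names_direct[idx == -1] = ""

assert (names_shifted == names_direct).all()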
@@ -755,13 +756,10 @@ def save_obj(
     output_path = Path(f)
 
     # Save the .obj file
-    # pyre-fixme[9]: f has type `Union[Path, str]`; used as `IO[typing.Any]`.
     with _open_file(f, path_manager, "w") as f:
         if save_texture:
             # Add the header required for the texture info to be loaded correctly
             obj_header = "\nmtllib {0}.mtl\nusemtl mesh\n\n".format(output_path.stem)
-            # pyre-fixme[16]: Item `Path` of `Union[Path, str]` has no attribute
-            # `write`.
             f.write(obj_header)
         _save(
             f,
@@ -27,10 +27,8 @@ def _validate_chamfer_reduction_inputs(
     """
     if batch_reduction is not None and batch_reduction not in ["mean", "sum"]:
         raise ValueError('batch_reduction must be one of ["mean", "sum"] or None')
-    if point_reduction is not None and point_reduction not in ["mean", "sum", "max"]:
-        raise ValueError(
-            'point_reduction must be one of ["mean", "sum", "max"] or None'
-        )
+    if point_reduction is not None and point_reduction not in ["mean", "sum"]:
+        raise ValueError('point_reduction must be one of ["mean", "sum"] or None')
     if point_reduction is None and batch_reduction is not None:
         raise ValueError("Batch reduction must be None if point_reduction is None")
 
@@ -82,6 +80,7 @@ def _chamfer_distance_single_direction(
     x_normals,
     y_normals,
     weights,
+    batch_reduction: Union[str, None],
     point_reduction: Union[str, None],
     norm: int,
     abs_cosine: bool,
@@ -104,6 +103,11 @@ def _chamfer_distance_single_direction(
             raise ValueError("weights cannot be negative.")
         if weights.sum() == 0.0:
             weights = weights.view(N, 1)
+            if batch_reduction in ["mean", "sum"]:
+                return (
+                    (x.sum((1, 2)) * weights).sum() * 0.0,
+                    (x.sum((1, 2)) * weights).sum() * 0.0,
+                )
             return ((x.sum((1, 2)) * weights) * 0.0, (x.sum((1, 2)) * weights) * 0.0)
 
     cham_norm_x = x.new_zeros(())
@@ -131,10 +135,7 @@ def _chamfer_distance_single_direction(
         if weights is not None:
             cham_norm_x *= weights.view(N, 1)
 
-    if point_reduction == "max":
-        assert not return_normals
-        cham_x = cham_x.max(1).values  # (N,)
-    elif point_reduction is not None:
+    if point_reduction is not None:
         # Apply point reduction
         cham_x = cham_x.sum(1)  # (N,)
         if return_normals:
@@ -145,34 +146,22 @@ def _chamfer_distance_single_direction(
             if return_normals:
                 cham_norm_x /= x_lengths_clamped
 
+        if batch_reduction is not None:
+            # batch_reduction == "sum"
+            cham_x = cham_x.sum()
+            if return_normals:
+                cham_norm_x = cham_norm_x.sum()
+            if batch_reduction == "mean":
+                div = weights.sum() if weights is not None else max(N, 1)
+                cham_x /= div
+                if return_normals:
+                    cham_norm_x /= div
+
     cham_dist = cham_x
     cham_normals = cham_norm_x if return_normals else None
     return cham_dist, cham_normals
 
 
-def _apply_batch_reduction(
-    cham_x, cham_norm_x, weights, batch_reduction: Union[str, None]
-):
-    if batch_reduction is None:
-        return (cham_x, cham_norm_x)
-    # batch_reduction == "sum"
-    N = cham_x.shape[0]
-    cham_x = cham_x.sum()
-    if cham_norm_x is not None:
-        cham_norm_x = cham_norm_x.sum()
-    if batch_reduction == "mean":
-        if weights is None:
-            div = max(N, 1)
-        elif weights.sum() == 0.0:
-            div = 1
-        else:
-            div = weights.sum()
-        cham_x /= div
-        if cham_norm_x is not None:
-            cham_norm_x /= div
-    return (cham_x, cham_norm_x)
-
-
 def chamfer_distance(
     x,
     y,
@@ -208,8 +197,7 @@ def chamfer_distance(
         batch_reduction: Reduction operation to apply for the loss across the
             batch, can be one of ["mean", "sum"] or None.
         point_reduction: Reduction operation to apply for the loss across the
-            points, can be one of ["mean", "sum", "max"] or None. Using "max" leads to the
-            Hausdorff distance.
+            points, can be one of ["mean", "sum"] or None.
         norm: int indicates the norm used for the distance. Supports 1 for L1 and 2 for L2.
         single_directional: If False (default), loss comes from both the distance between
             each point in x and its nearest neighbor in y and each point in y and its nearest
@@ -239,10 +227,6 @@ def chamfer_distance(
 
     if not ((norm == 1) or (norm == 2)):
         raise ValueError("Support for 1 or 2 norm.")
-
-    if point_reduction == "max" and (x_normals is not None or y_normals is not None):
-        raise ValueError('Normals must be None if point_reduction is "max"')
-
     x, x_lengths, x_normals = _handle_pointcloud_input(x, x_lengths, x_normals)
     y, y_lengths, y_normals = _handle_pointcloud_input(y, y_lengths, y_normals)
 
@@ -254,13 +238,13 @@ def chamfer_distance(
         x_normals,
         y_normals,
         weights,
+        batch_reduction,
         point_reduction,
         norm,
         abs_cosine,
     )
     if single_directional:
-        loss = cham_x
-        loss_normals = cham_norm_x
+        return cham_x, cham_norm_x
     else:
         cham_y, cham_norm_y = _chamfer_distance_single_direction(
             y,
@@ -270,23 +254,17 @@ def chamfer_distance(
             y_normals,
             x_normals,
             weights,
+            batch_reduction,
            point_reduction,
             norm,
             abs_cosine,
         )
-        if point_reduction == "max":
-            loss = torch.maximum(cham_x, cham_y)
-            loss_normals = None
-        elif point_reduction is not None:
-            loss = cham_x + cham_y
-            if cham_norm_x is not None:
-                loss_normals = cham_norm_x + cham_norm_y
-            else:
-                loss_normals = None
-        else:
-            loss = (cham_x, cham_y)
-            if cham_norm_x is not None:
-                loss_normals = (cham_norm_x, cham_norm_y)
-            else:
-                loss_normals = None
-    return _apply_batch_reduction(loss, loss_normals, weights, batch_reduction)
+        if point_reduction is not None:
+            return (
+                cham_x + cham_y,
+                (cham_norm_x + cham_norm_y) if cham_norm_x is not None else None,
+            )
+        return (
+            (cham_x, cham_y),
+            (cham_norm_x, cham_norm_y) if cham_norm_x is not None else None,
+        )
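Taken together, the chamfer hunks move batch reduction back inside `_chamfer_distance_single_direction` and drop the `point_reduction="max"` (Hausdorff-style) option that exists only on the V0.7.8 side of this compare. A minimal usage sketch of the public API with the reductions both sides support (shapes are illustrative, not from the diff):

# Minimal sketch of pytorch3d.loss.chamfer_distance reductions.
import torch
from pytorch3d.loss import chamfer_distance

p1 = torch.rand(4, 128, 3)  # batch of 4 clouds with 128 points each
p2 = torch.rand(4, 256, 3)

# Default: mean over points, then mean over the batch -> scalar loss.
loss, _ = chamfer_distance(p1, p2)

# Per-cloud losses: sum over points, no batch reduction -> shape (4,).
per_cloud, _ = chamfer_distance(p1, p2, point_reduction="sum", batch_reduction=None)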
@@ -617,7 +617,6 @@ def _splat_points_to_volumes(
         w = wX * wY * wZ
 
         # valid - binary indicators of votes that fall into the volume
-        # pyre-fixme[16]: `int` has no attribute `long`.
         valid = (
             (0 <= X_)
             * (X_ < grid_sizes_xyz[:, None, 0:1])
@@ -636,19 +635,14 @@ def _splat_points_to_volumes(
         idx_valid = idx * valid + rand_idx * (1 - valid)
         w_valid = w * valid.type_as(w)
         if mask is not None:
-            # pyre-fixme[6]: For 1st argument expected `Tensor` but got `int`.
             w_valid = w_valid * mask.type_as(w)[:, :, None]
 
         # scatter add casts the votes into the weight accumulator
         # and the feature accumulator
-        # pyre-fixme[6]: For 3rd argument expected `Tensor` but got
-        # `Union[int, Tensor]`.
         volume_densities.scatter_add_(1, idx_valid, w_valid)
 
         # reshape idx_valid -> (minibatch, feature_dim, n_points)
         idx_valid = idx_valid.view(ba, 1, n_points).expand_as(points_features)
-        # pyre-fixme[16]: Item `int` of `Union[int, Tensor]` has no
-        # attribute `view`.
         w_valid = w_valid.view(ba, 1, n_points)
 
         # volume_features of shape (minibatch, feature_dim, n_voxels)
@@ -730,7 +724,6 @@ def _round_points_to_volumes(
     # valid - binary indicators of votes that fall into the volume
     # pyre-fixme[9]: grid_sizes has type `LongTensor`; used as `Tensor`.
     grid_sizes = grid_sizes.type_as(XYZ)
-    # pyre-fixme[16]: `int` has no attribute `long`.
     valid = (
         (0 <= X)
         * (X < grid_sizes_xyz[:, None, 0:1])
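The splatting hunks above accumulate per-point weights into a flattened voxel grid with `scatter_add_`. A minimal sketch of that accumulation pattern, with made-up shapes and names rather than the library's:

# Minimal sketch of the scatter_add_ pattern used above (illustrative shapes).
import torch

batch, n_points, n_voxels = 2, 5, 8
idx = torch.randint(n_voxels, (batch, n_points))  # flat voxel index per point
w = torch.rand(batch, n_points)                   # splatting weight per point

densities = torch.zeros(batch, n_voxels)
densities.scatter_add_(1, idx, w)  # densities[b, idx[b, p]] += w[b, p]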
@@ -143,6 +143,8 @@ def convert_pointclouds_to_tensor(pcl: Union[torch.Tensor, "Pointclouds"]):
     elif torch.is_tensor(pcl):
         X = pcl
         num_points = X.shape[1] * torch.ones(  # type: ignore
+            # pyre-fixme[16]: Item `Pointclouds` of `Union[Pointclouds, Tensor]` has
+            # no attribute `shape`.
             X.shape[0],
             device=X.device,
             dtype=torch.int64,
@@ -6,8 +6,6 @@
 
 # pyre-unsafe
 
-import torch
-
 from .blending import (
     BlendParams,
     hard_rgb_blend,
@@ -76,13 +74,9 @@ from .points import (
     PointsRasterizationSettings,
     PointsRasterizer,
     PointsRenderer,
+    PulsarPointsRenderer,
     rasterize_points,
 )
-
-# Pulsar is not enabled on amd.
-if not torch.version.hip:
-    from .points import PulsarPointsRenderer
-
 from .splatter_blend import SplatterBlender
 from .utils import (
     convert_to_tensors_and_broadcast,
@@ -212,15 +212,15 @@ def softmax_rgb_blend(
 
     # Reshape to be compatible with (N, H, W, K) values in fragments
     if torch.is_tensor(zfar):
+        # pyre-fixme[16]
         zfar = zfar[:, None, None, None]
     if torch.is_tensor(znear):
+        # pyre-fixme[16]: Item `float` of `Union[float, Tensor]` has no attribute
+        # `__getitem__`.
         znear = znear[:, None, None, None]
 
-    # pyre-fixme[6]: Expected `float` but got `Union[float, Tensor]`
     z_inv = (zfar - fragments.zbuf) / (zfar - znear) * mask
-    # pyre-fixme[6]: Expected `Tensor` but got `float`
     z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=eps)
-    # pyre-fixme[6]: Expected `Tensor` but got `float`
     weights_num = prob_map * torch.exp((z_inv - z_inv_max) / blend_params.gamma)
 
     # Also apply exp normalize trick for the background color weight.
@@ -1782,6 +1782,8 @@ def get_ndc_to_screen_transform(
     K = torch.zeros((cameras._N, 4, 4), device=cameras.device, dtype=torch.float32)
     if not torch.is_tensor(image_size):
         image_size = torch.tensor(image_size, device=cameras.device)
+    # pyre-fixme[16]: Item `List` of `Union[List[typing.Any], Tensor, Tuple[Any,
+    # ...]]` has no attribute `view`.
     image_size = image_size.view(-1, 2)  # of shape (1 or B)x2
     height, width = image_size.unbind(1)
 
@@ -497,7 +497,6 @@ def clip_faces(
     faces_case3 = face_verts_unclipped[case3_unclipped_idx]
 
     # index (0, 1, or 2) of the vertex in front of the clipping plane
-    # pyre-fixme[61]: `faces_clipped_verts` is undefined, or not always defined.
     p1_face_ind = torch.where(~faces_clipped_verts[case3_unclipped_idx])[1]
 
     # Solve for the points p4, p5 that intersect the clipping plane
@@ -541,7 +540,6 @@ def clip_faces(
     faces_case4 = face_verts_unclipped[case4_unclipped_idx]
 
     # index (0, 1, or 2) of the vertex behind the clipping plane
-    # pyre-fixme[61]: `faces_clipped_verts` is undefined, or not always defined.
     p1_face_ind = torch.where(faces_clipped_verts[case4_unclipped_idx])[1]
 
     # Solve for the points p4, p5 that intersect the clipping plane
@@ -6,13 +6,8 @@
 
 # pyre-unsafe
 
-import torch
-
 from .compositor import AlphaCompositor, NormWeightedCompositor
-
-# Pulsar not enabled on amd.
-if not torch.version.hip:
-    from .pulsar.unified import PulsarPointsRenderer
-
+from .pulsar.unified import PulsarPointsRenderer
 from .rasterize_points import rasterize_points
 from .rasterizer import PointsRasterizationSettings, PointsRasterizer
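The two `__init__.py` hunks above differ in whether `PulsarPointsRenderer` is imported unconditionally or only on non-ROCm builds. A hedged sketch of the guard pattern from the V0.7.8 side (the `None` fallback is an assumption, not part of the diff):

# Sketch of the conditional-import guard; torch.version.hip is None on CUDA/CPU builds.
import torch

if not torch.version.hip:
    from pytorch3d.renderer.points.pulsar.unified import PulsarPointsRenderer
else:
    PulsarPointsRenderer = None  # Pulsar is not built for AMD/HIP wheels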
@@ -270,8 +270,6 @@ class TensorProperties(nn.Module):
                 # to have the same shape as the input tensor.
                 new_dims = len(tensor_dims) - len(idx_dims)
                 new_shape = idx_dims + (1,) * new_dims
-                # pyre-fixme[58]: `+` is not supported for operand types
-                # `Tuple[int]` and `torch._C.Size`
                 expand_dims = (-1,) + tensor_dims[1:]
                 _batch_idx = _batch_idx.view(*new_shape)
                 _batch_idx = _batch_idx.expand(*expand_dims)
@@ -97,10 +97,7 @@ def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
     """
     ret = torch.zeros_like(x)
     positive_mask = x > 0
-    if torch.is_grad_enabled():
-        ret[positive_mask] = torch.sqrt(x[positive_mask])
-    else:
-        ret = torch.where(positive_mask, torch.sqrt(x), ret)
+    ret[positive_mask] = torch.sqrt(x[positive_mask])
     return ret
 
 
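Both sides of the `_sqrt_positive_part` hunk take the square root only where the input is strictly positive, so zeros and negative entries contribute 0 rather than NaN and autograd never evaluates `sqrt` at 0. A self-contained sketch of the masked square root, not library code:

# Illustrative sketch of the masked square root discussed above.
import torch

def sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
    ret = torch.zeros_like(x)
    positive_mask = x > 0
    # Indexed assignment, as on the shorter side of the diff; the other side
    # used torch.where(positive_mask, torch.sqrt(x), ret) when grad is disabled.
    ret[positive_mask] = torch.sqrt(x[positive_mask])
    return ret

print(sqrt_positive_part(torch.tensor([-1.0, 0.0, 4.0])))  # tensor([0., 0., 2.])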
@@ -369,7 +369,6 @@ def plot_scene(
         # update camera viewpoint if provided
         if viewpoints_eye_at_up_world is not None:
             # Use camera params for batch index or the first camera if only one provided.
-            # pyre-fixme[61]: `n_viewpoint_cameras` is undefined, or not always defined.
             viewpoint_idx = min(n_viewpoint_cameras - 1, subplot_idx)
 
             eye, at, up = (i[viewpoint_idx] for i in viewpoints_eye_at_up_world)
@@ -628,7 +627,7 @@ def _add_struct_from_batch(
 
 
 def _add_mesh_trace(
-    fig: go.Figure,
+    fig: go.Figure,  # pyre-ignore[11]
     meshes: Meshes,
     trace_name: str,
     subplot_idx: int,
@@ -674,7 +673,6 @@ def _add_mesh_trace(
     verts[~verts_used] = verts_center
 
     row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
-    # pyre-fixme[16]: `Figure` has no attribute `add_trace`.
     fig.add_trace(
         go.Mesh3d(
             x=verts[:, 0],
@@ -741,7 +739,6 @@ def _add_pointcloud_trace(
 
     row = subplot_idx // ncols + 1
     col = subplot_idx % ncols + 1
-    # pyre-fixme[16]: `Figure` has no attribute `add_trace`.
     fig.add_trace(
         go.Scatter3d(
             x=verts[:, 0],
@@ -803,7 +800,6 @@ def _add_camera_trace(
     x, y, z = all_cam_wires.detach().cpu().numpy().T.astype(float)
 
     row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
-    # pyre-fixme[16]: `Figure` has no attribute `add_trace`.
     fig.add_trace(
         go.Scatter3d(x=x, y=y, z=z, marker={"size": 1}, name=trace_name),
         row=row,
@@ -898,7 +894,6 @@ def _add_ray_bundle_trace(
         ray_lines = torch.cat((ray_lines, nan_tensor, ray_line))
     x, y, z = ray_lines.detach().cpu().numpy().T.astype(float)
     row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
-    # pyre-fixme[16]: `Figure` has no attribute `add_trace`.
     fig.add_trace(
         go.Scatter3d(
             x=x,
@@ -993,7 +988,7 @@ def _gen_fig_with_subplots(
 def _update_axes_bounds(
     verts_center: torch.Tensor,
     max_expand: float,
-    current_layout: go.Scene,
+    current_layout: go.Scene,  # pyre-ignore[11]
 ) -> None:  # pragma: no cover
     """
     Takes in the vertices' center point and max spread, and the current plotly figure
@@ -1010,7 +1005,6 @@ def _update_axes_bounds(
 
     # Ensure that within a subplot, the bounds capture all traces
     old_xrange, old_yrange, old_zrange = (
-        # pyre-fixme[16]: `Scene` has no attribute `__getitem__`.
         current_layout["xaxis"]["range"],
         current_layout["yaxis"]["range"],
         current_layout["zaxis"]["range"],
@@ -1029,7 +1023,6 @@ def _update_axes_bounds(
     xaxis = {"range": x_range}
     yaxis = {"range": y_range}
     zaxis = {"range": z_range}
-    # pyre-fixme[16]: `Scene` has no attribute `update`.
     current_layout.update({"xaxis": xaxis, "yaxis": yaxis, "zaxis": zaxis})
 
 
setup.py (2 changed lines)
@@ -153,7 +153,7 @@ setup(
     )
     + [trainer],
     package_dir={trainer: "projects/implicitron_trainer"},
-    install_requires=["iopath"],
+    install_requires=["fvcore", "iopath"],
     extras_require={
         "all": ["matplotlib", "tqdm>4.29.0", "imageio", "ipywidgets"],
         "dev": ["flake8", "usort"],
@@ -847,85 +847,6 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
             loss, loss_norm, pred_loss[0], pred_loss_norm[0], p1, p11, p2, p22
         )
 
-    def test_chamfer_point_reduction_max(self):
-        """
-        Compare output of vectorized chamfer loss with naive implementation
-        for point_reduction = "max" and batch_reduction = None.
-        """
-        N, P1, P2 = 7, 10, 18
-        device = get_random_cuda_device()
-        points_normals = TestChamfer.init_pointclouds(N, P1, P2, device)
-        p1 = points_normals.p1
-        p2 = points_normals.p2
-        weights = points_normals.weights
-        p11 = p1.detach().clone()
-        p22 = p2.detach().clone()
-        p11.requires_grad = True
-        p22.requires_grad = True
-
-        pred_loss, unused_pred_loss_norm = TestChamfer.chamfer_distance_naive(
-            p1, p2, x_normals=None, y_normals=None
-        )
-
-        loss, loss_norm = chamfer_distance(
-            p11,
-            p22,
-            x_normals=None,
-            y_normals=None,
-            weights=weights,
-            batch_reduction=None,
-            point_reduction="max",
-        )
-        pred_loss_max = torch.maximum(
-            pred_loss[0].max(1).values, pred_loss[1].max(1).values
-        )
-        pred_loss_max *= weights
-        self.assertClose(loss, pred_loss_max)
-
-        self.assertIsNone(loss_norm)
-
-        # Check gradients
-        self._check_gradients(loss, loss_norm, pred_loss_max, None, p1, p11, p2, p22)
-
-    def test_single_directional_chamfer_point_reduction_max(self):
-        """
-        Compare output of vectorized single directional chamfer loss with naive implementation
-        for point_reduction = "max" and batch_reduction = None.
-        """
-        N, P1, P2 = 7, 10, 18
-        device = get_random_cuda_device()
-        points_normals = TestChamfer.init_pointclouds(N, P1, P2, device)
-        p1 = points_normals.p1
-        p2 = points_normals.p2
-        weights = points_normals.weights
-        p11 = p1.detach().clone()
-        p22 = p2.detach().clone()
-        p11.requires_grad = True
-        p22.requires_grad = True
-
-        pred_loss, unused_pred_loss_norm = TestChamfer.chamfer_distance_naive(
-            p1, p2, x_normals=None, y_normals=None
-        )
-
-        loss, loss_norm = chamfer_distance(
-            p11,
-            p22,
-            x_normals=None,
-            y_normals=None,
-            weights=weights,
-            batch_reduction=None,
-            point_reduction="max",
-            single_directional=True,
-        )
-        pred_loss_max = pred_loss[0].max(1).values
-        pred_loss_max *= weights
-        self.assertClose(loss, pred_loss_max)
-
-        self.assertIsNone(loss_norm)
-
-        # Check gradients
-        self._check_gradients(loss, loss_norm, pred_loss_max, None, p1, p11, p2, p22)
-
     def _check_gradients(
         self,
         loss,
@@ -1099,9 +1020,9 @@ class TestChamfer(TestCaseMixin, unittest.TestCase):
         with self.assertRaisesRegex(ValueError, "batch_reduction must be one of"):
             chamfer_distance(p1, p2, weights=weights, batch_reduction="max")
 
-        # Error when point_reduction is not in ["mean", "sum", "max"] or None.
+        # Error when point_reduction is not in ["mean", "sum"] or None.
         with self.assertRaisesRegex(ValueError, "point_reduction must be one of"):
-            chamfer_distance(p1, p2, weights=weights, point_reduction="min")
+            chamfer_distance(p1, p2, weights=weights, point_reduction="max")
 
     def test_incorrect_weights(self):
         N, P1, P2 = 16, 64, 128