mirror of https://github.com/facebookresearch/pytorch3d.git
synced 2026-02-27 16:56:01 +08:00

Compare commits: v0.7.3...bottler-pa (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | c9a23bb832 | |
@@ -178,169 +178,6 @@ workflows:
  version: 2
  build_and_test:
    jobs:
      # - main:
      #     context: DOCKERHUB_TOKEN
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py38_cu102_pyt190
          python_version: '3.8'
          pytorch_version: 1.9.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py38_cu111_pyt190
          python_version: '3.8'
          pytorch_version: 1.9.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py38_cu102_pyt191
          python_version: '3.8'
          pytorch_version: 1.9.1
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py38_cu111_pyt191
          python_version: '3.8'
          pytorch_version: 1.9.1
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py38_cu102_pyt1100
          python_version: '3.8'
          pytorch_version: 1.10.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py38_cu111_pyt1100
          python_version: '3.8'
          pytorch_version: 1.10.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py38_cu113_pyt1100
          python_version: '3.8'
          pytorch_version: 1.10.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py38_cu102_pyt1101
          python_version: '3.8'
          pytorch_version: 1.10.1
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py38_cu111_pyt1101
          python_version: '3.8'
          pytorch_version: 1.10.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py38_cu113_pyt1101
          python_version: '3.8'
          pytorch_version: 1.10.1
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py38_cu102_pyt1102
          python_version: '3.8'
          pytorch_version: 1.10.2
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py38_cu111_pyt1102
          python_version: '3.8'
          pytorch_version: 1.10.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py38_cu113_pyt1102
          python_version: '3.8'
          pytorch_version: 1.10.2
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py38_cu102_pyt1110
          python_version: '3.8'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py38_cu111_pyt1110
          python_version: '3.8'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py38_cu113_pyt1110
          python_version: '3.8'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda115
          context: DOCKERHUB_TOKEN
          cu_version: cu115
          name: linux_conda_py38_cu115_pyt1110
          python_version: '3.8'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py38_cu102_pyt1120
          python_version: '3.8'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py38_cu113_pyt1120
          python_version: '3.8'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py38_cu116_pyt1120
          python_version: '3.8'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py38_cu102_pyt1121
          python_version: '3.8'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py38_cu113_pyt1121
          python_version: '3.8'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py38_cu116_pyt1121
          python_version: '3.8'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py38_cu116_pyt1130
          python_version: '3.8'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py38_cu117_pyt1130
          python_version: '3.8'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
@@ -355,181 +192,6 @@ workflows:
          name: linux_conda_py38_cu117_pyt1131
          python_version: '3.8'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py38_cu117_pyt200
          python_version: '3.8'
          pytorch_version: 2.0.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt200
          python_version: '3.8'
          pytorch_version: 2.0.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py39_cu102_pyt190
          python_version: '3.9'
          pytorch_version: 1.9.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py39_cu111_pyt190
          python_version: '3.9'
          pytorch_version: 1.9.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py39_cu102_pyt191
          python_version: '3.9'
          pytorch_version: 1.9.1
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py39_cu111_pyt191
          python_version: '3.9'
          pytorch_version: 1.9.1
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py39_cu102_pyt1100
          python_version: '3.9'
          pytorch_version: 1.10.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py39_cu111_pyt1100
          python_version: '3.9'
          pytorch_version: 1.10.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py39_cu113_pyt1100
          python_version: '3.9'
          pytorch_version: 1.10.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py39_cu102_pyt1101
          python_version: '3.9'
          pytorch_version: 1.10.1
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py39_cu111_pyt1101
          python_version: '3.9'
          pytorch_version: 1.10.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py39_cu113_pyt1101
          python_version: '3.9'
          pytorch_version: 1.10.1
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py39_cu102_pyt1102
          python_version: '3.9'
          pytorch_version: 1.10.2
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py39_cu111_pyt1102
          python_version: '3.9'
          pytorch_version: 1.10.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py39_cu113_pyt1102
          python_version: '3.9'
          pytorch_version: 1.10.2
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py39_cu102_pyt1110
          python_version: '3.9'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py39_cu111_pyt1110
          python_version: '3.9'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py39_cu113_pyt1110
          python_version: '3.9'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda115
          context: DOCKERHUB_TOKEN
          cu_version: cu115
          name: linux_conda_py39_cu115_pyt1110
          python_version: '3.9'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py39_cu102_pyt1120
          python_version: '3.9'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py39_cu113_pyt1120
          python_version: '3.9'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py39_cu116_pyt1120
          python_version: '3.9'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py39_cu102_pyt1121
          python_version: '3.9'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py39_cu113_pyt1121
          python_version: '3.9'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py39_cu116_pyt1121
          python_version: '3.9'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py39_cu116_pyt1130
          python_version: '3.9'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py39_cu117_pyt1130
          python_version: '3.9'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
@@ -544,100 +206,6 @@ workflows:
          name: linux_conda_py39_cu117_pyt1131
          python_version: '3.9'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py39_cu117_pyt200
          python_version: '3.9'
          pytorch_version: 2.0.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt200
          python_version: '3.9'
          pytorch_version: 2.0.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py310_cu102_pyt1110
          python_version: '3.10'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu111
          name: linux_conda_py310_cu111_pyt1110
          python_version: '3.10'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py310_cu113_pyt1110
          python_version: '3.10'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda115
          context: DOCKERHUB_TOKEN
          cu_version: cu115
          name: linux_conda_py310_cu115_pyt1110
          python_version: '3.10'
          pytorch_version: 1.11.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py310_cu102_pyt1120
          python_version: '3.10'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py310_cu113_pyt1120
          python_version: '3.10'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py310_cu116_pyt1120
          python_version: '3.10'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          context: DOCKERHUB_TOKEN
          cu_version: cu102
          name: linux_conda_py310_cu102_pyt1121
          python_version: '3.10'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py310_cu113_pyt1121
          python_version: '3.10'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py310_cu116_pyt1121
          python_version: '3.10'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py310_cu116_pyt1130
          python_version: '3.10'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py310_cu117_pyt1130
          python_version: '3.10'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
@@ -652,38 +220,3 @@ workflows:
          name: linux_conda_py310_cu117_pyt1131
          python_version: '3.10'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py310_cu117_pyt200
          python_version: '3.10'
          pytorch_version: 2.0.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt200
          python_version: '3.10'
          pytorch_version: 2.0.0
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py38_cu102_pyt190
          context: DOCKERHUB_TOKEN
          python_version: "3.8"
          pytorch_version: '1.9.0'
          cu_version: "cu102"
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py3.8_cpu
          python_version: '3.8'
          pytorch_version: '1.13.0'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py3.9_cpu
          python_version: '3.9'
          pytorch_version: '1.13.0'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py3.10_cpu
          python_version: '3.10'
          pytorch_version: '1.13.0'
@@ -29,22 +29,32 @@ CONDA_CUDA_VERSIONS = {
    "1.12.0": ["cu102", "cu113", "cu116"],
    "1.12.1": ["cu102", "cu113", "cu116"],
    "1.13.0": ["cu116", "cu117"],
    "1.13.1": ["cu116", "cu117"],
    "2.0.0": ["cu117", "cu118"],
}


def conda_docker_image_for_cuda(cuda_version):
    if cuda_version in ("cu101", "cu102", "cu111"):
        return None
    if len(cuda_version) != 5:
        raise ValueError("Unknown cuda version")
    return "pytorch/conda-builder:cuda" + cuda_version[2:]
    if cuda_version == "cu113":
        return "pytorch/conda-builder:cuda113"
    if cuda_version == "cu115":
        return "pytorch/conda-builder:cuda115"
    if cuda_version == "cu116":
        return "pytorch/conda-builder:cuda116"
    if cuda_version == "cu117":
        return "pytorch/conda-builder:cuda117"
    raise ValueError("Unknown cuda version")


def pytorch_versions_for_python(python_version):
    if python_version in ["3.8", "3.9"]:
    if python_version in ["3.7", "3.8"]:
        return list(CONDA_CUDA_VERSIONS)
    if python_version == "3.9":
        return [
            i
            for i in CONDA_CUDA_VERSIONS
            if version.Version(i) > version.Version("1.7.0")
        ]
    if python_version == "3.10":
        return [
            i
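The hunk above replaces a per-version if-chain with a computed docker image tag, which is what lets the new cu118 builds work without another branch. A minimal sketch of the new helper's behaviour, with illustrative assertions (the expected strings follow the pattern in the diff; they are not taken from the repository's tests):

```
def conda_docker_image_for_cuda(cuda_version):
    # Builds for older CUDA versions use the default docker image.
    if cuda_version in ("cu101", "cu102", "cu111"):
        return None
    # Expect identifiers of the form "cu" + three digits, e.g. "cu118".
    if len(cuda_version) != 5:
        raise ValueError("Unknown cuda version")
    # Derive the conda-builder tag instead of enumerating every version.
    return "pytorch/conda-builder:cuda" + cuda_version[2:]


assert conda_docker_image_for_cuda("cu102") is None
assert conda_docker_image_for_cuda("cu117") == "pytorch/conda-builder:cuda117"
# cu118 needs no new branch, which is what enables the PyTorch 2.0.0 jobs.
assert conda_docker_image_for_cuda("cu118") == "pytorch/conda-builder:cuda118"
```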
@@ -9,19 +9,19 @@ The core library is written in PyTorch. Several components have underlying imple

- Linux or macOS or Windows
- Python 3.8, 3.9 or 3.10
- PyTorch 1.9.0, 1.9.1, 1.10.0, 1.10.1, 1.10.2, 1.11.0, 1.12.0, 1.12.1, 1.13.0 or 2.0.0.
- PyTorch 1.9.0, 1.9.1, 1.10.0, 1.10.1, 1.10.2, 1.11.0, 1.12.0, 1.12.1 or 1.13.0.
- torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
- gcc & g++ ≥ 4.9
- [fvcore](https://github.com/facebookresearch/fvcore)
- [ioPath](https://github.com/facebookresearch/iopath)
- If CUDA is to be used, use a version which is supported by the corresponding pytorch version and at least version 9.2.
- If CUDA older than 11.7 is to be used and you are building from source, the CUB library must be available. We recommend version 1.10.0.
- If CUDA is to be used and you are building from source, the CUB library must be available. We recommend version 1.10.0.

The runtime dependencies can be installed by running:
```
conda create -n pytorch3d python=3.9
conda activate pytorch3d
conda install pytorch=1.13.0 torchvision pytorch-cuda=11.6 -c pytorch -c nvidia
conda install -c pytorch pytorch=1.9.1 torchvision cudatoolkit=11.6
conda install -c fvcore -c iopath -c conda-forge fvcore iopath
```
@@ -102,7 +102,6 @@ version_str="".join([
    torch.version.cuda.replace(".",""),
    f"_pyt{pyt_version_str}"
])
!pip install fvcore iopath
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
```
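The tail of the block above builds `version_str`, which selects the matching prebuilt wheel directory. A self-contained sketch of the full computation (the lines before the `join` are reconstructed from the surrounding INSTALL.md text, so treat them as an assumption):

```
import sys
import torch

# e.g. torch 2.0.0+cu118 on Python 3.10 -> "py310_cu118_pyt200"
pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
version_str = "".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".", ""),
    f"_pyt{pyt_version_str}",
])
print(version_str)
```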
@@ -12,7 +12,7 @@ Key features include:
- Data structure for storing and manipulating triangle meshes
- Efficient operations on triangle meshes (projective transformations, graph convolution, sampling, loss functions)
- A differentiable mesh renderer
- Implicitron, see [its README](projects/implicitron_trainer), a framework for new-view synthesis via implicit representations. ([blog post](https://ai.facebook.com/blog/implicitron-a-new-modular-extensible-framework-for-neural-implicit-representations-in-pytorch3d/))
- Implicitron, see [its README](projects/implicitron_trainer), a framework for new-view synthesis via implicit representations.

PyTorch3D is designed to integrate smoothly with deep learning methods for predicting and manipulating 3D data.
For this reason, all operators in PyTorch3D:
@@ -24,8 +24,6 @@ For this reason, all operators in PyTorch3D:

Within FAIR, PyTorch3D has been used to power research projects such as [Mesh R-CNN](https://arxiv.org/abs/1906.02739).

See our [blog post](https://ai.facebook.com/blog/-introducing-pytorch3d-an-open-source-library-for-3d-deep-learning/) to see more demos and learn about PyTorch3D.

## Installation

For detailed instructions refer to [INSTALL.md](INSTALL.md).
@@ -146,8 +144,6 @@ If you are using the pulsar backend for sphere-rendering (the `PulsarPointRender

Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).

**[Dec 19th 2022]:** PyTorch3D [v0.7.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.2) released.

**[Oct 23rd 2022]:** PyTorch3D [v0.7.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.1) released.

**[Aug 10th 2022]:** PyTorch3D [v0.7.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.0) released with Implicitron and MeshRasterizerOpenGL.
@@ -7,20 +7,20 @@ sidebar_label: File IO
There is a flexible interface for loading and saving point clouds and meshes from different formats.

The main usage is via the `pytorch3d.io.IO` object, and its methods
`load_mesh`, `save_mesh`, `load_pointcloud` and `save_pointcloud`.
`load_mesh`, `save_mesh`, `load_point_cloud` and `save_point_cloud`.

For example, to load a mesh you might do
```
from pytorch3d.io import IO

device=torch.device("cuda:0")
mesh = IO().load_mesh("mymesh.obj", device=device)
mesh = IO().load_mesh("mymesh.ply", device=device)
```

and to save a pointcloud you might do
```
pcl = Pointclouds(...)
IO().save_pointcloud(pcl, "output_pointcloud.ply")
IO().save_point_cloud(pcl, "output_pointcloud.obj")
```

For meshes, this supports OBJ, PLY and OFF files.
@@ -31,4 +31,4 @@ In addition, there is experimental support for loading meshes from
[glTF 2 assets](https://github.com/KhronosGroup/glTF/tree/master/specification/2.0)
stored either in a GLB container file or a glTF JSON file with embedded binary data.
This must be enabled explicitly, as described in
`pytorch3d/io/experimental_gltf_io.py`.
`pytorch3d/io/experimental_gltf_io.ply`.
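Putting the corrected method names from the hunks above together, a short end-to-end sketch (file paths are placeholders; the random point cloud is only for illustration):

```
import torch
from pytorch3d.io import IO
from pytorch3d.structures import Pointclouds

device = torch.device("cuda:0")

# Load a mesh onto the GPU; the format is inferred from the file suffix.
mesh = IO().load_mesh("mymesh.obj", device=device)

# Build a tiny random point cloud and save it with save_pointcloud.
pcl = Pointclouds(points=[torch.rand(100, 3)])
IO().save_pointcloud(pcl, "output_pointcloud.ply")
```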
@@ -89,7 +89,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -101,6 +101,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
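The same two changes recur in the install cell of every tutorial notebook below: the wheel branch now also matches torch 2.0.x, and the from-source fallback downloads CUB before building. Assembled for readability, the resulting cell looks roughly like this; the untouched lines are reconstructed from the hunk context, and the `!`-prefixed lines are notebook shell escapes, not plain Python:

```
import os
import sys
import torch

need_pytorch3d = False
try:
    import pytorch3d  # noqa: F401
except ModuleNotFoundError:
    need_pytorch3d = True

if need_pytorch3d:
    if torch.__version__.startswith(("1.13.", "2.0.")) and sys.platform.startswith("linux"):
        # We try to install PyTorch3D via a released wheel.
        pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
        version_str = "".join([
            f"py3{sys.version_info.minor}_cu",
            torch.version.cuda.replace(".", ""),
            f"_pyt{pyt_version_str}",
        ])
        !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
    else:
        # We try to install PyTorch3D from source; CUB is needed to build the CUDA kernels.
        !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
        !tar xzf 1.10.0.tar.gz
        os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
        !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
```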
@@ -76,7 +76,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -88,6 +88,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -51,7 +51,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -63,6 +63,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -90,7 +90,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -102,6 +102,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -189,7 +192,7 @@
"outputs": [],
"source": [
"# Load the dolphin mesh.\n",
"trg_obj = 'dolphin.obj'"
"trg_obj = os.path.join('dolphin.obj')"
]
},
{
@@ -244,7 +247,7 @@
"id": "dYWDl4VGWHRK"
},
"source": [
"## 2. Visualize the source and target meshes"
"### Visualize the source and target meshes"
]
},
{
@@ -482,7 +485,7 @@
"final_verts = final_verts * scale + center\n",
"\n",
"# Store the predicted mesh using save_obj\n",
"final_obj = 'final_model.obj'\n",
"final_obj = os.path.join('./', 'final_model.obj')\n",
"save_obj(final_obj, final_verts, final_faces)"
]
},
@@ -56,7 +56,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -68,6 +68,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -68,7 +68,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -80,6 +80,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -47,7 +47,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -59,6 +59,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -78,7 +78,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -90,6 +90,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -72,7 +72,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -84,6 +84,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -50,7 +50,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -62,6 +62,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -57,7 +57,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -69,6 +69,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -73,7 +73,7 @@
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
" if torch.__version__.startswith(\"1.13.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
@@ -85,6 +85,9 @@
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
@@ -26,6 +26,5 @@ version_str="".join([
    torch.version.cuda.replace(".",""),
    f"_pyt{pyt_version_str}"
])
!pip install fvcore iopath
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
```
@@ -212,7 +212,9 @@ from pytorch3d.implicitron.tools.config import registry
class XRayRenderer(BaseRenderer, torch.nn.Module):
    n_pts_per_ray: int = 64

    # if there are other base classes, make sure to call `super().__init__()` explicitly
    def __post_init__(self):
        super().__init__()
        # custom initialization

    def forward(
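For context: Implicitron's registry-based pluggable classes run their setup in `__post_init__` rather than `__init__`, and the comment added above warns that when such a class also inherits from `torch.nn.Module`, `super().__init__()` must be called there explicitly. A filled-out sketch of the plugin shown in the diff; the import path for `BaseRenderer` and the `forward` signature and body are assumptions, not part of the source:

```
import torch
from pytorch3d.implicitron.models.renderer.base import BaseRenderer  # assumed path
from pytorch3d.implicitron.tools.config import registry


@registry.register
class XRayRenderer(BaseRenderer, torch.nn.Module):
    n_pts_per_ray: int = 64

    # if there are other base classes, make sure to call `super().__init__()` explicitly
    def __post_init__(self):
        super().__init__()
        # custom initialization, e.g. building submodules, goes here

    def forward(self, ray_bundle, implicit_functions=(), **kwargs):
        # hypothetical body: accumulate density along each ray like an X-ray
        raise NotImplementedError
```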
@@ -248,7 +250,7 @@ The main object for this trainer loop is `Experiment`. It has four top-level rep
* `data_source`: This is a `DataSourceBase` which defaults to `ImplicitronDataSource`.
It constructs the data sets and dataloaders.
* `model_factory`: This is a `ModelFactoryBase` which defaults to `ImplicitronModelFactory`.
It constructs the model, which is usually an instance of `OverfitModel` (for NeRF-style training with overfitting to one scene) or `GenericModel` (that is able to generalize to multiple scenes by NeRFormer-style conditioning on other scene views), and can load its weights from a checkpoint.
It constructs the model, which is usually an instance of implicitron's main `GenericModel` class, and can load its weights from a checkpoint.
* `optimizer_factory`: This is an `OptimizerFactoryBase` which defaults to `ImplicitronOptimizerFactory`.
It constructs the optimizer and can load its weights from a checkpoint.
* `training_loop`: This is a `TrainingLoopBase` which defaults to `ImplicitronTrainingLoop` and defines the main training loop.
@@ -292,43 +294,6 @@ model_GenericModel_args: GenericModel
        ╘== ReductionFeatureAggregator
```

Here is the class structure of OverfitModel:

```
model_OverfitModel_args: OverfitModel
└-- raysampler_*_args: RaySampler
    ╘== AdaptiveRaysampler
    ╘== NearFarRaysampler
└-- renderer_*_args: BaseRenderer
    ╘== MultiPassEmissionAbsorptionRenderer
    ╘== LSTMRenderer
    ╘== SignedDistanceFunctionRenderer
        └-- ray_tracer_args: RayTracing
        └-- ray_normal_coloring_network_args: RayNormalColoringNetwork
└-- implicit_function_*_args: ImplicitFunctionBase
    ╘== NeuralRadianceFieldImplicitFunction
    ╘== SRNImplicitFunction
        └-- raymarch_function_args: SRNRaymarchFunction
        └-- pixel_generator_args: SRNPixelGenerator
    ╘== SRNHyperNetImplicitFunction
        └-- hypernet_args: SRNRaymarchHyperNet
        └-- pixel_generator_args: SRNPixelGenerator
    ╘== IdrFeatureField
└-- coarse_implicit_function_*_args: ImplicitFunctionBase
    ╘== NeuralRadianceFieldImplicitFunction
    ╘== SRNImplicitFunction
        └-- raymarch_function_args: SRNRaymarchFunction
        └-- pixel_generator_args: SRNPixelGenerator
    ╘== SRNHyperNetImplicitFunction
        └-- hypernet_args: SRNRaymarchHyperNet
        └-- pixel_generator_args: SRNPixelGenerator
    ╘== IdrFeatureField
```

OverfitModel was introduced as a simple class that disentangles NeRFs which follow the overfit pattern
from the GenericModel.

Please look at the annotations of the respective classes or functions for the lists of hyperparameters.
`tests/experiment.yaml` shows every possible option if you have no user-defined classes.
@@ -1,79 +0,0 @@
defaults:
- default_config
- _self_
exp_dir: ./data/exps/overfit_base/
training_loop_ImplicitronTrainingLoop_args:
  visdom_port: 8097
  visualize_interval: 0
  max_epochs: 1000
data_source_ImplicitronDataSource_args:
  data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
  dataset_map_provider_class_type: JsonIndexDatasetMapProvider
  data_loader_map_provider_SequenceDataLoaderMapProvider_args:
    dataset_length_train: 1000
    dataset_length_val: 1
    num_workers: 8
  dataset_map_provider_JsonIndexDatasetMapProvider_args:
    dataset_root: ${oc.env:CO3D_DATASET_ROOT}
    n_frames_per_sequence: -1
    test_on_train: true
    test_restrict_sequence_id: 0
    dataset_JsonIndexDataset_args:
      load_point_clouds: false
      mask_depths: false
      mask_images: false
model_factory_ImplicitronModelFactory_args:
  model_class_type: "OverfitModel"
  model_OverfitModel_args:
    loss_weights:
      loss_mask_bce: 1.0
      loss_prev_stage_mask_bce: 1.0
      loss_autodecoder_norm: 0.01
      loss_rgb_mse: 1.0
      loss_prev_stage_rgb_mse: 1.0
    output_rasterized_mc: false
    chunk_size_grid: 102400
    render_image_height: 400
    render_image_width: 400
    share_implicit_function_across_passes: false
    implicit_function_class_type: "NeuralRadianceFieldImplicitFunction"
    implicit_function_NeuralRadianceFieldImplicitFunction_args:
      n_harmonic_functions_xyz: 10
      n_harmonic_functions_dir: 4
      n_hidden_neurons_xyz: 256
      n_hidden_neurons_dir: 128
      n_layers_xyz: 8
      append_xyz:
      - 5
    coarse_implicit_function_class_type: "NeuralRadianceFieldImplicitFunction"
    coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args:
      n_harmonic_functions_xyz: 10
      n_harmonic_functions_dir: 4
      n_hidden_neurons_xyz: 256
      n_hidden_neurons_dir: 128
      n_layers_xyz: 8
      append_xyz:
      - 5
    raysampler_AdaptiveRaySampler_args:
      n_rays_per_image_sampled_from_mask: 1024
      scene_extent: 8.0
      n_pts_per_ray_training: 64
      n_pts_per_ray_evaluation: 64
      stratified_point_sampling_training: true
      stratified_point_sampling_evaluation: false
    renderer_MultiPassEmissionAbsorptionRenderer_args:
      n_pts_per_ray_fine_training: 64
      n_pts_per_ray_fine_evaluation: 64
      append_coarse_samples_to_fine: true
      density_noise_std_train: 1.0
optimizer_factory_ImplicitronOptimizerFactory_args:
  breed: Adam
  weight_decay: 0.0
  lr_policy: MultiStepLR
  multistep_lr_milestones: []
  lr: 0.0005
  gamma: 0.1
  momentum: 0.9
  betas:
  - 0.9
  - 0.999
@@ -1,42 +0,0 @@
defaults:
- overfit_base
- _self_
data_source_ImplicitronDataSource_args:
  data_loader_map_provider_SequenceDataLoaderMapProvider_args:
    batch_size: 1
    dataset_length_train: 1000
    dataset_length_val: 1
    num_workers: 8
  dataset_map_provider_JsonIndexDatasetMapProvider_args:
    assert_single_seq: true
    n_frames_per_sequence: -1
    test_restrict_sequence_id: 0
    test_on_train: false
model_factory_ImplicitronModelFactory_args:
  model_class_type: "OverfitModel"
  model_OverfitModel_args:
    render_image_height: 800
    render_image_width: 800
    log_vars:
    - loss_rgb_psnr_fg
    - loss_rgb_psnr
    - loss_eikonal
    - loss_prev_stage_rgb_psnr
    - loss_mask_bce
    - loss_prev_stage_mask_bce
    - loss_rgb_mse
    - loss_prev_stage_rgb_mse
    - loss_depth_abs
    - loss_depth_abs_fg
    - loss_kl
    - loss_mask_neg_iou
    - objective
    - epoch
    - sec/it
optimizer_factory_ImplicitronOptimizerFactory_args:
  lr: 0.0005
  multistep_lr_milestones:
  - 200
  - 300
training_loop_ImplicitronTrainingLoop_args:
  max_epochs: 400
@@ -1,56 +0,0 @@
defaults:
- overfit_singleseq_base
- _self_
exp_dir: "./data/overfit_nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}"
data_source_ImplicitronDataSource_args:
  data_loader_map_provider_SequenceDataLoaderMapProvider_args:
    dataset_length_train: 100
  dataset_map_provider_class_type: BlenderDatasetMapProvider
  dataset_map_provider_BlenderDatasetMapProvider_args:
    base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS}
    n_known_frames_for_test: null
    object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
    path_manager_factory_class_type: PathManagerFactory
    path_manager_factory_PathManagerFactory_args:
      silence_logs: true

model_factory_ImplicitronModelFactory_args:
  model_class_type: "OverfitModel"
  model_OverfitModel_args:
    mask_images: false
    raysampler_class_type: AdaptiveRaySampler
    raysampler_AdaptiveRaySampler_args:
      n_pts_per_ray_training: 64
      n_pts_per_ray_evaluation: 64
      n_rays_per_image_sampled_from_mask: 4096
      stratified_point_sampling_training: true
      stratified_point_sampling_evaluation: false
      scene_extent: 2.0
      scene_center:
      - 0.0
      - 0.0
      - 0.0
    renderer_MultiPassEmissionAbsorptionRenderer_args:
      density_noise_std_train: 0.0
      n_pts_per_ray_fine_training: 128
      n_pts_per_ray_fine_evaluation: 128
      raymarcher_EmissionAbsorptionRaymarcher_args:
        blend_output: false
    loss_weights:
      loss_rgb_mse: 1.0
      loss_prev_stage_rgb_mse: 1.0
      loss_mask_bce: 0.0
      loss_prev_stage_mask_bce: 0.0
      loss_autodecoder_norm: 0.00

optimizer_factory_ImplicitronOptimizerFactory_args:
  exponential_lr_step_size: 3001
  lr_policy: LinearExponential
  linear_exponential_lr_milestone: 200

training_loop_ImplicitronTrainingLoop_args:
  max_epochs: 6000
  metric_print_interval: 10
  store_checkpoints_purge: 3
  test_when_finished: true
  validation_interval: 100
@@ -59,7 +59,7 @@ from pytorch3d.implicitron.dataset.data_source import (
    DataSourceBase,
    ImplicitronDataSource,
)
from pytorch3d.implicitron.models.base_model import ImplicitronModelBase
from pytorch3d.implicitron.models.generic_model import ImplicitronModelBase

from pytorch3d.implicitron.models.renderer.multipass_ea import (
    MultiPassEmissionAbsorptionRenderer,
@@ -207,6 +207,12 @@ class Experiment(Configurable):  # pyre-ignore: 13
            val_loader,
        ) = accelerator.prepare(model, optimizer, train_loader, val_loader)

        # pyre-fixme[16]: Optional type has no attribute `is_multisequence`.
        if not self.training_loop.evaluator.is_multisequence:
            all_train_cameras = self.data_source.all_train_cameras
        else:
            all_train_cameras = None

        # Enter the main training loop.
        self.training_loop.run(
            train_loader=train_loader,
@@ -217,6 +223,7 @@ class Experiment(Configurable):  # pyre-ignore: 13
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
            all_train_cameras=all_train_cameras,
            accelerator=accelerator,
            device=device,
            exp_dir=self.exp_dir,
@@ -122,6 +122,7 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
        optimizer: torch.optim.Optimizer,
        scheduler: Any,
        accelerator: Optional[Accelerator],
        all_train_cameras: Optional[CamerasBase],
        device: torch.device,
        exp_dir: str,
        stats: Stats,
@@ -141,6 +142,7 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
        if test_loader is not None:
            # pyre-fixme[16]: `Optional` has no attribute `run`.
            self.evaluator.run(
                all_train_cameras=all_train_cameras,
                dataloader=test_loader,
                device=device,
                dump_to_json=True,
@@ -198,6 +200,7 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
                and epoch % self.test_interval == 0
            ):
                self.evaluator.run(
                    all_train_cameras=all_train_cameras,
                    device=device,
                    dataloader=test_loader,
                    model=model,
@@ -214,6 +217,7 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
        if self.test_when_finished:
            if test_loader is not None:
                self.evaluator.run(
                    all_train_cameras=all_train_cameras,
                    device=device,
                    dump_to_json=True,
                    epoch=stats.epoch,
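The hunks above all serve one change: `Experiment.run` now computes `all_train_cameras` and threads it through `ImplicitronTrainingLoop.run` into each `evaluator.run` call. A condensed sketch of the flow, with the names taken from the diff and all unrelated arguments elided:

```
# In Experiment.run:
if not self.training_loop.evaluator.is_multisequence:
    all_train_cameras = self.data_source.all_train_cameras
else:
    all_train_cameras = None

self.training_loop.run(
    all_train_cameras=all_train_cameras,
    # ... train/val/test loaders, model, optimizer, scheduler, ...
)

# Inside ImplicitronTrainingLoop.run, every evaluation site forwards it:
self.evaluator.run(
    all_train_cameras=all_train_cameras,
    dataloader=test_loader,
    # ... device, model, dump_to_json, epoch, ...
)
```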
@@ -103,10 +103,8 @@ data_source_ImplicitronDataSource_args:
      num_views: 40
      data_file: null
      azimuth_range: 180.0
      distance: 2.7
      resolution: 128
      use_point_light: true
      gpu_idx: 0
      path_manager_factory_class_type: PathManagerFactory
      path_manager_factory_PathManagerFactory_args:
        silence_logs: true
@@ -561,623 +559,6 @@ model_factory_ImplicitronModelFactory_args:
          use_xavier_init: true
    view_metrics_ViewMetrics_args: {}
    regularization_metrics_RegularizationMetrics_args: {}
  model_OverfitModel_args:
    log_vars:
    - loss_rgb_psnr_fg
    - loss_rgb_psnr
    - loss_rgb_mse
    - loss_rgb_huber
    - loss_depth_abs
    - loss_depth_abs_fg
    - loss_mask_neg_iou
    - loss_mask_bce
    - loss_mask_beta_prior
    - loss_eikonal
    - loss_density_tv
    - loss_depth_neg_penalty
    - loss_autodecoder_norm
    - loss_prev_stage_rgb_mse
    - loss_prev_stage_rgb_psnr_fg
    - loss_prev_stage_rgb_psnr
    - loss_prev_stage_mask_bce
    - objective
    - epoch
    - sec/it
    mask_images: true
    mask_depths: true
    render_image_width: 400
    render_image_height: 400
    mask_threshold: 0.5
    output_rasterized_mc: false
    bg_color:
    - 0.0
    - 0.0
    - 0.0
    chunk_size_grid: 4096
    render_features_dimensions: 3
    tqdm_trigger_threshold: 16
    n_train_target_views: 1
    sampling_mode_training: mask_sample
    sampling_mode_evaluation: full_grid
    global_encoder_class_type: null
    raysampler_class_type: AdaptiveRaySampler
    renderer_class_type: MultiPassEmissionAbsorptionRenderer
    share_implicit_function_across_passes: false
    implicit_function_class_type: NeuralRadianceFieldImplicitFunction
    coarse_implicit_function_class_type: null
    view_metrics_class_type: ViewMetrics
    regularization_metrics_class_type: RegularizationMetrics
    loss_weights:
      loss_rgb_mse: 1.0
      loss_prev_stage_rgb_mse: 1.0
      loss_mask_bce: 0.0
      loss_prev_stage_mask_bce: 0.0
    global_encoder_HarmonicTimeEncoder_args:
      n_harmonic_functions: 10
      append_input: true
      time_divisor: 1.0
    global_encoder_SequenceAutodecoder_args:
      autodecoder_args:
        encoding_dim: 0
        n_instances: 1
        init_scale: 1.0
        ignore_input: false
    raysampler_AdaptiveRaySampler_args:
      n_pts_per_ray_training: 64
      n_pts_per_ray_evaluation: 64
      n_rays_per_image_sampled_from_mask: 1024
      n_rays_total_training: null
      stratified_point_sampling_training: true
      stratified_point_sampling_evaluation: false
      scene_extent: 8.0
      scene_center:
      - 0.0
      - 0.0
      - 0.0
    raysampler_NearFarRaySampler_args:
      n_pts_per_ray_training: 64
      n_pts_per_ray_evaluation: 64
      n_rays_per_image_sampled_from_mask: 1024
      n_rays_total_training: null
      stratified_point_sampling_training: true
      stratified_point_sampling_evaluation: false
      min_depth: 0.1
      max_depth: 8.0
    renderer_LSTMRenderer_args:
      num_raymarch_steps: 10
      init_depth: 17.0
      init_depth_noise_std: 0.0005
      hidden_size: 16
      n_feature_channels: 256
      bg_color: null
      verbose: false
    renderer_MultiPassEmissionAbsorptionRenderer_args:
      raymarcher_class_type: EmissionAbsorptionRaymarcher
      n_pts_per_ray_fine_training: 64
      n_pts_per_ray_fine_evaluation: 64
      stratified_sampling_coarse_training: true
      stratified_sampling_coarse_evaluation: false
      append_coarse_samples_to_fine: true
      density_noise_std_train: 0.0
      return_weights: false
      raymarcher_CumsumRaymarcher_args:
        surface_thickness: 1
        bg_color:
        - 0.0
        replicate_last_interval: false
        background_opacity: 0.0
        density_relu: true
        blend_output: false
      raymarcher_EmissionAbsorptionRaymarcher_args:
        surface_thickness: 1
        bg_color:
        - 0.0
        replicate_last_interval: false
        background_opacity: 10000000000.0
        density_relu: true
        blend_output: false
    renderer_SignedDistanceFunctionRenderer_args:
      ray_normal_coloring_network_args:
        feature_vector_size: 3
        mode: idr
        d_in: 9
        d_out: 3
        dims:
        - 512
        - 512
        - 512
        - 512
        weight_norm: true
        n_harmonic_functions_dir: 0
        pooled_feature_dim: 0
      bg_color:
      - 0.0
      soft_mask_alpha: 50.0
      ray_tracer_args:
        sdf_threshold: 5.0e-05
        line_search_step: 0.5
        line_step_iters: 1
        sphere_tracing_iters: 10
        n_steps: 100
        n_secant_steps: 8
    implicit_function_IdrFeatureField_args:
      d_in: 3
      d_out: 1
      dims:
      - 512
      - 512
      - 512
      - 512
      - 512
      - 512
      - 512
      - 512
      geometric_init: true
      bias: 1.0
      skip_in: []
      weight_norm: true
      n_harmonic_functions_xyz: 0
      pooled_feature_dim: 0
    implicit_function_NeRFormerImplicitFunction_args:
      n_harmonic_functions_xyz: 10
      n_harmonic_functions_dir: 4
      n_hidden_neurons_dir: 128
      input_xyz: true
      xyz_ray_dir_in_camera_coords: false
      transformer_dim_down_factor: 2.0
      n_hidden_neurons_xyz: 80
      n_layers_xyz: 2
      append_xyz:
      - 1
    implicit_function_NeuralRadianceFieldImplicitFunction_args:
      n_harmonic_functions_xyz: 10
      n_harmonic_functions_dir: 4
      n_hidden_neurons_dir: 128
      input_xyz: true
      xyz_ray_dir_in_camera_coords: false
      transformer_dim_down_factor: 1.0
      n_hidden_neurons_xyz: 256
      n_layers_xyz: 8
      append_xyz:
      - 5
    implicit_function_SRNHyperNetImplicitFunction_args:
      latent_dim_hypernet: 0
      hypernet_args:
        n_harmonic_functions: 3
        n_hidden_units: 256
        n_layers: 2
        n_hidden_units_hypernet: 256
        n_layers_hypernet: 1
        in_features: 3
        out_features: 256
        xyz_in_camera_coords: false
      pixel_generator_args:
        n_harmonic_functions: 4
        n_hidden_units: 256
        n_hidden_units_color: 128
        n_layers: 2
        in_features: 256
        out_features: 3
        ray_dir_in_camera_coords: false
    implicit_function_SRNImplicitFunction_args:
      raymarch_function_args:
        n_harmonic_functions: 3
        n_hidden_units: 256
        n_layers: 2
        in_features: 3
        out_features: 256
        xyz_in_camera_coords: false
        raymarch_function: null
      pixel_generator_args:
        n_harmonic_functions: 4
        n_hidden_units: 256
        n_hidden_units_color: 128
        n_layers: 2
        in_features: 256
        out_features: 3
        ray_dir_in_camera_coords: false
    implicit_function_VoxelGridImplicitFunction_args:
      harmonic_embedder_xyz_density_args:
        n_harmonic_functions: 6
        omega_0: 1.0
        logspace: true
        append_input: true
      harmonic_embedder_xyz_color_args:
        n_harmonic_functions: 6
        omega_0: 1.0
        logspace: true
        append_input: true
      harmonic_embedder_dir_color_args:
        n_harmonic_functions: 6
        omega_0: 1.0
        logspace: true
        append_input: true
      decoder_density_class_type: MLPDecoder
      decoder_color_class_type: MLPDecoder
      use_multiple_streams: true
      xyz_ray_dir_in_camera_coords: false
      scaffold_calculating_epochs: []
      scaffold_resolution:
      - 128
      - 128
      - 128
      scaffold_empty_space_threshold: 0.001
      scaffold_occupancy_chunk_size: -1
      scaffold_max_pool_kernel_size: 3
      scaffold_filter_points: true
      volume_cropping_epochs: []
      voxel_grid_density_args:
        voxel_grid_class_type: FullResolutionVoxelGrid
        extents:
        - 2.0
        - 2.0
        - 2.0
        translation:
        - 0.0
        - 0.0
        - 0.0
        init_std: 0.1
        init_mean: 0.0
        hold_voxel_grid_as_parameters: true
        param_groups: {}
        voxel_grid_CPFactorizedVoxelGrid_args:
          align_corners: true
          padding: zeros
          mode: bilinear
          n_features: 1
          resolution_changes:
            0:
            - 128
            - 128
            - 128
          n_components: 24
          basis_matrix: true
        voxel_grid_FullResolutionVoxelGrid_args:
          align_corners: true
          padding: zeros
          mode: bilinear
          n_features: 1
          resolution_changes:
            0:
            - 128
            - 128
            - 128
        voxel_grid_VMFactorizedVoxelGrid_args:
          align_corners: true
          padding: zeros
          mode: bilinear
          n_features: 1
          resolution_changes:
            0:
            - 128
            - 128
            - 128
          n_components: null
          distribution_of_components: null
          basis_matrix: true
      voxel_grid_color_args:
        voxel_grid_class_type: FullResolutionVoxelGrid
        extents:
        - 2.0
        - 2.0
        - 2.0
        translation:
        - 0.0
        - 0.0
        - 0.0
        init_std: 0.1
        init_mean: 0.0
        hold_voxel_grid_as_parameters: true
        param_groups: {}
        voxel_grid_CPFactorizedVoxelGrid_args:
          align_corners: true
          padding: zeros
          mode: bilinear
          n_features: 1
          resolution_changes:
            0:
            - 128
            - 128
            - 128
          n_components: 24
          basis_matrix: true
        voxel_grid_FullResolutionVoxelGrid_args:
          align_corners: true
          padding: zeros
          mode: bilinear
          n_features: 1
          resolution_changes:
            0:
            - 128
            - 128
            - 128
        voxel_grid_VMFactorizedVoxelGrid_args:
          align_corners: true
          padding: zeros
          mode: bilinear
          n_features: 1
          resolution_changes:
            0:
            - 128
            - 128
            - 128
          n_components: null
          distribution_of_components: null
          basis_matrix: true
      decoder_density_ElementwiseDecoder_args:
        scale: 1.0
        shift: 0.0
        operation: IDENTITY
      decoder_density_MLPDecoder_args:
        param_groups: {}
        network_args:
          n_layers: 8
          output_dim: 256
          skip_dim: 39
          hidden_dim: 256
          input_skips:
          - 5
          skip_affine_trans: false
          last_layer_bias_init: null
          last_activation: RELU
          use_xavier_init: true
      decoder_color_ElementwiseDecoder_args:
        scale: 1.0
        shift: 0.0
        operation: IDENTITY
      decoder_color_MLPDecoder_args:
        param_groups: {}
        network_args:
          n_layers: 8
          output_dim: 256
          skip_dim: 39
          hidden_dim: 256
          input_skips:
          - 5
          skip_affine_trans: false
          last_layer_bias_init: null
          last_activation: RELU
          use_xavier_init: true
|
||||
coarse_implicit_function_IdrFeatureField_args:
|
||||
d_in: 3
|
||||
d_out: 1
|
||||
dims:
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
geometric_init: true
|
||||
bias: 1.0
|
||||
skip_in: []
|
||||
weight_norm: true
|
||||
n_harmonic_functions_xyz: 0
|
||||
pooled_feature_dim: 0
|
||||
coarse_implicit_function_NeRFormerImplicitFunction_args:
|
||||
n_harmonic_functions_xyz: 10
|
||||
n_harmonic_functions_dir: 4
|
||||
n_hidden_neurons_dir: 128
|
||||
input_xyz: true
|
||||
xyz_ray_dir_in_camera_coords: false
|
||||
transformer_dim_down_factor: 2.0
|
||||
n_hidden_neurons_xyz: 80
|
||||
n_layers_xyz: 2
|
||||
append_xyz:
|
||||
- 1
|
||||
coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args:
|
||||
n_harmonic_functions_xyz: 10
|
||||
n_harmonic_functions_dir: 4
|
||||
n_hidden_neurons_dir: 128
|
||||
input_xyz: true
|
||||
xyz_ray_dir_in_camera_coords: false
|
||||
transformer_dim_down_factor: 1.0
|
||||
n_hidden_neurons_xyz: 256
|
||||
n_layers_xyz: 8
|
||||
append_xyz:
|
||||
- 5
|
||||
coarse_implicit_function_SRNHyperNetImplicitFunction_args:
|
||||
latent_dim_hypernet: 0
|
||||
hypernet_args:
|
||||
n_harmonic_functions: 3
|
||||
n_hidden_units: 256
|
||||
n_layers: 2
|
||||
n_hidden_units_hypernet: 256
|
||||
n_layers_hypernet: 1
|
||||
in_features: 3
|
||||
out_features: 256
|
||||
xyz_in_camera_coords: false
|
||||
pixel_generator_args:
|
||||
n_harmonic_functions: 4
|
||||
n_hidden_units: 256
|
||||
n_hidden_units_color: 128
|
||||
n_layers: 2
|
||||
in_features: 256
|
||||
out_features: 3
|
||||
ray_dir_in_camera_coords: false
|
||||
coarse_implicit_function_SRNImplicitFunction_args:
|
||||
raymarch_function_args:
|
||||
n_harmonic_functions: 3
|
||||
n_hidden_units: 256
|
||||
n_layers: 2
|
||||
in_features: 3
|
||||
out_features: 256
|
||||
xyz_in_camera_coords: false
|
||||
raymarch_function: null
|
||||
pixel_generator_args:
|
||||
n_harmonic_functions: 4
|
||||
n_hidden_units: 256
|
||||
n_hidden_units_color: 128
|
||||
n_layers: 2
|
||||
in_features: 256
|
||||
out_features: 3
|
||||
ray_dir_in_camera_coords: false
|
||||
coarse_implicit_function_VoxelGridImplicitFunction_args:
|
||||
harmonic_embedder_xyz_density_args:
|
||||
n_harmonic_functions: 6
|
||||
omega_0: 1.0
|
||||
logspace: true
|
||||
append_input: true
|
||||
harmonic_embedder_xyz_color_args:
|
||||
n_harmonic_functions: 6
|
||||
omega_0: 1.0
|
||||
logspace: true
|
||||
append_input: true
|
||||
harmonic_embedder_dir_color_args:
|
||||
n_harmonic_functions: 6
|
||||
omega_0: 1.0
|
||||
logspace: true
|
||||
append_input: true
|
||||
decoder_density_class_type: MLPDecoder
|
||||
decoder_color_class_type: MLPDecoder
|
||||
use_multiple_streams: true
|
||||
xyz_ray_dir_in_camera_coords: false
|
||||
scaffold_calculating_epochs: []
|
||||
scaffold_resolution:
|
||||
- 128
|
||||
- 128
|
||||
- 128
|
||||
scaffold_empty_space_threshold: 0.001
|
||||
scaffold_occupancy_chunk_size: -1
|
||||
scaffold_max_pool_kernel_size: 3
|
||||
scaffold_filter_points: true
|
||||
volume_cropping_epochs: []
|
||||
voxel_grid_density_args:
|
||||
voxel_grid_class_type: FullResolutionVoxelGrid
|
||||
extents:
|
||||
- 2.0
|
||||
- 2.0
|
||||
- 2.0
|
||||
translation:
|
||||
- 0.0
|
||||
- 0.0
|
||||
- 0.0
|
||||
init_std: 0.1
|
||||
init_mean: 0.0
|
||||
hold_voxel_grid_as_parameters: true
|
||||
param_groups: {}
|
||||
voxel_grid_CPFactorizedVoxelGrid_args:
|
||||
align_corners: true
|
||||
padding: zeros
|
||||
mode: bilinear
|
||||
n_features: 1
|
||||
resolution_changes:
|
||||
0:
|
||||
- 128
|
||||
- 128
|
||||
- 128
|
||||
n_components: 24
|
||||
basis_matrix: true
|
||||
voxel_grid_FullResolutionVoxelGrid_args:
|
||||
align_corners: true
|
||||
padding: zeros
|
||||
mode: bilinear
|
||||
n_features: 1
|
||||
resolution_changes:
|
||||
0:
|
||||
- 128
|
||||
- 128
|
||||
- 128
|
||||
voxel_grid_VMFactorizedVoxelGrid_args:
|
||||
align_corners: true
|
||||
padding: zeros
|
||||
mode: bilinear
|
||||
n_features: 1
|
||||
resolution_changes:
|
||||
0:
|
||||
- 128
|
||||
- 128
|
||||
- 128
|
||||
n_components: null
|
||||
distribution_of_components: null
|
||||
basis_matrix: true
|
||||
voxel_grid_color_args:
|
||||
voxel_grid_class_type: FullResolutionVoxelGrid
|
||||
extents:
|
||||
- 2.0
|
||||
- 2.0
|
||||
- 2.0
|
||||
translation:
|
||||
- 0.0
|
||||
- 0.0
|
||||
- 0.0
|
||||
init_std: 0.1
|
||||
init_mean: 0.0
|
||||
hold_voxel_grid_as_parameters: true
|
||||
param_groups: {}
|
||||
voxel_grid_CPFactorizedVoxelGrid_args:
|
||||
align_corners: true
|
||||
padding: zeros
|
||||
mode: bilinear
|
||||
n_features: 1
|
||||
resolution_changes:
|
||||
0:
|
||||
- 128
|
||||
- 128
|
||||
- 128
|
||||
n_components: 24
|
||||
basis_matrix: true
|
||||
voxel_grid_FullResolutionVoxelGrid_args:
|
||||
align_corners: true
|
||||
padding: zeros
|
||||
mode: bilinear
|
||||
n_features: 1
|
||||
resolution_changes:
|
||||
0:
|
||||
- 128
|
||||
- 128
|
||||
- 128
|
||||
voxel_grid_VMFactorizedVoxelGrid_args:
|
||||
align_corners: true
|
||||
padding: zeros
|
||||
mode: bilinear
|
||||
n_features: 1
|
||||
resolution_changes:
|
||||
0:
|
||||
- 128
|
||||
- 128
|
||||
- 128
|
||||
n_components: null
|
||||
distribution_of_components: null
|
||||
basis_matrix: true
|
||||
decoder_density_ElementwiseDecoder_args:
|
||||
scale: 1.0
|
||||
shift: 0.0
|
||||
operation: IDENTITY
|
||||
decoder_density_MLPDecoder_args:
|
||||
param_groups: {}
|
||||
network_args:
|
||||
n_layers: 8
|
||||
output_dim: 256
|
||||
skip_dim: 39
|
||||
hidden_dim: 256
|
||||
input_skips:
|
||||
- 5
|
||||
skip_affine_trans: false
|
||||
last_layer_bias_init: null
|
||||
last_activation: RELU
|
||||
use_xavier_init: true
|
||||
decoder_color_ElementwiseDecoder_args:
|
||||
scale: 1.0
|
||||
shift: 0.0
|
||||
operation: IDENTITY
|
||||
decoder_color_MLPDecoder_args:
|
||||
param_groups: {}
|
||||
network_args:
|
||||
n_layers: 8
|
||||
output_dim: 256
|
||||
skip_dim: 39
|
||||
hidden_dim: 256
|
||||
input_skips:
|
||||
- 5
|
||||
skip_affine_trans: false
|
||||
last_layer_bias_init: null
|
||||
last_activation: RELU
|
||||
use_xavier_init: true
|
||||
view_metrics_ViewMetrics_args: {}
|
||||
regularization_metrics_RegularizationMetrics_args: {}
|
||||
optimizer_factory_ImplicitronOptimizerFactory_args:
|
||||
betas:
|
||||
- 0.9
|
||||
|
||||
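The dump above follows Implicitron's pluggable-config convention: each replaceable member gets one `<member>_<Subclass>_args` block per registered subclass, and only the block matching the corresponding `*_class_type` key is instantiated. A minimal sketch of how such a member is declared (hypothetical `RaymarcherBase`/`MyRaymarcher` names, for illustration only; the real classes live elsewhere in Implicitron):

```python
from pytorch3d.implicitron.tools.config import ReplaceableBase, registry


class RaymarcherBase(ReplaceableBase):
    """Replaceable member: the subclass is chosen via a *_class_type key."""


@registry.register
class MyRaymarcher(RaymarcherBase):
    # Dataclass-style fields become the keys of the generated
    # raymarcher_MyRaymarcher_args block in the expanded YAML.
    surface_thickness: int = 1
    blend_output: bool = False
```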
@@ -141,11 +141,7 @@ class TestExperiment(unittest.TestCase):
         # Check that all the pre-prepared configs are valid.
         config_files = []

-        for pattern in (
-            "repro_singleseq*.yaml",
-            "repro_multiseq*.yaml",
-            "overfit_singleseq*.yaml",
-        ):
+        for pattern in ("repro_singleseq*.yaml", "repro_multiseq*.yaml"):
             config_files.extend(
                 [
                     f
@@ -39,7 +39,6 @@ def visualize_reconstruction(
     visdom_server: str = "http://127.0.0.1",
     visdom_port: int = 8097,
     visdom_env: Optional[str] = None,
-    **render_flyaround_kwargs,
 ) -> None:
     """
     Given an `exp_dir` containing a trained Implicitron model, generates videos consisting
@@ -61,8 +60,6 @@ def visualize_reconstruction(
         visdom_server: The address of the visdom server.
         visdom_port: The port of the visdom server.
        visdom_env: If set, defines a custom name for the visdom environment.
-        render_flyaround_kwargs: Keyword arguments passed to the invoked `render_flyaround`
-            function (see `pytorch3d.implicitron.models.visualization.render_flyaround`).
     """

     # In case an output directory is specified use it. If no output_directory
@@ -118,22 +115,20 @@ def visualize_reconstruction(
     # iterate over the sequences in the dataset
     for sequence_name in dataset.sequence_names():
         with torch.no_grad():
-            render_kwargs = {
-                "dataset": dataset,
-                "sequence_name": sequence_name,
-                "model": model,
-                "output_video_path": os.path.join(output_directory, "video"),
-                "n_source_views": n_source_views,
-                "visdom_show_preds": visdom_show_preds,
-                "n_flyaround_poses": n_eval_cameras,
-                "visdom_server": visdom_server,
-                "visdom_port": visdom_port,
-                "visdom_environment": visdom_env,
-                "video_resize": video_size,
-                "device": device,
-                **render_flyaround_kwargs,
-            }
-            render_flyaround(**render_kwargs)
+            render_flyaround(
+                dataset=dataset,
+                sequence_name=sequence_name,
+                model=model,
+                output_video_path=os.path.join(output_directory, "video"),
+                n_source_views=n_source_views,
+                visdom_show_preds=visdom_show_preds,
+                n_flyaround_poses=n_eval_cameras,
+                visdom_server=visdom_server,
+                visdom_port=visdom_port,
+                visdom_environment=visdom_env,
+                video_resize=video_size,
+                device=device,
+            )


 enable_get_default_args(visualize_reconstruction)
@@ -4,4 +4,4 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

-__version__ = "0.7.3"
+__version__ = "0.7.2"
@@ -57,3 +57,19 @@ def get_device(x, device: Optional[Device] = None) -> torch.device:

     # Default device is cpu
     return torch.device("cpu")
+
+
+# Provide get_origin and get_args even in Python 3.7.
+
+if sys.version_info >= (3, 8, 0):
+    from typing import get_args, get_origin
+elif sys.version_info >= (3, 7, 0):
+
+    def get_origin(cls):  # pragma: no cover
+        return getattr(cls, "__origin__", None)
+
+    def get_args(cls):  # pragma: no cover
+        return getattr(cls, "__args__", None)
+
+else:
+    raise ImportError("This module requires Python 3.7+")
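The hunk above backfills `typing.get_origin` and `typing.get_args` on Python 3.7 by reading the `__origin__`/`__args__` attributes directly. For reference, this is the standard-library behaviour the fallback mimics:

```python
from typing import Dict, Optional, get_args, get_origin

# get_origin unwraps a generic alias to its runtime container class;
# get_args returns the type parameters it was subscripted with.
assert get_origin(Dict[str, int]) is dict
assert get_args(Dict[str, int]) == (str, int)

# Optional[X] is Union[X, None], so NoneType shows up among the args.
assert get_args(Optional[float]) == (float, type(None))
```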
@@ -7,7 +7,6 @@
  */

 #pragma once
-#include <torch/csrc/autograd/VariableTypeUtils.h>
 #include <torch/extension.h>
 #include <cstdio>
 #include <tuple>
@@ -97,8 +96,6 @@ inline void PointsToVolumesForward(
       point_weight,
       align_corners,
       splat);
-  torch::autograd::increment_version(volume_features);
-  torch::autograd::increment_version(volume_densities);
   return;
 #else
   AT_ERROR("Not compiled with GPU support.");
@@ -6,7 +6,6 @@
  * LICENSE file in the root directory of this source tree.
  */

-#include <torch/csrc/autograd/VariableTypeUtils.h>
 #include <torch/extension.h>
 #include <algorithm>
 #include <cmath>
@@ -149,8 +148,6 @@ void PointsToVolumesForwardCpu(
       }
     }
   }
-  torch::autograd::increment_version(volume_features);
-  torch::autograd::increment_version(volume_densities);
 }

 // With nearest, the only smooth dependence is that volume features
@@ -7,7 +7,6 @@
  */

 #pragma once
-#include <torch/csrc/autograd/VariableTypeUtils.h>
 #include <torch/extension.h>
 #include <cstdio>
 #include <tuple>
@@ -64,7 +63,6 @@ inline void SamplePdf(
 #ifdef WITH_CUDA
   CHECK_CUDA(weights);
   CHECK_CONTIGUOUS_CUDA(outputs);
-  torch::autograd::increment_version(outputs);
   SamplePdfCuda(bins, weights, outputs, eps);
   return;
 #else
@@ -6,7 +6,6 @@
  * LICENSE file in the root directory of this source tree.
  */

-#include <torch/csrc/autograd/VariableTypeUtils.h>
 #include <torch/extension.h>
 #include <algorithm>
 #include <thread>
@@ -138,5 +137,4 @@ void SamplePdfCpu(
   for (auto&& thread : threads) {
     thread.join();
   }
-  torch::autograd::increment_version(outputs);
 }
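The four C++ hunks above all concern `torch::autograd::increment_version`, which bumps a tensor's autograd version counter after a kernel has written into `outputs`/`volume_*` in place. The Python-visible counterpart is the counter that every registered in-place op maintains; a small sketch, peeking at the private `_version` attribute purely for illustration:

```python
import torch

t = torch.zeros(3)
print(t._version)  # 0 on a freshly created tensor

t.add_(1.0)  # registered in-place ops bump the version counter automatically
print(t._version)  # 1

# Autograd compares the version saved at forward time with the current one
# during backward; custom kernels that mutate a tensor buffer directly have
# to bump the counter themselves, which is what increment_version does in C++.
```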
@@ -12,15 +12,14 @@ import torch
 from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
 from torch.utils.data import (
     BatchSampler,
     ConcatDataset,
     ChainDataset,
     DataLoader,
     RandomSampler,
     Sampler,
 )

-from .dataset_base import DatasetBase
+from .dataset_base import DatasetBase, FrameData
 from .dataset_map_provider import DatasetMap
-from .frame_data import FrameData
 from .scene_batch_sampler import SceneBatchSampler
 from .utils import is_known_frame_scalar
@@ -483,7 +482,7 @@ class SequenceDataLoaderMapProvider(DataLoaderMapProviderBase):
             num_batches=num_batches,
         )
         return DataLoader(
-            ConcatDataset([dataset, train_dataset]),
+            ChainDataset([dataset, train_dataset]),
             batch_sampler=sampler,
             **data_loader_kwargs,
         )
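The hunk above changes the combinator used to join the evaluation dataset with the train dataset before wrapping them in a `DataLoader`. The two differ in kind: `ConcatDataset` merges map-style datasets and keeps integer indexing (so batch samplers keep working), while `ChainDataset` chains iterable-style datasets. A minimal map-style sketch:

```python
import torch
from torch.utils.data import ConcatDataset, DataLoader, TensorDataset

# Two small map-style datasets of length 3 and 2.
a = TensorDataset(torch.arange(3))
b = TensorDataset(torch.arange(100, 102))

combined = ConcatDataset([a, b])
assert len(combined) == 5
assert int(combined[3][0]) == 100  # indices past len(a) fall through to b

loader = DataLoader(combined, batch_size=2)
for batch in loader:
    pass  # batches draw from both underlying datasets
```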
@@ -13,8 +13,13 @@ from pytorch3d.implicitron.tools.config import (
 )
 from pytorch3d.renderer.cameras import CamerasBase

+from .blender_dataset_map_provider import BlenderDatasetMapProvider  # noqa
 from .data_loader_map_provider import DataLoaderMap, DataLoaderMapProviderBase
 from .dataset_map_provider import DatasetMap, DatasetMapProviderBase
+from .json_index_dataset_map_provider import JsonIndexDatasetMapProvider  # noqa
+from .json_index_dataset_map_provider_v2 import JsonIndexDatasetMapProviderV2  # noqa
+from .llff_dataset_map_provider import LlffDatasetMapProvider  # noqa
+from .rendered_mesh_dataset_map_provider import RenderedMeshDatasetMapProvider  # noqa


 class DataSourceBase(ReplaceableBase):
@@ -29,7 +34,6 @@ class DataSourceBase(ReplaceableBase):
     @property
     def all_train_cameras(self) -> Optional[CamerasBase]:
         """
-        DEPRECATED! The property will be removed in future versions.
         If the data is all for a single scene, a list
         of the known training cameras for that scene, which is
         used for evaluating the viewpoint difficulty of the
@@ -55,26 +59,6 @@ class ImplicitronDataSource(DataSourceBase):  # pyre-ignore[13]
     data_loader_map_provider: DataLoaderMapProviderBase
     data_loader_map_provider_class_type: str = "SequenceDataLoaderMapProvider"

-    @classmethod
-    def pre_expand(cls) -> None:
-        # use try/finally to bypass cinder's lazy imports
-        try:
-            from .blender_dataset_map_provider import (  # noqa: F401
-                BlenderDatasetMapProvider,
-            )
-            from .json_index_dataset_map_provider import (  # noqa: F401
-                JsonIndexDatasetMapProvider,
-            )
-            from .json_index_dataset_map_provider_v2 import (  # noqa: F401
-                JsonIndexDatasetMapProviderV2,
-            )
-            from .llff_dataset_map_provider import LlffDatasetMapProvider  # noqa: F401
-            from .rendered_mesh_dataset_map_provider import (  # noqa: F401
-                RenderedMeshDatasetMapProvider,
-            )
-        finally:
-            pass
-
     def __post_init__(self):
         run_auto_creation(self)
         self._all_train_cameras_cache: Optional[Tuple[Optional[CamerasBase]]] = None
@@ -86,9 +70,6 @@ class ImplicitronDataSource(DataSourceBase):  # pyre-ignore[13]

     @property
     def all_train_cameras(self) -> Optional[CamerasBase]:
-        """
-        DEPRECATED! The property will be removed in future versions.
-        """
         if self._all_train_cameras_cache is None:  # pyre-ignore[16]
             all_train_cameras = self.dataset_map_provider.get_all_train_cameras()
             self._all_train_cameras_cache = (all_train_cameras,)
@@ -5,27 +5,217 @@
 # LICENSE file in the root directory of this source tree.

 from collections import defaultdict
-from dataclasses import dataclass
+from dataclasses import dataclass, field, fields
 from typing import (
     Any,
     ClassVar,
     Dict,
     Iterable,
     Iterator,
     List,
     Mapping,
     Optional,
     Sequence,
     Tuple,
     Type,
     Union,
 )

 import numpy as np
 import torch
+from pytorch3d.renderer.camera_utils import join_cameras_as_batch
+from pytorch3d.renderer.cameras import CamerasBase, PerspectiveCameras
+from pytorch3d.structures.pointclouds import join_pointclouds_as_batch, Pointclouds

-from pytorch3d.implicitron.dataset.frame_data import FrameData
-from pytorch3d.implicitron.dataset.utils import GenericWorkaround
+
+@dataclass
+class FrameData(Mapping[str, Any]):
+    """
+    A type of the elements returned by indexing the dataset object.
+    It can represent both individual frames and batches of thereof;
+    in this documentation, the sizes of tensors refer to single frames;
+    add the first batch dimension for the collation result.
+
+    Args:
+        frame_number: The number of the frame within its sequence.
+            0-based continuous integers.
+        sequence_name: The unique name of the frame's sequence.
+        sequence_category: The object category of the sequence.
+        frame_timestamp: The time elapsed since the start of a sequence in sec.
+        image_size_hw: The size of the image in pixels; (height, width) tensor
+            of shape (2,).
+        image_path: The qualified path to the loaded image (with dataset_root).
+        image_rgb: A Tensor of shape `(3, H, W)` holding the RGB image
+            of the frame; elements are floats in [0, 1].
+        mask_crop: A binary mask of shape `(1, H, W)` denoting the valid image
+            regions. Regions can be invalid (mask_crop[i,j]=0) in case they
+            are a result of zero-padding of the image after cropping around
+            the object bounding box; elements are floats in {0.0, 1.0}.
+        depth_path: The qualified path to the frame's depth map.
+        depth_map: A float Tensor of shape `(1, H, W)` holding the depth map
+            of the frame; values correspond to distances from the camera;
+            use `depth_mask` and `mask_crop` to filter for valid pixels.
+        depth_mask: A binary mask of shape `(1, H, W)` denoting pixels of the
+            depth map that are valid for evaluation, they have been checked for
+            consistency across views; elements are floats in {0.0, 1.0}.
+        mask_path: A qualified path to the foreground probability mask.
+        fg_probability: A Tensor of `(1, H, W)` denoting the probability of the
+            pixels belonging to the captured object; elements are floats
+            in [0, 1].
+        bbox_xywh: The bounding box tightly enclosing the foreground object in the
+            format (x0, y0, width, height). The convention assumes that
+            `x0+width` and `y0+height` includes the boundary of the box.
+            I.e., to slice out the corresponding crop from an image tensor `I`
+            we execute `crop = I[..., y0:y0+height, x0:x0+width]`
+        crop_bbox_xywh: The bounding box denoting the boundaries of `image_rgb`
+            in the original image coordinates in the format (x0, y0, width, height).
+            The convention is the same as for `bbox_xywh`. `crop_bbox_xywh` differs
+            from `bbox_xywh` due to padding (which can happen e.g. due to
+            setting `JsonIndexDataset.box_crop_context > 0`)
+        camera: A PyTorch3D camera object corresponding the frame's viewpoint,
+            corrected for cropping if it happened.
+        camera_quality_score: The score proportional to the confidence of the
+            frame's camera estimation (the higher the more accurate).
+        point_cloud_quality_score: The score proportional to the accuracy of the
+            frame's sequence point cloud (the higher the more accurate).
+        sequence_point_cloud_path: The path to the sequence's point cloud.
+        sequence_point_cloud: A PyTorch3D Pointclouds object holding the
+            point cloud corresponding to the frame's sequence. When the object
+            represents a batch of frames, point clouds may be deduplicated;
+            see `sequence_point_cloud_idx`.
+        sequence_point_cloud_idx: Integer indices mapping frame indices to the
+            corresponding point clouds in `sequence_point_cloud`; to get the
+            corresponding point cloud to `image_rgb[i]`, use
+            `sequence_point_cloud[sequence_point_cloud_idx[i]]`.
+        frame_type: The type of the loaded frame specified in
+            `subset_lists_file`, if provided.
+        meta: A dict for storing additional frame information.
+    """
+
+    frame_number: Optional[torch.LongTensor]
+    sequence_name: Union[str, List[str]]
+    sequence_category: Union[str, List[str]]
+    frame_timestamp: Optional[torch.Tensor] = None
+    image_size_hw: Optional[torch.Tensor] = None
+    image_path: Union[str, List[str], None] = None
+    image_rgb: Optional[torch.Tensor] = None
+    # masks out padding added due to cropping the square bit
+    mask_crop: Optional[torch.Tensor] = None
+    depth_path: Union[str, List[str], None] = None
+    depth_map: Optional[torch.Tensor] = None
+    depth_mask: Optional[torch.Tensor] = None
+    mask_path: Union[str, List[str], None] = None
+    fg_probability: Optional[torch.Tensor] = None
+    bbox_xywh: Optional[torch.Tensor] = None
+    crop_bbox_xywh: Optional[torch.Tensor] = None
+    camera: Optional[PerspectiveCameras] = None
+    camera_quality_score: Optional[torch.Tensor] = None
+    point_cloud_quality_score: Optional[torch.Tensor] = None
+    sequence_point_cloud_path: Union[str, List[str], None] = None
+    sequence_point_cloud: Optional[Pointclouds] = None
+    sequence_point_cloud_idx: Optional[torch.Tensor] = None
+    frame_type: Union[str, List[str], None] = None  # known | unseen
+    meta: dict = field(default_factory=lambda: {})
+
+    def to(self, *args, **kwargs):
+        new_params = {}
+        for f in fields(self):
+            value = getattr(self, f.name)
+            if isinstance(value, (torch.Tensor, Pointclouds, CamerasBase)):
+                new_params[f.name] = value.to(*args, **kwargs)
+            else:
+                new_params[f.name] = value
+        return type(self)(**new_params)
+
+    def cpu(self):
+        return self.to(device=torch.device("cpu"))
+
+    def cuda(self):
+        return self.to(device=torch.device("cuda"))
+
+    # the following functions make sure **frame_data can be passed to functions
+    def __iter__(self):
+        for f in fields(self):
+            yield f.name
+
+    def __getitem__(self, key):
+        return getattr(self, key)
+
+    def __len__(self):
+        return len(fields(self))
+
+    @classmethod
+    def collate(cls, batch):
+        """
+        Given a list objects `batch` of class `cls`, collates them into a batched
+        representation suitable for processing with deep networks.
+        """
+
+        elem = batch[0]
+
+        if isinstance(elem, cls):
+            pointcloud_ids = [id(el.sequence_point_cloud) for el in batch]
+            id_to_idx = defaultdict(list)
+            for i, pc_id in enumerate(pointcloud_ids):
+                id_to_idx[pc_id].append(i)
+
+            sequence_point_cloud = []
+            sequence_point_cloud_idx = -np.ones((len(batch),))
+            for i, ind in enumerate(id_to_idx.values()):
+                sequence_point_cloud_idx[ind] = i
+                sequence_point_cloud.append(batch[ind[0]].sequence_point_cloud)
+            assert (sequence_point_cloud_idx >= 0).all()
+
+            override_fields = {
+                "sequence_point_cloud": sequence_point_cloud,
+                "sequence_point_cloud_idx": sequence_point_cloud_idx.tolist(),
+            }
+            # note that the pre-collate value of sequence_point_cloud_idx is unused
+
+            collated = {}
+            for f in fields(elem):
+                list_values = override_fields.get(
+                    f.name, [getattr(d, f.name) for d in batch]
+                )
+                collated[f.name] = (
+                    cls.collate(list_values)
+                    if all(list_value is not None for list_value in list_values)
+                    else None
+                )
+            return cls(**collated)
+
+        elif isinstance(elem, Pointclouds):
+            return join_pointclouds_as_batch(batch)
+
+        elif isinstance(elem, CamerasBase):
+            # TODO: don't store K; enforce working in NDC space
+            return join_cameras_as_batch(batch)
+        else:
+            return torch.utils.data._utils.collate.default_collate(batch)
+
+
+class _GenericWorkaround:
+    """
+    OmegaConf.structured has a weirdness when you try to apply
+    it to a dataclass whose first base class is a Generic which is not
+    Dict. The issue is with a function called get_dict_key_value_types
+    in omegaconf/_utils.py.
+    For example this fails:
+
+        @dataclass(eq=False)
+        class D(torch.utils.data.Dataset[int]):
+            a: int = 3
+
+        OmegaConf.structured(D)
+
+    We avoid the problem by adding this class as an extra base class.
+    """
+
+    pass
+
+
 @dataclass(eq=False)
-class DatasetBase(GenericWorkaround, torch.utils.data.Dataset[FrameData]):
+class DatasetBase(_GenericWorkaround, torch.utils.data.Dataset[FrameData]):
     """
     Base class to describe a dataset to be used with Implicitron.
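Since the `FrameData.collate` method (re)introduced above deduplicates the per-sequence point clouds and knows how to batch cameras and point clouds, it is designed to be handed to a `DataLoader` as the collate function. A hedged sketch, assuming `dataset` is any `DatasetBase` yielding `FrameData` elements:

```python
from torch.utils.data import DataLoader

loader = DataLoader(
    dataset,  # any DatasetBase from above (assumed to exist)
    batch_size=4,
    collate_fn=FrameData.collate,  # batches tensors, cameras, point clouds
)

for frame_data in loader:
    # frame_data is itself a FrameData whose tensors carry a leading batch
    # dimension; it is a Mapping, so **frame_data also works in calls.
    images = frame_data.image_rgb  # (B, 3, H, W) or None
```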
@@ -47,7 +237,7 @@ class DatasetBase(GenericWorkaround, torch.utils.data.Dataset[FrameData]):
         raise NotImplementedError()

     def get_frame_numbers_and_timestamps(
-        self, idxs: Sequence[int], subset_filter: Optional[Sequence[str]] = None
+        self, idxs: Sequence[int]
     ) -> List[Tuple[int, float]]:
         """
         If the sequences in the dataset are videos rather than
@@ -61,9 +251,7 @@ class DatasetBase(GenericWorkaround, torch.utils.data.Dataset[FrameData]):
         frames.

         Args:
-            idxs: frame index in self
-            subset_filter: If given, an index in idxs is ignored if the
-                corresponding frame is not in any of the named subsets.
+            idx: frame index in self

         Returns:
             tuple of
@@ -103,7 +291,7 @@ class DatasetBase(GenericWorkaround, torch.utils.data.Dataset[FrameData]):
         return dict(c2seq)

     def sequence_frames_in_order(
-        self, seq_name: str, subset_filter: Optional[Sequence[str]] = None
+        self, seq_name: str
     ) -> Iterator[Tuple[float, int, int]]:
         """Returns an iterator over the frame indices in a given sequence.
         We attempt to first sort by timestamp (if they are available),
@@ -120,9 +308,7 @@ class DatasetBase(GenericWorkaround, torch.utils.data.Dataset[FrameData]):
         """
         # pyre-ignore[16]
         seq_frame_indices = self._seq_to_idx[seq_name]
-        nos_timestamps = self.get_frame_numbers_and_timestamps(
-            seq_frame_indices, subset_filter
-        )
+        nos_timestamps = self.get_frame_numbers_and_timestamps(seq_frame_indices)

         yield from sorted(
             [
@@ -131,13 +317,11 @@ class DatasetBase(GenericWorkaround, torch.utils.data.Dataset[FrameData]):
             ]
         )

-    def sequence_indices_in_order(
-        self, seq_name: str, subset_filter: Optional[Sequence[str]] = None
-    ) -> Iterator[int]:
+    def sequence_indices_in_order(self, seq_name: str) -> Iterator[int]:
         """Same as `sequence_frames_in_order` but returns the iterator over
         only dataset indices.
         """
-        for _, _, idx in self.sequence_frames_in_order(seq_name, subset_filter):
+        for _, _, idx in self.sequence_frames_in_order(seq_name):
             yield idx

     # frame_data_type is the actual type of frames returned by the dataset.
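Taken together, the two methods changed above give a way to walk a sequence in temporal order. A small usage sketch against the new-side signatures, assuming `dataset` is a `DatasetBase` and `seq_name` one of its `sequence_names()`:

```python
# Frames come back sorted by timestamp when available, else by frame number;
# each element is a (timestamp, frame_number, dataset_index) triple.
for ts, frame_no, idx in dataset.sequence_frames_in_order(seq_name):
    frame_data = dataset[idx]

# Or, when only the dataset indices are needed:
ordered_indices = list(dataset.sequence_indices_in_order(seq_name))
```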
@@ -95,7 +95,6 @@ class DatasetMapProviderBase(ReplaceableBase):

     def get_all_train_cameras(self) -> Optional[CamerasBase]:
         """
-        DEPRECATED! The function will be removed in future versions.
         If the data is all for a single scene, returns a list
         of the known training cameras for that scene, which is
         used for evaluating the difficulty of the unknown
@@ -1,728 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-import os
-from abc import ABC, abstractmethod
-from collections import defaultdict
-from dataclasses import dataclass, field, fields
-from typing import (
-    Any,
-    ClassVar,
-    Generic,
-    List,
-    Mapping,
-    Optional,
-    Tuple,
-    Type,
-    TypeVar,
-    Union,
-)
-
-import numpy as np
-import torch
-
-from pytorch3d.implicitron.dataset import types
-from pytorch3d.implicitron.dataset.utils import (
-    adjust_camera_to_bbox_crop_,
-    adjust_camera_to_image_scale_,
-    bbox_xyxy_to_xywh,
-    clamp_box_to_image_bounds_and_round,
-    crop_around_box,
-    GenericWorkaround,
-    get_bbox_from_mask,
-    get_clamp_bbox,
-    load_depth,
-    load_depth_mask,
-    load_image,
-    load_mask,
-    load_pointcloud,
-    rescale_bbox,
-    resize_image,
-    safe_as_tensor,
-)
-from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
-from pytorch3d.renderer.camera_utils import join_cameras_as_batch
-from pytorch3d.renderer.cameras import CamerasBase, PerspectiveCameras
-from pytorch3d.structures.pointclouds import join_pointclouds_as_batch, Pointclouds
-
-
-@dataclass
-class FrameData(Mapping[str, Any]):
-    """
-    A type of the elements returned by indexing the dataset object.
-    It can represent both individual frames and batches of thereof;
-    in this documentation, the sizes of tensors refer to single frames;
-    add the first batch dimension for the collation result.
-
-    Args:
-        frame_number: The number of the frame within its sequence.
-            0-based continuous integers.
-        sequence_name: The unique name of the frame's sequence.
-        sequence_category: The object category of the sequence.
-        frame_timestamp: The time elapsed since the start of a sequence in sec.
-        image_size_hw: The size of the original image in pixels; (height, width)
-            tensor of shape (2,). Note that it is optional, e.g. it can be `None`
-            if the frame annotation has no size ans image_rgb has not [yet] been
-            loaded. Image-less FrameData is valid but mutators like crop/resize
-            may fail if the original image size cannot be deduced.
-        effective_image_size_hw: The size of the image after mutations such as
-            crop/resize in pixels; (height, width). if the image has not been mutated,
-            it is equal to `image_size_hw`. Note that it is also optional, for the
-            same reason as `image_size_hw`.
-        image_path: The qualified path to the loaded image (with dataset_root).
-        image_rgb: A Tensor of shape `(3, H, W)` holding the RGB image
-            of the frame; elements are floats in [0, 1].
-        mask_crop: A binary mask of shape `(1, H, W)` denoting the valid image
-            regions. Regions can be invalid (mask_crop[i,j]=0) in case they
-            are a result of zero-padding of the image after cropping around
-            the object bounding box; elements are floats in {0.0, 1.0}.
-        depth_path: The qualified path to the frame's depth map.
-        depth_map: A float Tensor of shape `(1, H, W)` holding the depth map
-            of the frame; values correspond to distances from the camera;
-            use `depth_mask` and `mask_crop` to filter for valid pixels.
-        depth_mask: A binary mask of shape `(1, H, W)` denoting pixels of the
-            depth map that are valid for evaluation, they have been checked for
-            consistency across views; elements are floats in {0.0, 1.0}.
-        mask_path: A qualified path to the foreground probability mask.
-        fg_probability: A Tensor of `(1, H, W)` denoting the probability of the
-            pixels belonging to the captured object; elements are floats
-            in [0, 1].
-        bbox_xywh: The bounding box tightly enclosing the foreground object in the
-            format (x0, y0, width, height). The convention assumes that
-            `x0+width` and `y0+height` includes the boundary of the box.
-            I.e., to slice out the corresponding crop from an image tensor `I`
-            we execute `crop = I[..., y0:y0+height, x0:x0+width]`
-        crop_bbox_xywh: The bounding box denoting the boundaries of `image_rgb`
-            in the original image coordinates in the format (x0, y0, width, height).
-            The convention is the same as for `bbox_xywh`. `crop_bbox_xywh` differs
-            from `bbox_xywh` due to padding (which can happen e.g. due to
-            setting `JsonIndexDataset.box_crop_context > 0`)
-        camera: A PyTorch3D camera object corresponding the frame's viewpoint,
-            corrected for cropping if it happened.
-        camera_quality_score: The score proportional to the confidence of the
-            frame's camera estimation (the higher the more accurate).
-        point_cloud_quality_score: The score proportional to the accuracy of the
-            frame's sequence point cloud (the higher the more accurate).
-        sequence_point_cloud_path: The path to the sequence's point cloud.
-        sequence_point_cloud: A PyTorch3D Pointclouds object holding the
-            point cloud corresponding to the frame's sequence. When the object
-            represents a batch of frames, point clouds may be deduplicated;
-            see `sequence_point_cloud_idx`.
-        sequence_point_cloud_idx: Integer indices mapping frame indices to the
-            corresponding point clouds in `sequence_point_cloud`; to get the
-            corresponding point cloud to `image_rgb[i]`, use
-            `sequence_point_cloud[sequence_point_cloud_idx[i]]`.
-        frame_type: The type of the loaded frame specified in
-            `subset_lists_file`, if provided.
-        meta: A dict for storing additional frame information.
-    """
-
-    frame_number: Optional[torch.LongTensor]
-    sequence_name: Union[str, List[str]]
-    sequence_category: Union[str, List[str]]
-    frame_timestamp: Optional[torch.Tensor] = None
-    image_size_hw: Optional[torch.LongTensor] = None
-    effective_image_size_hw: Optional[torch.LongTensor] = None
-    image_path: Union[str, List[str], None] = None
-    image_rgb: Optional[torch.Tensor] = None
-    # masks out padding added due to cropping the square bit
-    mask_crop: Optional[torch.Tensor] = None
-    depth_path: Union[str, List[str], None] = None
-    depth_map: Optional[torch.Tensor] = None
-    depth_mask: Optional[torch.Tensor] = None
-    mask_path: Union[str, List[str], None] = None
-    fg_probability: Optional[torch.Tensor] = None
-    bbox_xywh: Optional[torch.Tensor] = None
-    crop_bbox_xywh: Optional[torch.Tensor] = None
-    camera: Optional[PerspectiveCameras] = None
-    camera_quality_score: Optional[torch.Tensor] = None
-    point_cloud_quality_score: Optional[torch.Tensor] = None
-    sequence_point_cloud_path: Union[str, List[str], None] = None
-    sequence_point_cloud: Optional[Pointclouds] = None
-    sequence_point_cloud_idx: Optional[torch.Tensor] = None
-    frame_type: Union[str, List[str], None] = None  # known | unseen
-    meta: dict = field(default_factory=lambda: {})
-
-    # NOTE that batching resets this attribute
-    _uncropped: bool = field(init=False, default=True)
-
-    def to(self, *args, **kwargs):
-        new_params = {}
-        for field_name in iter(self):
-            value = getattr(self, field_name)
-            if isinstance(value, (torch.Tensor, Pointclouds, CamerasBase)):
-                new_params[field_name] = value.to(*args, **kwargs)
-            else:
-                new_params[field_name] = value
-        frame_data = type(self)(**new_params)
-        frame_data._uncropped = self._uncropped
-        return frame_data
-
-    def cpu(self):
-        return self.to(device=torch.device("cpu"))
-
-    def cuda(self):
-        return self.to(device=torch.device("cuda"))
-
-    # the following functions make sure **frame_data can be passed to functions
-    def __iter__(self):
-        for f in fields(self):
-            if f.name.startswith("_"):
-                continue
-
-            yield f.name
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __len__(self):
-        return sum(1 for f in iter(self))
-
-    def crop_by_metadata_bbox_(
-        self,
-        box_crop_context: float,
-    ) -> None:
-        """Crops the frame data in-place by (possibly expanded) bounding box.
-        The bounding box is taken from the object state (usually taken from
-        the frame annotation or estimated from the foregroubnd mask).
-        If the expanded bounding box does not fit the image, it is clamped,
-        i.e. the image is *not* padded.
-
-        Args:
-            box_crop_context: rate of expansion for bbox; 0 means no expansion,
-
-        Raises:
-            ValueError: If the object does not contain a bounding box (usually when no
-                mask annotation is provided)
-            ValueError: If the frame data have been cropped or resized, thus the intrinsic
-                bounding box is not valid for the current image size.
-            ValueError: If the frame does not have an image size (usually a corner case
-                when no image has been loaded)
-        """
-        if self.bbox_xywh is None:
-            raise ValueError("Attempted cropping by metadata with empty bounding box")
-
-        if not self._uncropped:
-            raise ValueError(
-                "Trying to apply the metadata bounding box to already cropped "
-                "or resized image; coordinates have changed."
-            )
-
-        self._crop_by_bbox_(
-            box_crop_context,
-            self.bbox_xywh,
-        )
-
-    def crop_by_given_bbox_(
-        self,
-        box_crop_context: float,
-        bbox_xywh: torch.Tensor,
-    ) -> None:
-        """Crops the frame data in-place by (possibly expanded) bounding box.
-        If the expanded bounding box does not fit the image, it is clamped,
-        i.e. the image is *not* padded.
-
-        Args:
-            box_crop_context: rate of expansion for bbox; 0 means no expansion,
-            bbox_xywh: bounding box in [x0, y0, width, height] format. If float
-                tensor, values are floored (after converting to [x0, y0, x1, y1]).
-
-        Raises:
-            ValueError: If the frame does not have an image size (usually a corner case
-                when no image has been loaded)
-        """
-        self._crop_by_bbox_(
-            box_crop_context,
-            bbox_xywh,
-        )
-
-    def _crop_by_bbox_(
-        self,
-        box_crop_context: float,
-        bbox_xywh: torch.Tensor,
-    ) -> None:
-        """Crops the frame data in-place by (possibly expanded) bounding box.
-        If the expanded bounding box does not fit the image, it is clamped,
-        i.e. the image is *not* padded.
-
-        Args:
-            box_crop_context: rate of expansion for bbox; 0 means no expansion,
-            bbox_xywh: bounding box in [x0, y0, width, height] format. If float
-                tensor, values are floored (after converting to [x0, y0, x1, y1]).
-
-        Raises:
-            ValueError: If the frame does not have an image size (usually a corner case
-                when no image has been loaded)
-        """
-        effective_image_size_hw = self.effective_image_size_hw
-        if effective_image_size_hw is None:
-            raise ValueError("Calling crop on image-less FrameData")
-
-        bbox_xyxy = get_clamp_bbox(
-            bbox_xywh,
-            image_path=self.image_path,  # pyre-ignore
-            box_crop_context=box_crop_context,
-        )
-        clamp_bbox_xyxy = clamp_box_to_image_bounds_and_round(
-            bbox_xyxy,
-            image_size_hw=tuple(self.effective_image_size_hw),  # pyre-ignore
-        )
-        crop_bbox_xywh = bbox_xyxy_to_xywh(clamp_bbox_xyxy)
-
-        if self.fg_probability is not None:
-            self.fg_probability = crop_around_box(
-                self.fg_probability,
-                clamp_bbox_xyxy,
-                self.mask_path,  # pyre-ignore
-            )
-        if self.image_rgb is not None:
-            self.image_rgb = crop_around_box(
-                self.image_rgb,
-                clamp_bbox_xyxy,
-                self.image_path,  # pyre-ignore
-            )
-
-        depth_map = self.depth_map
-        if depth_map is not None:
-            clamp_bbox_xyxy_depth = rescale_bbox(
-                clamp_bbox_xyxy, tuple(depth_map.shape[-2:]), effective_image_size_hw
-            ).long()
-            self.depth_map = crop_around_box(
-                depth_map,
-                clamp_bbox_xyxy_depth,
-                self.depth_path,  # pyre-ignore
-            )
-
-        depth_mask = self.depth_mask
-        if depth_mask is not None:
-            clamp_bbox_xyxy_depth = rescale_bbox(
-                clamp_bbox_xyxy, tuple(depth_mask.shape[-2:]), effective_image_size_hw
-            ).long()
-            self.depth_mask = crop_around_box(
-                depth_mask,
-                clamp_bbox_xyxy_depth,
-                self.mask_path,  # pyre-ignore
-            )
-
-        # changing principal_point according to bbox_crop
-        if self.camera is not None:
-            adjust_camera_to_bbox_crop_(
-                camera=self.camera,
-                image_size_wh=effective_image_size_hw.flip(dims=[-1]),
-                clamp_bbox_xywh=crop_bbox_xywh,
-            )
-
-        # pyre-ignore
-        self.effective_image_size_hw = crop_bbox_xywh[..., 2:].flip(dims=[-1])
-        self._uncropped = False
-
-    def resize_frame_(self, new_size_hw: torch.LongTensor) -> None:
-        """Resizes frame data in-place according to given dimensions.
-
-        Args:
-            new_size_hw: target image size [height, width], a LongTensor of shape (2,)
-
-        Raises:
-            ValueError: If the frame does not have an image size (usually a corner case
-                when no image has been loaded)
-        """
-
-        effective_image_size_hw = self.effective_image_size_hw
-        if effective_image_size_hw is None:
-            raise ValueError("Calling resize on image-less FrameData")
-
-        image_height, image_width = new_size_hw.tolist()
-
-        if self.fg_probability is not None:
-            self.fg_probability, _, _ = resize_image(
-                self.fg_probability,
-                image_height=image_height,
-                image_width=image_width,
-                mode="nearest",
-            )
-
-        if self.image_rgb is not None:
-            self.image_rgb, _, self.mask_crop = resize_image(
-                self.image_rgb, image_height=image_height, image_width=image_width
-            )
-
-        if self.depth_map is not None:
-            self.depth_map, _, _ = resize_image(
-                self.depth_map,
-                image_height=image_height,
-                image_width=image_width,
-                mode="nearest",
-            )
-
-        if self.depth_mask is not None:
-            self.depth_mask, _, _ = resize_image(
-                self.depth_mask,
-                image_height=image_height,
-                image_width=image_width,
-                mode="nearest",
-            )
-
-        if self.camera is not None:
-            if self.image_size_hw is None:
-                raise ValueError(
-                    "image_size_hw has to be defined for resizing FrameData with cameras."
-                )
-            adjust_camera_to_image_scale_(
-                camera=self.camera,
-                original_size_wh=effective_image_size_hw.flip(dims=[-1]),
-                new_size_wh=new_size_hw.flip(dims=[-1]),  # pyre-ignore
-            )
-
-        self.effective_image_size_hw = new_size_hw
-        self._uncropped = False
-
-    @classmethod
-    def collate(cls, batch):
-        """
-        Given a list objects `batch` of class `cls`, collates them into a batched
-        representation suitable for processing with deep networks.
-        """
-
-        elem = batch[0]
-
-        if isinstance(elem, cls):
-            pointcloud_ids = [id(el.sequence_point_cloud) for el in batch]
-            id_to_idx = defaultdict(list)
-            for i, pc_id in enumerate(pointcloud_ids):
-                id_to_idx[pc_id].append(i)
-
-            sequence_point_cloud = []
-            sequence_point_cloud_idx = -np.ones((len(batch),))
-            for i, ind in enumerate(id_to_idx.values()):
-                sequence_point_cloud_idx[ind] = i
-                sequence_point_cloud.append(batch[ind[0]].sequence_point_cloud)
-            assert (sequence_point_cloud_idx >= 0).all()
-
-            override_fields = {
-                "sequence_point_cloud": sequence_point_cloud,
-                "sequence_point_cloud_idx": sequence_point_cloud_idx.tolist(),
-            }
-            # note that the pre-collate value of sequence_point_cloud_idx is unused
-
-            collated = {}
-            for f in fields(elem):
-                if not f.init:
-                    continue
-
-                list_values = override_fields.get(
-                    f.name, [getattr(d, f.name) for d in batch]
-                )
-                collated[f.name] = (
-                    cls.collate(list_values)
-                    if all(list_value is not None for list_value in list_values)
-                    else None
-                )
-            return cls(**collated)
-
-        elif isinstance(elem, Pointclouds):
-            return join_pointclouds_as_batch(batch)
-
-        elif isinstance(elem, CamerasBase):
-            # TODO: don't store K; enforce working in NDC space
-            return join_cameras_as_batch(batch)
-        else:
-            return torch.utils.data._utils.collate.default_collate(batch)
-
-
-FrameDataSubtype = TypeVar("FrameDataSubtype", bound=FrameData)
-
-
-class FrameDataBuilderBase(ReplaceableBase, Generic[FrameDataSubtype], ABC):
-    """A base class for FrameDataBuilders that build a FrameData object, load and
-    process the binary data (crop and resize). Implementations should parametrize
-    the class with a subtype of FrameData and set frame_data_type class variable to
-    that type. They have to also implement `build` method.
-    """
-
-    # To be initialised to FrameDataSubtype
-    frame_data_type: ClassVar[Type[FrameDataSubtype]]
-
-    @abstractmethod
-    def build(
-        self,
-        frame_annotation: types.FrameAnnotation,
-        sequence_annotation: types.SequenceAnnotation,
-    ) -> FrameDataSubtype:
-        """An abstract method to build the frame data based on raw frame/sequence
-        annotations, load the binary data and adjust them according to the metadata.
-        """
-        raise NotImplementedError()
-
-
-class GenericFrameDataBuilder(FrameDataBuilderBase[FrameDataSubtype], ABC):
-    """
-    A class to build a FrameData object, load and process the binary data (crop and
-    resize). This is an abstract class for extending to build FrameData subtypes. Most
-    users need to use concrete `FrameDataBuilder` class instead.
-    Beware that modifications of frame data are done in-place.
-
-    Args:
-        dataset_root: The root folder of the dataset; all the paths in jsons are
-            specified relative to this root (but not json paths themselves).
-        load_images: Enable loading the frame RGB data.
-        load_depths: Enable loading the frame depth maps.
-        load_depth_masks: Enable loading the frame depth map masks denoting the
-            depth values used for evaluation (the points consistent across views).
-        load_masks: Enable loading frame foreground masks.
-        load_point_clouds: Enable loading sequence-level point clouds.
-        max_points: Cap on the number of loaded points in the point cloud;
-            if reached, they are randomly sampled without replacement.
-        mask_images: Whether to mask the images with the loaded foreground masks;
-            0 value is used for background.
-        mask_depths: Whether to mask the depth maps with the loaded foreground
-            masks; 0 value is used for background.
-        image_height: The height of the returned images, masks, and depth maps;
-            aspect ratio is preserved during cropping/resizing.
-        image_width: The width of the returned images, masks, and depth maps;
-            aspect ratio is preserved during cropping/resizing.
-        box_crop: Enable cropping of the image around the bounding box inferred
-            from the foreground region of the loaded segmentation mask; masks
-            and depth maps are cropped accordingly; cameras are corrected.
-        box_crop_mask_thr: The threshold used to separate pixels into foreground
-            and background based on the foreground_probability mask; if no value
-            is greater than this threshold, the loader lowers it and repeats.
-        box_crop_context: The amount of additional padding added to each
-            dimension of the cropping bounding box, relative to box size.
-        path_manager: Optionally a PathManager for interpreting paths in a special way.
-    """
-
-    dataset_root: str = ""
-    load_images: bool = True
-    load_depths: bool = True
-    load_depth_masks: bool = True
-    load_masks: bool = True
-    load_point_clouds: bool = False
-    max_points: int = 0
-    mask_images: bool = False
-    mask_depths: bool = False
-    image_height: Optional[int] = 800
-    image_width: Optional[int] = 800
-    box_crop: bool = True
-    box_crop_mask_thr: float = 0.4
-    box_crop_context: float = 0.3
-    path_manager: Any = None
-
-    def build(
-        self,
-        frame_annotation: types.FrameAnnotation,
-        sequence_annotation: types.SequenceAnnotation,
-        load_blobs: bool = True,
-    ) -> FrameDataSubtype:
-        """Builds the frame data based on raw frame/sequence annotations, loads the
-        binary data and adjust them according to the metadata. The processing includes:
-            * if box_crop is set, the image/mask/depth are cropped with the bounding
-                box provided or estimated from MaskAnnotation,
-            * if image_height/image_width are set, the image/mask/depth are resized to
-                fit that resolution. Note that the aspect ratio is preserved, and the
-                (possibly cropped) image is pasted into the top-left corner. In the
-                resulting frame_data, mask_crop field corresponds to the mask of the
-                pasted image.
-
-        Args:
-            frame_annotation: frame annotation
-            sequence_annotation: sequence annotation
-            load_blobs: if the function should attempt loading the image, depth map
-                and mask, and foreground mask
-
-        Returns:
-            The constructed FrameData object.
-        """
-
-        point_cloud = sequence_annotation.point_cloud
-
-        frame_data = self.frame_data_type(
-            frame_number=safe_as_tensor(frame_annotation.frame_number, torch.long),
-            frame_timestamp=safe_as_tensor(
-                frame_annotation.frame_timestamp, torch.float
-            ),
-            sequence_name=frame_annotation.sequence_name,
-            sequence_category=sequence_annotation.category,
-            camera_quality_score=safe_as_tensor(
-                sequence_annotation.viewpoint_quality_score, torch.float
-            ),
-            point_cloud_quality_score=safe_as_tensor(
-                point_cloud.quality_score, torch.float
-            )
-            if point_cloud is not None
-            else None,
-        )
-
-        if load_blobs and self.load_masks and frame_annotation.mask is not None:
-            (
-                frame_data.fg_probability,
-                frame_data.mask_path,
-                frame_data.bbox_xywh,
-            ) = self._load_fg_probability(frame_annotation)
-
-        if frame_annotation.image is not None:
-            image_size_hw = safe_as_tensor(frame_annotation.image.size, torch.long)
-            frame_data.image_size_hw = image_size_hw  # original image size
-            # image size after crop/resize
-            frame_data.effective_image_size_hw = image_size_hw
-
-            if load_blobs and self.load_images:
-                (
-                    frame_data.image_rgb,
-                    frame_data.image_path,
-                ) = self._load_images(frame_annotation, frame_data.fg_probability)
-
-        if load_blobs and self.load_depths and frame_annotation.depth is not None:
-            (
-                frame_data.depth_map,
-                frame_data.depth_path,
-                frame_data.depth_mask,
-            ) = self._load_mask_depth(frame_annotation, frame_data.fg_probability)
-
-        if load_blobs and self.load_point_clouds and point_cloud is not None:
-            pcl_path = self._fix_point_cloud_path(point_cloud.path)
-            frame_data.sequence_point_cloud = load_pointcloud(
-                self._local_path(pcl_path), max_points=self.max_points
-            )
-            frame_data.sequence_point_cloud_path = pcl_path
-
-        if frame_annotation.viewpoint is not None:
-            frame_data.camera = self._get_pytorch3d_camera(frame_annotation)
-
-        if self.box_crop:
-            frame_data.crop_by_metadata_bbox_(self.box_crop_context)
-
-        if self.image_height is not None and self.image_width is not None:
-            new_size = (self.image_height, self.image_width)
-            frame_data.resize_frame_(
-                new_size_hw=torch.tensor(new_size, dtype=torch.long),  # pyre-ignore
-            )
-
-        return frame_data
-
-    def _load_fg_probability(
-        self, entry: types.FrameAnnotation
-    ) -> Tuple[Optional[torch.Tensor], Optional[str], Optional[torch.Tensor]]:
-
-        full_path = os.path.join(self.dataset_root, entry.mask.path)  # pyre-ignore
-        fg_probability = load_mask(self._local_path(full_path))
-        # we can use provided bbox_xywh or calculate it based on mask
-        # saves time to skip bbox calculation
-        # pyre-ignore
-        bbox_xywh = entry.mask.bounding_box_xywh or get_bbox_from_mask(
-            fg_probability, self.box_crop_mask_thr
-        )
-        if fg_probability.shape[-2:] != entry.image.size:
-            raise ValueError(
-                f"bad mask size: {fg_probability.shape[-2:]} vs {entry.image.size}!"
-            )
-        return (
-            safe_as_tensor(fg_probability, torch.float),
-            full_path,
-            safe_as_tensor(bbox_xywh, torch.long),
-        )
-
-    def _load_images(
-        self,
-        entry: types.FrameAnnotation,
-        fg_probability: Optional[torch.Tensor],
-    ) -> Tuple[torch.Tensor, str]:
-        assert self.dataset_root is not None and entry.image is not None
-        path = os.path.join(self.dataset_root, entry.image.path)
-        image_rgb = load_image(self._local_path(path))
-
-        if image_rgb.shape[-2:] != entry.image.size:
-            raise ValueError(
-                f"bad image size: {image_rgb.shape[-2:]} vs {entry.image.size}!"
-            )
-
-        if self.mask_images:
-            assert fg_probability is not None
-            image_rgb *= fg_probability
-
-        return image_rgb, path
-
-    def _load_mask_depth(
-        self,
-        entry: types.FrameAnnotation,
-        fg_probability: Optional[torch.Tensor],
-    ) -> Tuple[torch.Tensor, str, torch.Tensor]:
-        entry_depth = entry.depth
-        assert entry_depth is not None
-        path = os.path.join(self.dataset_root, entry_depth.path)
-        depth_map = load_depth(self._local_path(path), entry_depth.scale_adjustment)
-
-        if self.mask_depths:
-            assert fg_probability is not None
-            depth_map *= fg_probability
-
-        if self.load_depth_masks:
-            assert entry_depth.mask_path is not None
-            mask_path = os.path.join(self.dataset_root, entry_depth.mask_path)
-            depth_mask = load_depth_mask(self._local_path(mask_path))
-        else:
-            depth_mask = torch.ones_like(depth_map)
-
-        return torch.tensor(depth_map), path, torch.tensor(depth_mask)
-
-    def _get_pytorch3d_camera(
-        self,
-        entry: types.FrameAnnotation,
-    ) -> PerspectiveCameras:
-        entry_viewpoint = entry.viewpoint
-        assert entry_viewpoint is not None
-        # principal point and focal length
-        principal_point = torch.tensor(
-            entry_viewpoint.principal_point, dtype=torch.float
-        )
-        focal_length = torch.tensor(entry_viewpoint.focal_length, dtype=torch.float)
-
-        format = entry_viewpoint.intrinsics_format
-        if entry_viewpoint.intrinsics_format == "ndc_norm_image_bounds":
-            # legacy PyTorch3D NDC format
-            # convert to pixels unequally and convert to ndc equally
-            image_size_as_list = list(reversed(entry.image.size))
-            image_size_wh = torch.tensor(image_size_as_list, dtype=torch.float)
-            per_axis_scale = image_size_wh / image_size_wh.min()
-            focal_length = focal_length * per_axis_scale
-            principal_point = principal_point * per_axis_scale
-        elif entry_viewpoint.intrinsics_format != "ndc_isotropic":
-            raise ValueError(f"Unknown intrinsics format: {format}")
-
-        return PerspectiveCameras(
-            focal_length=focal_length[None],
-            principal_point=principal_point[None],
-            R=torch.tensor(entry_viewpoint.R, dtype=torch.float)[None],
-            T=torch.tensor(entry_viewpoint.T, dtype=torch.float)[None],
-        )
-
-    def _fix_point_cloud_path(self, path: str) -> str:
-        """
-        Fix up a point cloud path from the dataset.
-        Some files in Co3Dv2 have an accidental absolute path stored.
-        """
-        unwanted_prefix = (
-            "/large_experiments/p3/replay/datasets/co3d/co3d45k_220512/export_v23/"
-        )
-        if path.startswith(unwanted_prefix):
-            path = path[len(unwanted_prefix) :]
-        return os.path.join(self.dataset_root, path)
-
-    def _local_path(self, path: str) -> str:
-        if self.path_manager is None:
-            return path
return self.path_manager.get_local_path(path)
|
||||
|
||||
|
||||
@registry.register
|
||||
class FrameDataBuilder(GenericWorkaround, GenericFrameDataBuilder[FrameData]):
|
||||
"""
|
||||
A concrete class to build a FrameData object, load and process the binary data (crop
|
||||
and resize). Beware that modifications of frame data are done in-place. Please see
|
||||
the documentation for `GenericFrameDataBuilder` for the description of parameters
|
||||
and methods.
|
||||
"""
|
||||
|
||||
frame_data_type: ClassVar[Type[FrameData]] = FrameData
|
||||
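
# --- Editor's sketch (not part of the diff) ---------------------------------
# A hedged example of driving the builder above directly, assuming a
# CO3D-style dataset under the hypothetical path "my_dataset_root" and
# frame/sequence annotations `frame_ann` / `seq_ann` already deserialized
# from the dataset's zipped json files. Parameter choices are illustrative.
#
#     builder = FrameDataBuilder(
#         dataset_root="my_dataset_root",
#         load_images=True,
#         load_masks=True,
#         box_crop=True,
#         image_height=800,
#         image_width=800,
#     )
#     frame_data = builder.build(frame_ann, seq_ann, load_blobs=True)
#     # frame_data.image_rgb: a (3, H, W) float tensor, cropped and resized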
@@ -15,6 +15,7 @@ import random
import warnings
from collections import defaultdict
from itertools import islice
from pathlib import Path
from typing import (
    Any,
    ClassVar,
@@ -29,16 +30,20 @@ from typing import (
    Union,
)

from pytorch3d.implicitron.dataset import types
from pytorch3d.implicitron.dataset.dataset_base import DatasetBase
from pytorch3d.implicitron.dataset.frame_data import FrameData, FrameDataBuilder
from pytorch3d.implicitron.dataset.utils import is_known_frame_scalar
import numpy as np
import torch
from PIL import Image
from pytorch3d.implicitron.tools.config import registry, ReplaceableBase
from pytorch3d.io import IO
from pytorch3d.renderer.camera_utils import join_cameras_as_batch
from pytorch3d.renderer.cameras import CamerasBase

from pytorch3d.renderer.cameras import CamerasBase, PerspectiveCameras
from pytorch3d.structures.pointclouds import Pointclouds
from tqdm import tqdm

from . import types
from .dataset_base import DatasetBase, FrameData
from .utils import is_known_frame_scalar


logger = logging.getLogger(__name__)

@@ -60,7 +65,7 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
    A dataset with annotations in json files like the Common Objects in 3D
    (CO3D) dataset.

    Metadata-related args::
    Args:
        frame_annotations_file: A zipped json file containing metadata of the
            frames in the dataset, serialized List[types.FrameAnnotation].
        sequence_annotations_file: A zipped json file containing metadata of the
@@ -78,24 +83,6 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
        pick_sequence: A list of sequence names to restrict the dataset to.
        exclude_sequence: A list of the names of the sequences to exclude.
        limit_category_to: Restrict the dataset to the given list of categories.
        remove_empty_masks: Removes the frames with no active foreground pixels
            in the segmentation mask after thresholding (see box_crop_mask_thr).
        n_frames_per_sequence: If > 0, randomly samples #n_frames_per_sequence
            frames in each sequence uniformly without replacement if it has
            more frames than that; applied before other frame-level filters.
        seed: The seed of the random generator sampling #n_frames_per_sequence
            random frames per sequence.
        sort_frames: Enable frame annotations sorting to group frames from the
            same sequences together and order them by timestamps.
        eval_batches: A list of batches that form the evaluation set;
            list of batch-sized lists of indices corresponding to __getitem__
            of this class, thus it can be used directly as a batch sampler.
        eval_batch_index:
            (Optional[List[List[Union[Tuple[str, int, str], Tuple[str, int]]]]])
            A list of batches of frames described as (sequence_name, frame_idx)
            that can form the evaluation set; `eval_batches` will be set from this.

    Blob-loading parameters:
        dataset_root: The root folder of the dataset; all the paths in jsons are
            specified relative to this root (but not json paths themselves).
        load_images: Enable loading the frame RGB data.
@@ -122,6 +109,23 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
            is greater than this threshold, the loader lowers it and repeats.
        box_crop_context: The amount of additional padding added to each
            dimension of the cropping bounding box, relative to box size.
        remove_empty_masks: Removes the frames with no active foreground pixels
            in the segmentation mask after thresholding (see box_crop_mask_thr).
        n_frames_per_sequence: If > 0, randomly samples #n_frames_per_sequence
            frames in each sequence uniformly without replacement if it has
            more frames than that; applied before other frame-level filters.
        seed: The seed of the random generator sampling #n_frames_per_sequence
            random frames per sequence.
        sort_frames: Enable frame annotations sorting to group frames from the
            same sequences together and order them by timestamps.
        eval_batches: A list of batches that form the evaluation set;
            list of batch-sized lists of indices corresponding to __getitem__
            of this class, thus it can be used directly as a batch sampler.
        eval_batch_index:
            (Optional[List[List[Union[Tuple[str, int, str], Tuple[str, int]]]]])
            A list of batches of frames described as (sequence_name, frame_idx)
            that can form the evaluation set; `eval_batches` will be set from this.

    """

    frame_annotations_type: ClassVar[
@@ -158,14 +162,12 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
    sort_frames: bool = False
    eval_batches: Any = None
    eval_batch_index: Any = None
    # initialised in __post_init__
    # commented because of OmegaConf (for tests to pass)
    # _frame_data_builder: FrameDataBuilder = field(init=False)
    # frame_annots: List[FrameAnnotsEntry] = field(init=False)
    # seq_annots: Dict[str, types.SequenceAnnotation] = field(init=False)
    # _seq_to_idx: Dict[str, List[int]] = field(init=False)

    def __post_init__(self) -> None:
        # pyre-fixme[16]: `JsonIndexDataset` has no attribute `subset_to_image_path`.
        self.subset_to_image_path = None
        self._load_frames()
        self._load_sequences()
        if self.sort_frames:
@@ -173,27 +175,9 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
        self._load_subset_lists()
        self._filter_db()  # also computes sequence indices
        self._extract_and_set_eval_batches()

        # pyre-ignore
        self._frame_data_builder = FrameDataBuilder(
            dataset_root=self.dataset_root,
            load_images=self.load_images,
            load_depths=self.load_depths,
            load_depth_masks=self.load_depth_masks,
            load_masks=self.load_masks,
            load_point_clouds=self.load_point_clouds,
            max_points=self.max_points,
            mask_images=self.mask_images,
            mask_depths=self.mask_depths,
            image_height=self.image_height,
            image_width=self.image_width,
            box_crop=self.box_crop,
            box_crop_mask_thr=self.box_crop_mask_thr,
            box_crop_context=self.box_crop_context,
        )
        logger.info(str(self))

    def _extract_and_set_eval_batches(self) -> None:
    def _extract_and_set_eval_batches(self):
        """
        Sets eval_batches based on the input eval_batch_index.
        """
@@ -223,13 +207,13 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
            # https://gist.github.com/treyhunner/f35292e676efa0be1728
            functools.reduce(
                lambda a, b: {**a, **b},
                # pyre-ignore[16]
                [d.seq_annots for d in other_datasets],
                [d.seq_annots for d in other_datasets],  # pyre-ignore[16]
            )
        )
        all_eval_batches = [
            self.eval_batches,
            *[d.eval_batches for d in other_datasets],  # pyre-ignore[16]
            # pyre-ignore
            *[d.eval_batches for d in other_datasets],
        ]
        if not (
            all(ba is None for ba in all_eval_batches)
@@ -267,7 +251,7 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
        allow_missing_indices: bool = False,
        remove_missing_indices: bool = False,
        suppress_missing_index_warning: bool = True,
    ) -> Union[List[List[Optional[int]]], List[List[int]]]:
    ) -> List[List[Union[Optional[int], int]]]:
        """
        Obtain indices into the dataset object given a list of frame ids.

@@ -339,7 +323,9 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
            valid_dataset_idx = [
                [b for b in batch if b is not None] for batch in dataset_idx
            ]
            return [batch for batch in valid_dataset_idx if len(batch) > 0]
            return [  # pyre-ignore[7]
                batch for batch in valid_dataset_idx if len(batch) > 0
            ]

        return dataset_idx

@@ -431,18 +417,255 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
            raise IndexError(f"index {index} out of range {len(self.frame_annots)}")

        entry = self.frame_annots[index]["frame_annotation"]

        # pyre-ignore
        frame_data = self._frame_data_builder.build(
            entry,
            # pyre-ignore
            self.seq_annots[entry.sequence_name],
        # pyre-ignore[16]
        point_cloud = self.seq_annots[entry.sequence_name].point_cloud
        frame_data = FrameData(
            frame_number=_safe_as_tensor(entry.frame_number, torch.long),
            frame_timestamp=_safe_as_tensor(entry.frame_timestamp, torch.float),
            sequence_name=entry.sequence_name,
            sequence_category=self.seq_annots[entry.sequence_name].category,
            camera_quality_score=_safe_as_tensor(
                self.seq_annots[entry.sequence_name].viewpoint_quality_score,
                torch.float,
            ),
            point_cloud_quality_score=_safe_as_tensor(
                point_cloud.quality_score, torch.float
            )
            if point_cloud is not None
            else None,
        )
        # Optional field

        # The rest of the fields are optional
        frame_data.frame_type = self._get_frame_type(self.frame_annots[index])

        (
            frame_data.fg_probability,
            frame_data.mask_path,
            frame_data.bbox_xywh,
            clamp_bbox_xyxy,
            frame_data.crop_bbox_xywh,
        ) = self._load_crop_fg_probability(entry)

        scale = 1.0
        if self.load_images and entry.image is not None:
            # original image size
            frame_data.image_size_hw = _safe_as_tensor(entry.image.size, torch.long)

            (
                frame_data.image_rgb,
                frame_data.image_path,
                frame_data.mask_crop,
                scale,
            ) = self._load_crop_images(
                entry, frame_data.fg_probability, clamp_bbox_xyxy
            )

        if self.load_depths and entry.depth is not None:
            (
                frame_data.depth_map,
                frame_data.depth_path,
                frame_data.depth_mask,
            ) = self._load_mask_depth(entry, clamp_bbox_xyxy, frame_data.fg_probability)

        if entry.viewpoint is not None:
            frame_data.camera = self._get_pytorch3d_camera(
                entry,
                scale,
                clamp_bbox_xyxy,
            )

        if self.load_point_clouds and point_cloud is not None:
            pcl_path = self._fix_point_cloud_path(point_cloud.path)
            frame_data.sequence_point_cloud = _load_pointcloud(
                self._local_path(pcl_path), max_points=self.max_points
            )
            frame_data.sequence_point_cloud_path = pcl_path

        return frame_data

    def _fix_point_cloud_path(self, path: str) -> str:
        """
        Fix up a point cloud path from the dataset.
        Some files in Co3Dv2 have an accidental absolute path stored.
        """
        unwanted_prefix = (
            "/large_experiments/p3/replay/datasets/co3d/co3d45k_220512/export_v23/"
        )
        if path.startswith(unwanted_prefix):
            path = path[len(unwanted_prefix) :]
        return os.path.join(self.dataset_root, path)

    def _load_crop_fg_probability(
        self, entry: types.FrameAnnotation
    ) -> Tuple[
        Optional[torch.Tensor],
        Optional[str],
        Optional[torch.Tensor],
        Optional[torch.Tensor],
        Optional[torch.Tensor],
    ]:
        fg_probability = None
        full_path = None
        bbox_xywh = None
        clamp_bbox_xyxy = None
        crop_box_xywh = None

        if (self.load_masks or self.box_crop) and entry.mask is not None:
            full_path = os.path.join(self.dataset_root, entry.mask.path)
            mask = _load_mask(self._local_path(full_path))

            if mask.shape[-2:] != entry.image.size:
                raise ValueError(
                    f"bad mask size: {mask.shape[-2:]} vs {entry.image.size}!"
                )

            bbox_xywh = torch.tensor(_get_bbox_from_mask(mask, self.box_crop_mask_thr))

            if self.box_crop:
                clamp_bbox_xyxy = _clamp_box_to_image_bounds_and_round(
                    _get_clamp_bbox(
                        bbox_xywh,
                        image_path=entry.image.path,
                        box_crop_context=self.box_crop_context,
                    ),
                    image_size_hw=tuple(mask.shape[-2:]),
                )
                crop_box_xywh = _bbox_xyxy_to_xywh(clamp_bbox_xyxy)

                mask = _crop_around_box(mask, clamp_bbox_xyxy, full_path)

            fg_probability, _, _ = self._resize_image(mask, mode="nearest")

        return fg_probability, full_path, bbox_xywh, clamp_bbox_xyxy, crop_box_xywh

    def _load_crop_images(
        self,
        entry: types.FrameAnnotation,
        fg_probability: Optional[torch.Tensor],
        clamp_bbox_xyxy: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, str, torch.Tensor, float]:
        assert self.dataset_root is not None and entry.image is not None
        path = os.path.join(self.dataset_root, entry.image.path)
        image_rgb = _load_image(self._local_path(path))

        if image_rgb.shape[-2:] != entry.image.size:
            raise ValueError(
                f"bad image size: {image_rgb.shape[-2:]} vs {entry.image.size}!"
            )

        if self.box_crop:
            assert clamp_bbox_xyxy is not None
            image_rgb = _crop_around_box(image_rgb, clamp_bbox_xyxy, path)

        image_rgb, scale, mask_crop = self._resize_image(image_rgb)

        if self.mask_images:
            assert fg_probability is not None
            image_rgb *= fg_probability

        return image_rgb, path, mask_crop, scale

    def _load_mask_depth(
        self,
        entry: types.FrameAnnotation,
        clamp_bbox_xyxy: Optional[torch.Tensor],
        fg_probability: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, str, torch.Tensor]:
        entry_depth = entry.depth
        assert entry_depth is not None
        path = os.path.join(self.dataset_root, entry_depth.path)
        depth_map = _load_depth(self._local_path(path), entry_depth.scale_adjustment)

        if self.box_crop:
            assert clamp_bbox_xyxy is not None
            depth_bbox_xyxy = _rescale_bbox(
                clamp_bbox_xyxy, entry.image.size, depth_map.shape[-2:]
            )
            depth_map = _crop_around_box(depth_map, depth_bbox_xyxy, path)

        depth_map, _, _ = self._resize_image(depth_map, mode="nearest")

        if self.mask_depths:
            assert fg_probability is not None
            depth_map *= fg_probability

        if self.load_depth_masks:
            assert entry_depth.mask_path is not None
            mask_path = os.path.join(self.dataset_root, entry_depth.mask_path)
            depth_mask = _load_depth_mask(self._local_path(mask_path))

            if self.box_crop:
                assert clamp_bbox_xyxy is not None
                depth_mask_bbox_xyxy = _rescale_bbox(
                    clamp_bbox_xyxy, entry.image.size, depth_mask.shape[-2:]
                )
                depth_mask = _crop_around_box(
                    depth_mask, depth_mask_bbox_xyxy, mask_path
                )

            depth_mask, _, _ = self._resize_image(depth_mask, mode="nearest")
        else:
            depth_mask = torch.ones_like(depth_map)

        return depth_map, path, depth_mask

    def _get_pytorch3d_camera(
        self,
        entry: types.FrameAnnotation,
        scale: float,
        clamp_bbox_xyxy: Optional[torch.Tensor],
    ) -> PerspectiveCameras:
        entry_viewpoint = entry.viewpoint
        assert entry_viewpoint is not None
        # principal point and focal length
        principal_point = torch.tensor(
            entry_viewpoint.principal_point, dtype=torch.float
        )
        focal_length = torch.tensor(entry_viewpoint.focal_length, dtype=torch.float)

        half_image_size_wh_orig = (
            torch.tensor(list(reversed(entry.image.size)), dtype=torch.float) / 2.0
        )

        # first, we convert from the dataset's NDC convention to pixels
        format = entry_viewpoint.intrinsics_format
        if format.lower() == "ndc_norm_image_bounds":
            # this is e.g. currently used in CO3D for storing intrinsics
            rescale = half_image_size_wh_orig
        elif format.lower() == "ndc_isotropic":
            rescale = half_image_size_wh_orig.min()
        else:
            raise ValueError(f"Unknown intrinsics format: {format}")

        # principal point and focal length in pixels
        principal_point_px = half_image_size_wh_orig - principal_point * rescale
        focal_length_px = focal_length * rescale
        if self.box_crop:
            assert clamp_bbox_xyxy is not None
            principal_point_px -= clamp_bbox_xyxy[:2]

        # now, convert from pixels to PyTorch3D v0.5+ NDC convention
        if self.image_height is None or self.image_width is None:
            out_size = list(reversed(entry.image.size))
        else:
            out_size = [self.image_width, self.image_height]

        half_image_size_output = torch.tensor(out_size, dtype=torch.float) / 2.0
        half_min_image_size_output = half_image_size_output.min()

        # rescaled principal point and focal length in ndc
        principal_point = (
            half_image_size_output - principal_point_px * scale
        ) / half_min_image_size_output
        focal_length = focal_length_px * scale / half_min_image_size_output

        return PerspectiveCameras(
            focal_length=focal_length[None],
            principal_point=principal_point[None],
            R=torch.tensor(entry_viewpoint.R, dtype=torch.float)[None],
            T=torch.tensor(entry_viewpoint.T, dtype=torch.float)[None],
        )

    def _load_frames(self) -> None:
        logger.info(f"Loading Co3D frames from {self.frame_annotations_file}.")
        local_file = self._local_path(self.frame_annotations_file)
@@ -630,23 +853,46 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
        # pyre-ignore[16]
        self._seq_to_idx = seq_to_idx

    def _resize_image(
        self, image, mode="bilinear"
    ) -> Tuple[torch.Tensor, float, torch.Tensor]:
        image_height, image_width = self.image_height, self.image_width
        if image_height is None or image_width is None:
            # skip the resizing
            imre_ = torch.from_numpy(image)
            return imre_, 1.0, torch.ones_like(imre_[:1])
        # takes a numpy array, returns a pytorch tensor
        minscale = min(
            image_height / image.shape[-2],
            image_width / image.shape[-1],
        )
        imre = torch.nn.functional.interpolate(
            torch.from_numpy(image)[None],
            scale_factor=minscale,
            mode=mode,
            align_corners=False if mode == "bilinear" else None,
            recompute_scale_factor=True,
        )[0]
        # pyre-fixme[19]: Expected 1 positional argument.
        imre_ = torch.zeros(image.shape[0], self.image_height, self.image_width)
        imre_[:, 0 : imre.shape[1], 0 : imre.shape[2]] = imre
        # pyre-fixme[6]: For 2nd param expected `int` but got `Optional[int]`.
        # pyre-fixme[6]: For 3rd param expected `int` but got `Optional[int]`.
        mask = torch.zeros(1, self.image_height, self.image_width)
        mask[:, 0 : imre.shape[1], 0 : imre.shape[2]] = 1.0
        return imre_, minscale, mask

    def _local_path(self, path: str) -> str:
        if self.path_manager is None:
            return path
        return self.path_manager.get_local_path(path)

    def get_frame_numbers_and_timestamps(
        self, idxs: Sequence[int], subset_filter: Optional[Sequence[str]] = None
        self, idxs: Sequence[int]
    ) -> List[Tuple[int, float]]:
        out: List[Tuple[int, float]] = []
        for idx in idxs:
            if (
                subset_filter is not None
                # pyre-fixme[16]: `JsonIndexDataset` has no attribute `frame_annots`.
                and self.frame_annots[idx]["subset"] not in subset_filter
            ):
                continue

            # pyre-ignore[16]
            frame_annotation = self.frame_annots[idx]["frame_annotation"]
            out.append(
                (frame_annotation.frame_number, frame_annotation.frame_timestamp)
@@ -666,3 +912,169 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):

def _seq_name_to_seed(seq_name) -> int:
    return int(hashlib.sha1(seq_name.encode("utf-8")).hexdigest(), 16)
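
# --- Editor's sketch (not part of the diff) ---------------------------------
# A deterministic per-sequence seed like the one above gives reproducible
# frame subsampling; the sequence name and counts below are made up.
import hashlib
import random

seed = int(hashlib.sha1("teddybear_123".encode("utf-8")).hexdigest(), 16)
rng = random.Random(seed)  # Python accepts arbitrarily large int seeds
picked = rng.sample(range(100), 5)  # the same 5 frame indices on every run
assert picked == random.Random(seed).sample(range(100), 5)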


def _load_image(path) -> np.ndarray:
    with Image.open(path) as pil_im:
        im = np.array(pil_im.convert("RGB"))
    im = im.transpose((2, 0, 1))
    im = im.astype(np.float32) / 255.0
    return im


def _load_16big_png_depth(depth_png) -> np.ndarray:
    with Image.open(depth_png) as depth_pil:
        # the image is stored with 16-bit depth, but PIL reads it as I (32 bit).
        # we cast it to uint16, then reinterpret as float16, then cast to float32
        depth = (
            np.frombuffer(np.array(depth_pil, dtype=np.uint16), dtype=np.float16)
            .astype(np.float32)
            .reshape((depth_pil.size[1], depth_pil.size[0]))
        )
    return depth
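
# --- Editor's sketch (not part of the diff) ---------------------------------
# The uint16 <-> float16 reinterpretation above can be checked without any PNG
# on disk; this round-trip mirrors what a CO3D-style writer/reader pair does,
# using numpy only.
import numpy as np

depth_gt = np.random.rand(4, 6).astype(np.float16)  # "true" depth values
stored = depth_gt.view(np.uint16)  # the bit pattern a 16-bit PNG would store
decoded = (
    np.frombuffer(stored.tobytes(), dtype=np.float16)
    .astype(np.float32)
    .reshape(4, 6)
)
assert np.array_equal(decoded, depth_gt.astype(np.float32))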


def _load_1bit_png_mask(file: str) -> np.ndarray:
    with Image.open(file) as pil_im:
        mask = (np.array(pil_im.convert("L")) > 0.0).astype(np.float32)
    return mask


def _load_depth_mask(path: str) -> np.ndarray:
    if not path.lower().endswith(".png"):
        raise ValueError('unsupported depth mask file name "%s"' % path)
    m = _load_1bit_png_mask(path)
    return m[None]  # fake feature channel


def _load_depth(path, scale_adjustment) -> np.ndarray:
    if not path.lower().endswith(".png"):
        raise ValueError('unsupported depth file name "%s"' % path)

    d = _load_16big_png_depth(path) * scale_adjustment
    d[~np.isfinite(d)] = 0.0
    return d[None]  # fake feature channel


def _load_mask(path) -> np.ndarray:
    with Image.open(path) as pil_im:
        mask = np.array(pil_im)
    mask = mask.astype(np.float32) / 255.0
    return mask[None]  # fake feature channel


def _get_1d_bounds(arr) -> Tuple[int, int]:
    nz = np.flatnonzero(arr)
    return nz[0], nz[-1] + 1


def _get_bbox_from_mask(
    mask, thr, decrease_quant: float = 0.05
) -> Tuple[int, int, int, int]:
    # bbox in xywh
    masks_for_box = np.zeros_like(mask)
    while masks_for_box.sum() <= 1.0:
        masks_for_box = (mask > thr).astype(np.float32)
        thr -= decrease_quant
        if thr <= 0.0:
            warnings.warn(f"Empty masks_for_bbox (thr={thr}) => using full image.")

    x0, x1 = _get_1d_bounds(masks_for_box.sum(axis=-2))
    y0, y1 = _get_1d_bounds(masks_for_box.sum(axis=-1))

    return x0, y0, x1 - x0, y1 - y0
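
# --- Editor's sketch (not part of the diff) ---------------------------------
# _get_bbox_from_mask (defined above) on a synthetic mask; the expected xywh
# box follows from the block placed below: x/w run along columns (last axis),
# y/h along rows.
import numpy as np

mask = np.zeros((10, 10), dtype=np.float32)
mask[2:7, 3:9] = 1.0  # a 5-row x 6-column foreground block
x, y, w, h = _get_bbox_from_mask(mask, thr=0.5)
assert (x, y, w, h) == (3, 2, 6, 5)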


def _get_clamp_bbox(
    bbox: torch.Tensor,
    box_crop_context: float = 0.0,
    image_path: str = "",
) -> torch.Tensor:
    # box_crop_context: rate of expansion for bbox
    # returns possibly expanded bbox xyxy as float

    bbox = bbox.clone()  # do not edit bbox in place

    # increase box size
    if box_crop_context > 0.0:
        c = box_crop_context
        bbox = bbox.float()
        bbox[0] -= bbox[2] * c / 2
        bbox[1] -= bbox[3] * c / 2
        bbox[2] += bbox[2] * c
        bbox[3] += bbox[3] * c

    if (bbox[2:] <= 1.0).any():
        raise ValueError(
            f"squashed image {image_path}!! The bounding box contains no pixels."
        )

    bbox[2:] = torch.clamp(bbox[2:], 2)  # set min height, width to 2 along both axes
    bbox_xyxy = _bbox_xywh_to_xyxy(bbox, clamp_size=2)

    return bbox_xyxy


def _crop_around_box(tensor, bbox, impath: str = ""):
    # bbox is xyxy, where the upper bound is corrected with +1
    bbox = _clamp_box_to_image_bounds_and_round(
        bbox,
        image_size_hw=tensor.shape[-2:],
    )
    tensor = tensor[..., bbox[1] : bbox[3], bbox[0] : bbox[2]]
    assert all(c > 0 for c in tensor.shape), f"squashed image {impath}"
    return tensor


def _clamp_box_to_image_bounds_and_round(
    bbox_xyxy: torch.Tensor,
    image_size_hw: Tuple[int, int],
) -> torch.LongTensor:
    bbox_xyxy = bbox_xyxy.clone()
    bbox_xyxy[[0, 2]] = torch.clamp(bbox_xyxy[[0, 2]], 0, image_size_hw[-1])
    bbox_xyxy[[1, 3]] = torch.clamp(bbox_xyxy[[1, 3]], 0, image_size_hw[-2])
    if not isinstance(bbox_xyxy, torch.LongTensor):
        bbox_xyxy = bbox_xyxy.round().long()
    return bbox_xyxy  # pyre-ignore [7]


def _rescale_bbox(bbox: torch.Tensor, orig_res, new_res) -> torch.Tensor:
    assert bbox is not None
    assert np.prod(orig_res) > 1e-8
    # average ratio of dimensions
    rel_size = (new_res[0] / orig_res[0] + new_res[1] / orig_res[1]) / 2.0
    return bbox * rel_size


def _bbox_xyxy_to_xywh(xyxy: torch.Tensor) -> torch.Tensor:
    wh = xyxy[2:] - xyxy[:2]
    xywh = torch.cat([xyxy[:2], wh])
    return xywh


def _bbox_xywh_to_xyxy(
    xywh: torch.Tensor, clamp_size: Optional[int] = None
) -> torch.Tensor:
    xyxy = xywh.clone()
    if clamp_size is not None:
        xyxy[2:] = torch.clamp(xyxy[2:], clamp_size)
    xyxy[2:] += xyxy[:2]
    return xyxy
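
# --- Editor's sketch (not part of the diff) ---------------------------------
# The xywh <-> xyxy helpers above are exact inverses when no clamping kicks
# in; a quick runnable check with torch.
import torch

xywh = torch.tensor([3.0, 2.0, 6.0, 5.0])  # x, y, width, height
xyxy = _bbox_xywh_to_xyxy(xywh)  # -> tensor([3., 2., 9., 7.])
assert torch.equal(_bbox_xyxy_to_xywh(xyxy), xywh)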


def _safe_as_tensor(data, dtype):
    if data is None:
        return None
    return torch.tensor(data, dtype=dtype)


# NOTE this cache is per-worker; workers are implemented as processes.
# each batch is loaded and collated by a single worker;
# since sequences tend to co-occur within batches, this is useful.
@functools.lru_cache(maxsize=256)
def _load_pointcloud(pcl_path: Union[str, Path], max_points: int = 0) -> Pointclouds:
    pcl = IO().load_pointcloud(pcl_path)
    if max_points > 0:
        pcl = pcl.subsample(max_points)

    return pcl

@@ -49,7 +49,7 @@ class RenderedMeshDatasetMapProvider(DatasetMapProviderBase):  # pyre-ignore [13]
    if one is available, the data it produces is on the CPU just like
    the data returned by implicitron's other dataset map providers.
    This is because both datasets and models can be large, so implicitron's
    training loop expects data on the CPU and only moves
    GenericModel.forward (etc) expects data on the CPU and only moves
    what it needs to the device.

    For a more detailed explanation of this code, please refer to the
@@ -61,23 +61,16 @@ class RenderedMeshDatasetMapProvider(DatasetMapProviderBase):  # pyre-ignore [13]
            the cow mesh in the same repo as this code.
        azimuth_range: number of degrees on each side of the start position to
            take samples
        distance: distance from camera centres to the origin.
        resolution: the common height and width of the output images.
        use_point_light: whether to use a particular point light as opposed
            to ambient white.
        gpu_idx: which gpu to use for rendering the mesh.
        path_manager_factory: (Optional) An object that generates an instance of
            PathManager that can translate provided file paths.
        path_manager_factory_class_type: The class type of `path_manager_factory`.
    """

    num_views: int = 40
    data_file: Optional[str] = None
    azimuth_range: float = 180
    distance: float = 2.7
    resolution: int = 128
    use_point_light: bool = True
    gpu_idx: Optional[int] = 0
    path_manager_factory: PathManagerFactory
    path_manager_factory_class_type: str = "PathManagerFactory"

@@ -92,8 +85,8 @@ class RenderedMeshDatasetMapProvider(DatasetMapProviderBase):  # pyre-ignore [13]
    def __post_init__(self) -> None:
        super().__init__()
        run_auto_creation(self)
        if torch.cuda.is_available() and self.gpu_idx is not None:
            device = torch.device(f"cuda:{self.gpu_idx}")
        if torch.cuda.is_available():
            device = torch.device("cuda:0")
        else:
            device = torch.device("cpu")
        if self.data_file is None:
@@ -113,13 +106,13 @@ class RenderedMeshDatasetMapProvider(DatasetMapProviderBase):  # pyre-ignore [13]
            num_views=self.num_views,
            mesh=mesh,
            azimuth_range=self.azimuth_range,
            distance=self.distance,
            resolution=self.resolution,
            device=device,
            use_point_light=self.use_point_light,
        )
        # pyre-ignore[16]
        self.poses = poses.cpu()
        expand_args_fields(SingleSceneDataset)
        # pyre-ignore[16]
        self.train_dataset = SingleSceneDataset(  # pyre-ignore[28]
            object_name="cow",
@@ -137,7 +130,6 @@ def _generate_cow_renders(
    num_views: int,
    mesh: Meshes,
    azimuth_range: float,
    distance: float,
    resolution: int,
    device: torch.device,
    use_point_light: bool,
@@ -176,11 +168,11 @@
    else:
        lights = AmbientLights(device=device)

    # Initialize a perspective camera that represents a batch of different
    # Initialize an OpenGL perspective camera that represents a batch of different
    # viewing angles. All the cameras helper methods support mixed type inputs and
    # broadcasting. So we can view the camera from a fixed distance, and
    # broadcasting. So we can view the camera from a distance of dist=2.7, and
    # then specify elevation and azimuth angles for each viewpoint as tensors.
    R, T = look_at_view_transform(dist=distance, elev=elev, azim=azim)
    R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading.

@@ -20,9 +20,8 @@ from pytorch3d.implicitron.tools.config import (
)
from pytorch3d.renderer import CamerasBase, join_cameras_as_batch, PerspectiveCameras

from .dataset_base import DatasetBase
from .dataset_base import DatasetBase, FrameData
from .dataset_map_provider import DatasetMap, DatasetMapProviderBase, PathManagerFactory
from .frame_data import FrameData
from .utils import DATASET_TYPE_KNOWN, DATASET_TYPE_UNKNOWN

_SINGLE_SEQUENCE_NAME: str = "one_sequence"

@@ -47,8 +46,6 @@ class SingleSceneDataset(DatasetBase, Configurable):
    def __len__(self) -> int:
        return len(self.poses)

    # pyre-fixme[14]: `sequence_frames_in_order` overrides method defined in
    #  `DatasetBase` inconsistently.
    def sequence_frames_in_order(
        self, seq_name: str
    ) -> Iterator[Tuple[float, int, int]]:
@@ -70,8 +67,7 @@ class SingleSceneDataset(DatasetBase, Configurable):
            sequence_name=_SINGLE_SEQUENCE_NAME,
            sequence_category=self.object_name,
            camera=pose,
            # pyre-ignore
            image_size_hw=torch.tensor(image.shape[1:], dtype=torch.long),
            image_size_hw=torch.tensor(image.shape[1:]),
            image_rgb=image,
            fg_probability=fg_probability,
            frame_type=frame_type,

@@ -9,21 +9,10 @@ import dataclasses
import gzip
import json
from dataclasses import dataclass, Field, MISSING
from typing import (
    Any,
    cast,
    Dict,
    get_args,
    get_origin,
    IO,
    Optional,
    Tuple,
    Type,
    TypeVar,
    Union,
)
from typing import Any, cast, Dict, IO, Optional, Tuple, Type, TypeVar, Union

import numpy as np
from pytorch3d.common.datatypes import get_args, get_origin


_X = TypeVar("_X")

@@ -55,8 +44,6 @@ class MaskAnnotation:
    path: str
    # (soft) number of pixels in the mask; sum(Prob(fg | pixel))
    mass: Optional[float] = None
    # tight bounding box around the foreground mask
    bounding_box_xywh: Optional[Tuple[float, float, float, float]] = None


@dataclass

@@ -5,18 +5,10 @@
# LICENSE file in the root directory of this source tree.


import functools
import warnings
from pathlib import Path
from typing import List, Optional, Tuple, TypeVar, Union
from typing import List, Optional

import numpy as np
import torch
from PIL import Image

from pytorch3d.io import IO
from pytorch3d.renderer.cameras import PerspectiveCameras
from pytorch3d.structures.pointclouds import Pointclouds

DATASET_TYPE_TRAIN = "train"
DATASET_TYPE_TEST = "test"
@@ -24,26 +16,6 @@ DATASET_TYPE_KNOWN = "known"
DATASET_TYPE_UNKNOWN = "unseen"


class GenericWorkaround:
    """
    OmegaConf.structured has a weirdness when you try to apply
    it to a dataclass whose first base class is a Generic which is not
    Dict. The issue is with a function called get_dict_key_value_types
    in omegaconf/_utils.py.
    For example this fails:

        @dataclass(eq=False)
        class D(torch.utils.data.Dataset[int]):
            a: int = 3

        OmegaConf.structured(D)

    We avoid the problem by adding this class as an extra base class.
    """

    pass


def is_known_frame_scalar(frame_type: str) -> bool:
    """
    Given a single frame type corresponding to a single frame, return whether
@@ -80,286 +52,3 @@ def is_train_frame(
        dtype=torch.bool,
        device=device,
    )


def get_bbox_from_mask(
    mask: np.ndarray, thr: float, decrease_quant: float = 0.05
) -> Tuple[int, int, int, int]:
    # bbox in xywh
    masks_for_box = np.zeros_like(mask)
    while masks_for_box.sum() <= 1.0:
        masks_for_box = (mask > thr).astype(np.float32)
        thr -= decrease_quant
        if thr <= 0.0:
            warnings.warn(
                f"Empty masks_for_bbox (thr={thr}) => using full image.", stacklevel=1
            )

    x0, x1 = get_1d_bounds(masks_for_box.sum(axis=-2))
    y0, y1 = get_1d_bounds(masks_for_box.sum(axis=-1))

    return x0, y0, x1 - x0, y1 - y0


def crop_around_box(
    tensor: torch.Tensor, bbox: torch.Tensor, impath: str = ""
) -> torch.Tensor:
    # bbox is xyxy, where the upper bound is corrected with +1
    bbox = clamp_box_to_image_bounds_and_round(
        bbox,
        image_size_hw=tuple(tensor.shape[-2:]),
    )
    tensor = tensor[..., bbox[1] : bbox[3], bbox[0] : bbox[2]]
    assert all(c > 0 for c in tensor.shape), f"squashed image {impath}"
    return tensor


def clamp_box_to_image_bounds_and_round(
    bbox_xyxy: torch.Tensor,
    image_size_hw: Tuple[int, int],
) -> torch.LongTensor:
    bbox_xyxy = bbox_xyxy.clone()
    bbox_xyxy[[0, 2]] = torch.clamp(bbox_xyxy[[0, 2]], 0, image_size_hw[-1])
    bbox_xyxy[[1, 3]] = torch.clamp(bbox_xyxy[[1, 3]], 0, image_size_hw[-2])
    if not isinstance(bbox_xyxy, torch.LongTensor):
        bbox_xyxy = bbox_xyxy.round().long()
    return bbox_xyxy  # pyre-ignore [7]


T = TypeVar("T", bound=torch.Tensor)


def bbox_xyxy_to_xywh(xyxy: T) -> T:
    wh = xyxy[2:] - xyxy[:2]
    xywh = torch.cat([xyxy[:2], wh])
    return xywh  # pyre-ignore


def get_clamp_bbox(
    bbox: torch.Tensor,
    box_crop_context: float = 0.0,
    image_path: str = "",
) -> torch.Tensor:
    # box_crop_context: rate of expansion for bbox
    # returns possibly expanded bbox xyxy as float

    bbox = bbox.clone()  # do not edit bbox in place

    # increase box size
    if box_crop_context > 0.0:
        c = box_crop_context
        bbox = bbox.float()
        bbox[0] -= bbox[2] * c / 2
        bbox[1] -= bbox[3] * c / 2
        bbox[2] += bbox[2] * c
        bbox[3] += bbox[3] * c

    if (bbox[2:] <= 1.0).any():
        raise ValueError(
            f"squashed image {image_path}!! The bounding box contains no pixels."
        )

    bbox[2:] = torch.clamp(bbox[2:], 2)  # set min height, width to 2 along both axes
    bbox_xyxy = bbox_xywh_to_xyxy(bbox, clamp_size=2)

    return bbox_xyxy


def rescale_bbox(
    bbox: torch.Tensor,
    orig_res: Union[Tuple[int, int], torch.LongTensor],
    new_res: Union[Tuple[int, int], torch.LongTensor],
) -> torch.Tensor:
    assert bbox is not None
    assert np.prod(orig_res) > 1e-8
    # average ratio of dimensions
    # pyre-ignore
    rel_size = (new_res[0] / orig_res[0] + new_res[1] / orig_res[1]) / 2.0
    return bbox * rel_size


def bbox_xywh_to_xyxy(
    xywh: torch.Tensor, clamp_size: Optional[int] = None
) -> torch.Tensor:
    xyxy = xywh.clone()
    if clamp_size is not None:
        xyxy[2:] = torch.clamp(xyxy[2:], clamp_size)
    xyxy[2:] += xyxy[:2]
    return xyxy


def get_1d_bounds(arr: np.ndarray) -> Tuple[int, int]:
    nz = np.flatnonzero(arr)
    return nz[0], nz[-1] + 1


def resize_image(
    image: Union[np.ndarray, torch.Tensor],
    image_height: Optional[int],
    image_width: Optional[int],
    mode: str = "bilinear",
) -> Tuple[torch.Tensor, float, torch.Tensor]:

    if type(image) == np.ndarray:
        image = torch.from_numpy(image)

    if image_height is None or image_width is None:
        # skip the resizing
        return image, 1.0, torch.ones_like(image[:1])
    # takes a numpy array or tensor, returns a pytorch tensor
    minscale = min(
        image_height / image.shape[-2],
        image_width / image.shape[-1],
    )
    imre = torch.nn.functional.interpolate(
        image[None],
        scale_factor=minscale,
        mode=mode,
        align_corners=False if mode == "bilinear" else None,
        recompute_scale_factor=True,
    )[0]
    imre_ = torch.zeros(image.shape[0], image_height, image_width)
    imre_[:, 0 : imre.shape[1], 0 : imre.shape[2]] = imre
    mask = torch.zeros(1, image_height, image_width)
    mask[:, 0 : imre.shape[1], 0 : imre.shape[2]] = 1.0
    return imre_, minscale, mask
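
# --- Editor's sketch (not part of the diff) ---------------------------------
# resize_image (defined above) scales by the smaller of the two ratios and
# zero-pads the rest, reporting the valid region via the returned mask.
import torch

image = torch.rand(3, 100, 200)  # C, H, W
resized, scale, mask_crop = resize_image(image, image_height=50, image_width=50)
# scale = min(50 / 100, 50 / 200) = 0.25, so content occupies 25 x 50 pixels
assert resized.shape == (3, 50, 50) and abs(scale - 0.25) < 1e-6
assert (mask_crop[:, :25, :] == 1).all() and (mask_crop[:, 25:, :] == 0).all()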


def load_image(path: str) -> np.ndarray:
    with Image.open(path) as pil_im:
        im = np.array(pil_im.convert("RGB"))
    im = im.transpose((2, 0, 1))
    im = im.astype(np.float32) / 255.0
    return im


def load_mask(path: str) -> np.ndarray:
    with Image.open(path) as pil_im:
        mask = np.array(pil_im)
    mask = mask.astype(np.float32) / 255.0
    return mask[None]  # fake feature channel


def load_depth(path: str, scale_adjustment: float) -> np.ndarray:
    if not path.lower().endswith(".png"):
        raise ValueError('unsupported depth file name "%s"' % path)

    d = load_16big_png_depth(path) * scale_adjustment
    d[~np.isfinite(d)] = 0.0
    return d[None]  # fake feature channel


def load_16big_png_depth(depth_png: str) -> np.ndarray:
    with Image.open(depth_png) as depth_pil:
        # the image is stored with 16-bit depth, but PIL reads it as I (32 bit).
        # we cast it to uint16, then reinterpret as float16, then cast to float32
        depth = (
            np.frombuffer(np.array(depth_pil, dtype=np.uint16), dtype=np.float16)
            .astype(np.float32)
            .reshape((depth_pil.size[1], depth_pil.size[0]))
        )
    return depth


def load_1bit_png_mask(file: str) -> np.ndarray:
    with Image.open(file) as pil_im:
        mask = (np.array(pil_im.convert("L")) > 0.0).astype(np.float32)
    return mask


def load_depth_mask(path: str) -> np.ndarray:
    if not path.lower().endswith(".png"):
        raise ValueError('unsupported depth mask file name "%s"' % path)
    m = load_1bit_png_mask(path)
    return m[None]  # fake feature channel


def safe_as_tensor(data, dtype):
    return torch.tensor(data, dtype=dtype) if data is not None else None


def _convert_ndc_to_pixels(
    focal_length: torch.Tensor,
    principal_point: torch.Tensor,
    image_size_wh: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    half_image_size = image_size_wh / 2
    rescale = half_image_size.min()
    principal_point_px = half_image_size - principal_point * rescale
    focal_length_px = focal_length * rescale
    return focal_length_px, principal_point_px


def _convert_pixels_to_ndc(
    focal_length_px: torch.Tensor,
    principal_point_px: torch.Tensor,
    image_size_wh: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    half_image_size = image_size_wh / 2
    rescale = half_image_size.min()
    principal_point = (half_image_size - principal_point_px) / rescale
    focal_length = focal_length_px / rescale
    return focal_length, principal_point
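
# --- Editor's sketch (not part of the diff) ---------------------------------
# The two converters above are exact inverses for any image size; a quick
# runnable round-trip check with torch.
import torch

image_size_wh = torch.tensor([200.0, 100.0])
focal_ndc = torch.tensor([2.0, 2.0])
pp_ndc = torch.tensor([0.1, -0.2])

f_px, pp_px = _convert_ndc_to_pixels(focal_ndc, pp_ndc, image_size_wh)
f_back, pp_back = _convert_pixels_to_ndc(f_px, pp_px, image_size_wh)
assert torch.allclose(f_back, focal_ndc) and torch.allclose(pp_back, pp_ndc)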


def adjust_camera_to_bbox_crop_(
    camera: PerspectiveCameras,
    image_size_wh: torch.Tensor,
    clamp_bbox_xywh: torch.Tensor,
) -> None:
    if len(camera) != 1:
        raise ValueError("Adjusting currently works with singleton cameras only")

    focal_length_px, principal_point_px = _convert_ndc_to_pixels(
        camera.focal_length[0],
        camera.principal_point[0],  # pyre-ignore
        image_size_wh,
    )
    principal_point_px_cropped = principal_point_px - clamp_bbox_xywh[:2]

    focal_length, principal_point_cropped = _convert_pixels_to_ndc(
        focal_length_px,
        principal_point_px_cropped,
        clamp_bbox_xywh[2:],
    )

    camera.focal_length = focal_length[None]
    camera.principal_point = principal_point_cropped[None]  # pyre-ignore


def adjust_camera_to_image_scale_(
    camera: PerspectiveCameras,
    original_size_wh: torch.Tensor,
    new_size_wh: torch.LongTensor,
) -> PerspectiveCameras:
    focal_length_px, principal_point_px = _convert_ndc_to_pixels(
        camera.focal_length[0],
        camera.principal_point[0],  # pyre-ignore
        original_size_wh,
    )

    # now scale and convert from pixels to NDC
    image_size_wh_output = new_size_wh.float()
    scale = (image_size_wh_output / original_size_wh).min(dim=-1, keepdim=True).values
    focal_length_px_scaled = focal_length_px * scale
    principal_point_px_scaled = principal_point_px * scale

    focal_length_scaled, principal_point_scaled = _convert_pixels_to_ndc(
        focal_length_px_scaled,
        principal_point_px_scaled,
        image_size_wh_output,
    )
    camera.focal_length = focal_length_scaled[None]
    camera.principal_point = principal_point_scaled[None]  # pyre-ignore


# NOTE this cache is per-worker; workers are implemented as processes.
# each batch is loaded and collated by a single worker;
# since sequences tend to co-occur within batches, this is useful.
@functools.lru_cache(maxsize=256)
def load_pointcloud(pcl_path: Union[str, Path], max_points: int = 0) -> Pointclouds:
    pcl = IO().load_pointcloud(pcl_path)
    if max_points > 0:
        pcl = pcl.subsample(max_points)

    return pcl
||||
@@ -10,7 +10,7 @@ import torch
|
||||
from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud
|
||||
from pytorch3d.structures import Pointclouds
|
||||
|
||||
from .frame_data import FrameData
|
||||
from .dataset_base import FrameData
|
||||
from .json_index_dataset import JsonIndexDataset
|
||||
|
||||
|
||||
|
||||
@@ -130,7 +130,7 @@ def evaluate_dbir_for_category(
|
||||
raise ValueError("Image size should be set in the dataset")
|
||||
|
||||
# init the simple DBIR model
|
||||
model = ModelDBIR(
|
||||
model = ModelDBIR( # pyre-ignore[28]: c’tor implicitly overridden
|
||||
render_image_width=image_size,
|
||||
render_image_height=image_size,
|
||||
bg_color=bg_color,
|
||||
@@ -153,12 +153,21 @@ def evaluate_dbir_for_category(
|
||||
preds["implicitron_render"],
|
||||
bg_color=bg_color,
|
||||
lpips_model=lpips_model,
|
||||
source_cameras=data_source.all_train_cameras,
|
||||
)
|
||||
)
|
||||
|
||||
if task == Task.SINGLE_SEQUENCE:
|
||||
camera_difficulty_bin_breaks = 0.97, 0.98
|
||||
multisequence_evaluation = False
|
||||
else:
|
||||
camera_difficulty_bin_breaks = 2.0 / 3, 5.0 / 6
|
||||
multisequence_evaluation = True
|
||||
|
||||
category_result_flat, category_result = summarize_nvs_eval_results(
|
||||
per_batch_eval_results,
|
||||
is_multisequence=task != Task.SINGLE_SEQUENCE,
|
||||
camera_difficulty_bin_breaks=camera_difficulty_bin_breaks,
|
||||
is_multisequence=multisequence_evaluation,
|
||||
)
|
||||
|
||||
return category_result["results"]
|
||||
|
||||
@@ -14,15 +14,17 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, TYPE_CHECKING, Un
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from pytorch3d.implicitron.dataset.frame_data import FrameData
|
||||
from pytorch3d.implicitron.dataset.utils import is_train_frame
|
||||
from pytorch3d.implicitron.dataset.dataset_base import FrameData
|
||||
from pytorch3d.implicitron.dataset.utils import is_known_frame, is_train_frame
|
||||
from pytorch3d.implicitron.models.base_model import ImplicitronRender
|
||||
from pytorch3d.implicitron.tools import vis_utils
|
||||
from pytorch3d.implicitron.tools.camera_utils import volumetric_camera_overlaps
|
||||
from pytorch3d.implicitron.tools.image_utils import mask_background
|
||||
from pytorch3d.implicitron.tools.metric_utils import calc_psnr, eval_depth, iou, rgb_l1
|
||||
from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud
|
||||
from pytorch3d.implicitron.tools.vis_utils import make_depth_image
|
||||
from pytorch3d.renderer.cameras import PerspectiveCameras
|
||||
from pytorch3d.renderer.camera_utils import join_cameras_as_batch
|
||||
from pytorch3d.renderer.cameras import CamerasBase, PerspectiveCameras
|
||||
from pytorch3d.vis.plotly_vis import plot_scene
|
||||
from tabulate import tabulate
|
||||
|
||||
@@ -147,6 +149,7 @@ def eval_batch(
|
||||
visualize: bool = False,
|
||||
visualize_visdom_env: str = "eval_debug",
|
||||
break_after_visualising: bool = True,
|
||||
source_cameras: Optional[CamerasBase] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Produce performance metrics for a single batch of new-view synthesis
|
||||
@@ -168,6 +171,8 @@ def eval_batch(
|
||||
ground truth.
|
||||
lpips_model: A pre-trained model for evaluating the LPIPS metric.
|
||||
visualize: If True, visualizes the results to Visdom.
|
||||
source_cameras: A list of all training cameras for evaluating the
|
||||
difficulty of the target views.
|
||||
|
||||
Returns:
|
||||
results: A dictionary holding evaluation metrics.
|
||||
@@ -219,10 +224,17 @@ def eval_batch(
|
||||
frame_type = [frame_type]
|
||||
|
||||
is_train = is_train_frame(frame_type)
|
||||
if len(is_train) > 1 and (is_train[1] != is_train[1:]).any():
|
||||
if not (is_train[0] == is_train).all():
|
||||
raise ValueError("All frames in the eval batch have to be either train/test.")
|
||||
|
||||
# pyre-fixme[16]: `Optional` has no attribute `device`.
|
||||
is_known = is_known_frame(frame_type, device=frame_data.image_rgb.device)
|
||||
|
||||
if not ((is_known[1:] == 1).all() and (is_known[0] == 0).all()):
|
||||
raise ValueError(
|
||||
"All (conditioning) frames in the eval batch have to be either train/test."
|
||||
)
|
||||
"For evaluation the first element of the batch has to be"
|
||||
+ " a target view while the rest should be source views."
|
||||
) # TODO: do we need to enforce this?
|
||||
|
||||
for k in [
|
||||
"depth_map",
|
||||
@@ -353,9 +365,18 @@ def eval_batch(
|
||||
# convert all metrics to floats
|
||||
results = {k: float(v) for k, v in results.items()}
|
||||
|
||||
if source_cameras is None:
|
||||
# pyre-fixme[16]: Optional has no attribute __getitem__
|
||||
source_cameras = frame_data.camera[torch.where(is_known)[0]]
|
||||
|
||||
results["meta"] = {
|
||||
# calculate the camera difficulties and add to results
|
||||
"camera_difficulty": calculate_camera_difficulties(
|
||||
frame_data.camera[0],
|
||||
source_cameras,
|
||||
)[0].item(),
|
||||
# store the size of the batch (corresponds to n_src_views+1)
|
||||
"batch_size": len(frame_type),
|
||||
"batch_size": int(is_known.numel()),
|
||||
# store the type of the target frame
|
||||
# pyre-fixme[16]: `None` has no attribute `__getitem__`.
|
||||
"frame_type": str(frame_data.frame_type[0]),
|
||||
@@ -385,6 +406,33 @@ def average_per_batch_results(
|
||||
}
|
||||
|
||||
|
||||
def calculate_camera_difficulties(
|
||||
cameras_target: CamerasBase,
|
||||
cameras_source: CamerasBase,
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Calculate the difficulties of the target cameras, given a set of known
|
||||
cameras `cameras_source`.
|
||||
|
||||
Returns:
|
||||
a tensor of shape (len(cameras_target),)
|
||||
"""
|
||||
ious = [
|
||||
volumetric_camera_overlaps(
|
||||
join_cameras_as_batch(
|
||||
# pyre-fixme[6]: Expected `CamerasBase` for 1st param but got
|
||||
# `Optional[pytorch3d.renderer.utils.TensorProperties]`.
|
||||
[cameras_target[cami], cameras_source.to(cameras_target.device)]
|
||||
)
|
||||
)[0, :]
|
||||
for cami in range(cameras_target.R.shape[0])
|
||||
]
|
||||
camera_difficulties = torch.stack(
|
||||
[_reduce_camera_iou_overlap(iou[1:]) for iou in ious]
|
||||
)
|
||||
return camera_difficulties
|
||||
|
||||
|
||||
def _reduce_camera_iou_overlap(ious: torch.Tensor, topk: int = 2) -> torch.Tensor:
|
||||
"""
|
||||
Calculate the final camera difficulty by computing the average of the
|
||||
@@ -410,7 +458,8 @@ def _get_camera_difficulty_bin_edges(camera_difficulty_bin_breaks: Tuple[float,
|
||||
def summarize_nvs_eval_results(
|
||||
per_batch_eval_results: List[Dict[str, Any]],
|
||||
is_multisequence: bool,
|
||||
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
|
||||
camera_difficulty_bin_breaks: Tuple[float, float],
|
||||
):
|
||||
"""
|
||||
Compile the per-batch evaluation results `per_batch_eval_results` into
|
||||
a set of aggregate metrics. The produced metrics depend on is_multisequence.
|
||||
@@ -433,12 +482,19 @@ def summarize_nvs_eval_results(
|
||||
batch_sizes = torch.tensor(
|
||||
[r["meta"]["batch_size"] for r in per_batch_eval_results]
|
||||
).long()
|
||||
|
||||
camera_difficulty = torch.tensor(
|
||||
[r["meta"]["camera_difficulty"] for r in per_batch_eval_results]
|
||||
).float()
|
||||
is_train = is_train_frame([r["meta"]["frame_type"] for r in per_batch_eval_results])
|
||||
|
||||
# init the result database dict
|
||||
results = []
|
||||
|
||||
diff_bin_edges, diff_bin_names = _get_camera_difficulty_bin_edges(
|
||||
camera_difficulty_bin_breaks
|
||||
)
|
||||
n_diff_edges = diff_bin_edges.numel()
|
||||
|
||||
# add per set averages
|
||||
for SET in eval_sets:
|
||||
if SET is None:
|
||||
@@ -448,17 +504,26 @@ def summarize_nvs_eval_results(
|
||||
ok_set = is_train == int(SET == "train")
|
||||
set_name = SET
|
||||
|
||||
# average over all results
|
||||
bin_results = average_per_batch_results(
|
||||
per_batch_eval_results, idx=torch.where(ok_set)[0]
|
||||
)
|
||||
results.append(
|
||||
{
|
||||
"subset": set_name,
|
||||
"subsubset": "diff=all",
|
||||
"metrics": bin_results,
|
||||
}
|
||||
)
|
||||
# eval each difficulty bin, including a full average result (diff_bin=None)
|
||||
for diff_bin in [None, *list(range(n_diff_edges - 1))]:
|
||||
if diff_bin is None:
|
||||
# average over all results
|
||||
in_bin = ok_set
|
||||
diff_bin_name = "all"
|
||||
else:
|
||||
b1, b2 = diff_bin_edges[diff_bin : (diff_bin + 2)]
|
||||
in_bin = ok_set & (camera_difficulty > b1) & (camera_difficulty <= b2)
|
||||
diff_bin_name = diff_bin_names[diff_bin]
|
||||
bin_results = average_per_batch_results(
|
||||
per_batch_eval_results, idx=torch.where(in_bin)[0]
|
||||
)
|
||||
results.append(
|
||||
{
|
||||
"subset": set_name,
|
||||
"subsubset": f"diff={diff_bin_name}",
|
||||
"metrics": bin_results,
|
||||
}
|
||||
)
|
||||
|
||||
if is_multisequence:
|
||||
# split based on n_src_views
|
||||
@@ -487,7 +552,7 @@ def _get_flat_nvs_metric_key(result, metric_name) -> str:
    return metric_key


def flatten_nvs_results(results) -> Dict[str, Any]:
def flatten_nvs_results(results):
    """
    Takes input `results` list of dicts of the form::

@@ -506,6 +571,7 @@ def flatten_nvs_results(results) -> Dict[str, Any]:
            'subset=train/test/...|subsubset=src=1/src=2/...': nvs_eval_metrics,
            ...
        }

    """
    results_flat = {}
    for result in results:

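
# [Editor's note] Illustrative sketch, not part of the diff. The flattening
# `flatten_nvs_results` performs, on a minimal made-up input (the metric
# names are hypothetical):
results = [
    {"subset": "test", "subsubset": "diff=all", "metrics": {"psnr": 24.0}},
    {"subset": "test", "subsubset": "diff=easy", "metrics": {"psnr": 31.5}},
]
results_flat = {
    f"subset={r['subset']}|subsubset={r['subsubset']}": r["metrics"] for r in results
}
# {'subset=test|subsubset=diff=all': {'psnr': 24.0},
#  'subset=test|subsubset=diff=easy': {'psnr': 31.5}}
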
@@ -23,6 +23,7 @@ from pytorch3d.implicitron.tools.config import (
    ReplaceableBase,
    run_auto_creation,
)
from pytorch3d.renderer.cameras import CamerasBase
from torch.utils.data import DataLoader

logger = logging.getLogger(__name__)
@@ -49,9 +50,12 @@ class EvaluatorBase(ReplaceableBase):
class ImplicitronEvaluator(EvaluatorBase):
    """
    Evaluate the results of Implicitron training.

    Members:
        camera_difficulty_bin_breaks: low/medium values to divide camera difficulties into
            [0-eps, low, medium, 1+eps].
    """

    # UNUSED; preserved for compatibility purposes
    camera_difficulty_bin_breaks: Tuple[float, ...] = 0.97, 0.98

    def __post_init__(self):
@@ -61,6 +65,7 @@ class ImplicitronEvaluator(EvaluatorBase):
        self,
        model: ImplicitronModelBase,
        dataloader: DataLoader,
        all_train_cameras: Optional[CamerasBase],
        device: torch.device,
        dump_to_json: bool = False,
        exp_dir: Optional[str] = None,
@@ -74,6 +79,7 @@ class ImplicitronEvaluator(EvaluatorBase):
        Args:
            model: A (trained) model to evaluate.
            dataloader: A test dataloader.
            all_train_cameras: Camera instances we used for training.
            device: A torch device.
            dump_to_json: If True, will dump the results to a json file.
            exp_dir: Root experiment directory.
@@ -117,12 +123,16 @@ class ImplicitronEvaluator(EvaluatorBase):
                    implicitron_render,
                    bg_color="black",
                    lpips_model=lpips_model,
                    source_cameras=(  # None will make it use batch's known cameras
                        None if self.is_multisequence else all_train_cameras
                    ),
                )
            )

        _, category_result = evaluate.summarize_nvs_eval_results(
            per_batch_eval_results,
            self.is_multisequence,
            self.camera_difficulty_bin_breaks,
        )

        results = category_result["results"]
@@ -149,11 +159,14 @@ def _dump_to_json(

def _get_eval_frame_data(frame_data: Any) -> Any:
    """
    Masks the target image data to make sure we cannot use it at model evaluation
    time. Assumes the first batch element is target, the rest are source.
    Masks the unknown image data to make sure we cannot use it at model evaluation time.
    """
    frame_data_for_eval = copy.deepcopy(frame_data)
    is_known = ds_utils.is_known_frame(frame_data.frame_type).type_as(
        frame_data.image_rgb
    )[:, None, None, None]
    for k in ("image_rgb", "depth_map", "fg_probability", "mask_crop"):
        value = getattr(frame_data_for_eval, k)
        value[0].zero_()
        value_masked = value.clone() * is_known if value is not None else None
        setattr(frame_data_for_eval, k, value_masked)
    return frame_data_for_eval

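
# [Editor's note] Illustrative sketch, not part of the diff. The effect of
# the masking in `_get_eval_frame_data`: frames flagged as unknown (here,
# frame 0, the evaluation target) are zeroed, known source frames pass through.
import torch

image_rgb = torch.ones(3, 3, 8, 8)  # a made-up batch of 3 frames
is_known = torch.tensor([0.0, 1.0, 1.0])[:, None, None, None]
masked = image_rgb.clone() * is_known
assert masked[0].abs().sum() == 0
assert torch.equal(masked[1:], image_rgb[1:])
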
@@ -3,8 +3,3 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Allows registering the models
# see: pytorch3d.implicitron.tools.config.registry:register
from pytorch3d.implicitron.models.generic_model import GenericModel
from pytorch3d.implicitron.models.overfit_model import OverfitModel

@@ -8,11 +8,11 @@ from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional

import torch

from pytorch3d.implicitron.models.renderer.base import EvaluationMode
from pytorch3d.implicitron.tools.config import ReplaceableBase
from pytorch3d.renderer.cameras import CamerasBase

from .renderer.base import EvaluationMode


@dataclass
class ImplicitronRender:
@@ -49,6 +49,9 @@ class ImplicitronModelBase(ReplaceableBase, torch.nn.Module):
    # the training loop.
    log_vars: List[str] = field(default_factory=lambda: ["objective"])

    def __init__(self) -> None:
        super().__init__()

    def forward(
        self,
        *,  # force keyword-only arguments

@@ -15,6 +15,9 @@ class FeatureExtractorBase(ReplaceableBase, torch.nn.Module):
    Base class for an extractor of a set of features from images.
    """

    def __init__(self):
        super().__init__()

    def get_feat_dims(self) -> int:
        """
        Returns:

@@ -78,6 +78,7 @@ class ResNetFeatureExtractor(FeatureExtractorBase):
    feature_rescale: float = 1.0

    def __post_init__(self):
        super().__init__()
        if self.normalize_image:
            # register buffers needed to normalize the image
            for k, v in (("_resnet_mean", _RESNET_MEAN), ("_resnet_std", _RESNET_STD)):

@@ -9,56 +9,66 @@
# which are part of implicitron. They ensure that the registry is prepopulated.

import logging
import warnings
from dataclasses import field
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union

import torch
import tqdm
from omegaconf import DictConfig

from pytorch3d.implicitron.models.base_model import (
    ImplicitronModelBase,
    ImplicitronRender,
)
from pytorch3d.implicitron.models.feature_extractor import FeatureExtractorBase
from pytorch3d.implicitron.models.global_encoder.global_encoder import GlobalEncoderBase
from pytorch3d.implicitron.models.implicit_function.base import ImplicitFunctionBase
from pytorch3d.common.compat import prod
from pytorch3d.implicitron.models.metrics import (
    RegularizationMetricsBase,
    ViewMetricsBase,
)

from pytorch3d.implicitron.models.renderer.base import (
    BaseRenderer,
    EvaluationMode,
    ImplicitFunctionWrapper,
    ImplicitronRayBundle,
    RendererOutput,
    RenderSamplingMode,
)
from pytorch3d.implicitron.models.renderer.ray_sampler import RaySamplerBase

from pytorch3d.implicitron.models.utils import (
    apply_chunked,
    chunk_generator,
    log_loss_weights,
    preprocess_input,
    weighted_sum_losses,
)
from pytorch3d.implicitron.models.view_pooler.view_pooler import ViewPooler
from pytorch3d.implicitron.tools import vis_utils
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
from pytorch3d.implicitron.tools import image_utils, vis_utils
from pytorch3d.implicitron.tools.config import (
    expand_args_fields,
    registry,
    run_auto_creation,
)

from pytorch3d.implicitron.tools.rasterize_mc import rasterize_sparse_ray_bundle
from pytorch3d.implicitron.tools.utils import cat_dataclass
from pytorch3d.renderer import utils as rend_utils
from pytorch3d.renderer.cameras import CamerasBase

from pytorch3d.renderer.cameras import CamerasBase

if TYPE_CHECKING:
    from visdom import Visdom

from .base_model import ImplicitronModelBase, ImplicitronRender
from .feature_extractor import FeatureExtractorBase
from .feature_extractor.resnet_feature_extractor import ResNetFeatureExtractor  # noqa
from .global_encoder.global_encoder import GlobalEncoderBase
from .implicit_function.base import ImplicitFunctionBase
from .implicit_function.idr_feature_field import IdrFeatureField  # noqa
from .implicit_function.neural_radiance_field import (  # noqa
    NeRFormerImplicitFunction,
    NeuralRadianceFieldImplicitFunction,
)
from .implicit_function.scene_representation_networks import (  # noqa
    SRNHyperNetImplicitFunction,
    SRNImplicitFunction,
)
from .implicit_function.voxel_grid_implicit_function import (  # noqa
    VoxelGridImplicitFunction,
)

from .renderer.base import (
    BaseRenderer,
    EvaluationMode,
    ImplicitFunctionWrapper,
    RendererOutput,
    RenderSamplingMode,
)
from .renderer.lstm_renderer import LSTMRenderer  # noqa
from .renderer.multipass_ea import MultiPassEmissionAbsorptionRenderer  # noqa
from .renderer.ray_sampler import RaySamplerBase
from .renderer.sdf_renderer import SignedDistanceFunctionRenderer  # noqa
from .view_pooler.view_pooler import ViewPooler


logger = logging.getLogger(__name__)

@@ -293,38 +303,9 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
        ]
    )

    @classmethod
    def pre_expand(cls) -> None:
        # use try/finally to bypass cinder's lazy imports
        try:
            from pytorch3d.implicitron.models.feature_extractor.resnet_feature_extractor import (  # noqa: F401, B950
                ResNetFeatureExtractor,
            )
            from pytorch3d.implicitron.models.implicit_function.idr_feature_field import (  # noqa: F401, B950
                IdrFeatureField,
            )
            from pytorch3d.implicitron.models.implicit_function.neural_radiance_field import (  # noqa: F401, B950
                NeRFormerImplicitFunction,
            )
            from pytorch3d.implicitron.models.implicit_function.scene_representation_networks import (  # noqa: F401, B950
                SRNHyperNetImplicitFunction,
            )
            from pytorch3d.implicitron.models.implicit_function.voxel_grid_implicit_function import (  # noqa: F401, B950
                VoxelGridImplicitFunction,
            )
            from pytorch3d.implicitron.models.renderer.lstm_renderer import (  # noqa: F401
                LSTMRenderer,
            )
            from pytorch3d.implicitron.models.renderer.multipass_ea import (  # noqa
                MultiPassEmissionAbsorptionRenderer,
            )
            from pytorch3d.implicitron.models.renderer.sdf_renderer import (  # noqa: F401
                SignedDistanceFunctionRenderer,
            )
        finally:
            pass

    def __post_init__(self):
        super().__init__()

        if self.view_pooler_enabled:
            if self.image_feature_extractor_class_type is None:
                raise ValueError(
@@ -334,7 +315,7 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13

        self._implicit_functions = self._construct_implicit_functions()

        log_loss_weights(self.loss_weights, logger)
        self.log_loss_weights()

    def forward(
        self,
@@ -378,14 +359,8 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
            preds: A dictionary containing all outputs of the forward pass including the
                rendered images, depths, masks, losses and other metrics.
        """
        image_rgb, fg_probability, depth_map = preprocess_input(
            image_rgb,
            fg_probability,
            depth_map,
            self.mask_images,
            self.mask_depths,
            self.mask_threshold,
            self.bg_color,
        image_rgb, fg_probability, depth_map = self._preprocess_input(
            image_rgb, fg_probability, depth_map
        )

        # Obtain the batch size from the camera as this is the only required input.
@@ -470,12 +445,12 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
        for func in self._implicit_functions:
            func.bind_args(**custom_args)

        inputs_to_be_chunked = {}
        chunked_renderer_inputs = {}
        if fg_probability is not None and self.renderer.requires_object_mask():
            sampled_fb_prob = rend_utils.ndc_grid_sample(
                fg_probability[:n_targets], ray_bundle.xys, mode="nearest"
            )
            inputs_to_be_chunked["object_mask"] = sampled_fb_prob > 0.5
            chunked_renderer_inputs["object_mask"] = sampled_fb_prob > 0.5

        # (5)-(6) Implicit function evaluation and Rendering
        rendered = self._render(
@@ -483,7 +458,7 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
            sampling_mode=sampling_mode,
            evaluation_mode=evaluation_mode,
            implicit_functions=self._implicit_functions,
            inputs_to_be_chunked=inputs_to_be_chunked,
            chunked_inputs=chunked_renderer_inputs,
        )

        # Unbind the custom arguments to prevent pytorch from storing
@@ -547,18 +522,30 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
            raise AssertionError("Unreachable state")

        # (7) Compute losses
        # finally get the optimization objective using self.loss_weights
        objective = self._get_objective(preds)
        if objective is not None:
            preds["objective"] = objective

        return preds

    def _get_objective(self, preds: Dict[str, torch.Tensor]) -> Optional[torch.Tensor]:
    def _get_objective(self, preds) -> Optional[torch.Tensor]:
        """
        A helper function to compute the overall loss as the dot product
        of individual loss functions with the corresponding weights.
        """
        return weighted_sum_losses(preds, self.loss_weights)
        losses_weighted = [
            preds[k] * float(w)
            for k, w in self.loss_weights.items()
            if (k in preds and w != 0.0)
        ]
        if len(losses_weighted) == 0:
            warnings.warn("No main objective found.")
            return None
        loss = sum(losses_weighted)
        assert torch.is_tensor(loss)
        # pyre-fixme[7]: Expected `Optional[Tensor]` but got `int`.
        return loss

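
# [Editor's note] Illustrative sketch, not part of the diff. The replacement
# `_get_objective` above is a plain weighted sum over whichever losses are
# present with a non-zero weight; with made-up inputs it reduces to:
import torch

preds = {"loss_rgb_mse": torch.tensor(0.2), "loss_mask_bce": torch.tensor(0.5)}
loss_weights = {"loss_rgb_mse": 1.0, "loss_mask_bce": 0.0, "loss_eikonal": 0.1}
objective = sum(
    preds[k] * float(w) for k, w in loss_weights.items() if k in preds and w != 0.0
)
# Only loss_rgb_mse contributes: zero weights and missing predictions are
# skipped, so objective == tensor(0.2000).
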
    def visualize(
        self,
@@ -590,7 +577,7 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
        self,
        *,
        ray_bundle: ImplicitronRayBundle,
        inputs_to_be_chunked: Dict[str, torch.Tensor],
        chunked_inputs: Dict[str, torch.Tensor],
        sampling_mode: RenderSamplingMode,
        **kwargs,
    ) -> RendererOutput:
@@ -598,7 +585,7 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
        Args:
            ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
                sampled rendering rays.
            inputs_to_be_chunked: A collection of tensors of shape `(B, _, H, W)`. E.g.
            chunked_inputs: A collection of tensors of shape `(B, _, H, W)`. E.g.
                SignedDistanceFunctionRenderer requires "object_mask", shape
                (B, 1, H, W), the silhouette of the object in the image. When
                chunking, they are passed to the renderer as shape
@@ -610,27 +597,30 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
            An instance of RendererOutput
        """
        if sampling_mode == RenderSamplingMode.FULL_GRID and self.chunk_size_grid > 0:
            return apply_chunked(
            return _apply_chunked(
                self.renderer,
                chunk_generator(
                _chunk_generator(
                    self.chunk_size_grid,
                    ray_bundle,
                    inputs_to_be_chunked,
                    chunked_inputs,
                    self.tqdm_trigger_threshold,
                    **kwargs,
                ),
                lambda batch: torch.cat(batch, dim=1).reshape(
                    *ray_bundle.lengths.shape[:-1], -1
                ),
                lambda batch: _tensor_collator(batch, ray_bundle.lengths.shape[:-1]),
            )
        else:
            # pyre-fixme[29]: `BaseRenderer` is not a function.
            return self.renderer(
                ray_bundle=ray_bundle,
                **inputs_to_be_chunked,
                **chunked_inputs,
                **kwargs,
            )

    def _get_global_encoder_encoding_dim(self) -> int:
        if self.global_encoder is None:
            return 0
        return self.global_encoder.get_encoding_dim()

    def _get_viewpooled_feature_dim(self) -> int:
        if self.view_pooler is None:
            return 0
@@ -722,29 +712,30 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
        function(s) are initialized.
        """
        extra_args = {}
        global_encoder_dim = (
            0 if self.global_encoder is None else self.global_encoder.get_encoding_dim()
        )
        viewpooled_feature_dim = self._get_viewpooled_feature_dim()

        if self.implicit_function_class_type in (
            "NeuralRadianceFieldImplicitFunction",
            "NeRFormerImplicitFunction",
        ):
            extra_args["latent_dim"] = viewpooled_feature_dim + global_encoder_dim
            extra_args["latent_dim"] = (
                self._get_viewpooled_feature_dim()
                + self._get_global_encoder_encoding_dim()
            )
            extra_args["color_dim"] = self.render_features_dimensions

        if self.implicit_function_class_type == "IdrFeatureField":
            extra_args["feature_vector_size"] = self.render_features_dimensions
            extra_args["encoding_dim"] = global_encoder_dim
            extra_args["encoding_dim"] = self._get_global_encoder_encoding_dim()

        if self.implicit_function_class_type == "SRNImplicitFunction":
            extra_args["latent_dim"] = viewpooled_feature_dim + global_encoder_dim
            extra_args["latent_dim"] = (
                self._get_viewpooled_feature_dim()
                + self._get_global_encoder_encoding_dim()
            )

        # srn_hypernet preprocessing
        if self.implicit_function_class_type == "SRNHyperNetImplicitFunction":
            extra_args["latent_dim"] = viewpooled_feature_dim
            extra_args["latent_dim_hypernet"] = global_encoder_dim
            extra_args["latent_dim"] = self._get_viewpooled_feature_dim()
            extra_args["latent_dim_hypernet"] = self._get_global_encoder_encoding_dim()

        # check that for srn, srn_hypernet, idr we have self.num_passes=1
        implicit_function_type = registry.get(
@@ -771,3 +762,147 @@ class GenericModel(ImplicitronModelBase):  # pyre-ignore: 13
            for _ in range(self.num_passes)
        ]
        return torch.nn.ModuleList(implicit_functions_list)

    def log_loss_weights(self) -> None:
        """
        Print a table of the loss weights.
        """
        loss_weights_message = (
            "-------\nloss_weights:\n"
            + "\n".join(f"{k:40s}: {w:1.2e}" for k, w in self.loss_weights.items())
            + "-------"
        )
        logger.info(loss_weights_message)

    def _preprocess_input(
        self,
        image_rgb: Optional[torch.Tensor],
        fg_probability: Optional[torch.Tensor],
        depth_map: Optional[torch.Tensor],
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Helper function to preprocess the input images and optional depth maps
        to apply masking if required.

        Args:
            image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images
                corresponding to the source viewpoints from which features will be extracted
            fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch
                of foreground masks with values in [0, 1].
            depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps.

        Returns:
            Modified image_rgb, fg_mask, depth_map
        """
        if image_rgb is not None and image_rgb.ndim == 3:
            # The FrameData object is used for both frames and batches of frames,
            # and a user might get this error if those were confused.
            # Perhaps a user has a FrameData `fd` representing a single frame and
            # wrote something like `model(**fd)` instead of
            # `model(**fd.collate([fd]))`.
            raise ValueError(
                "Model received unbatched inputs. "
                + "Perhaps they came from a FrameData which had not been collated."
            )

        fg_mask = fg_probability
        if fg_mask is not None and self.mask_threshold > 0.0:
            # threshold masks
            warnings.warn("Thresholding masks!")
            fg_mask = (fg_mask >= self.mask_threshold).type_as(fg_mask)

        if self.mask_images and fg_mask is not None and image_rgb is not None:
            # mask the image
            warnings.warn("Masking images!")
            image_rgb = image_utils.mask_background(
                image_rgb, fg_mask, dim_color=1, bg_color=torch.tensor(self.bg_color)
            )

        if self.mask_depths and fg_mask is not None and depth_map is not None:
            # mask the depths
            assert (
                self.mask_threshold > 0.0
            ), "Depths should be masked only with thresholded masks"
            warnings.warn("Masking depths!")
            depth_map = depth_map * fg_mask

        return image_rgb, fg_mask, depth_map

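
# [Editor's note] Illustrative sketch, not part of the diff. What the new
# `_preprocess_input` does when all masking options are on, with
# mask_threshold=0.5 and a black background (plain tensor ops stand in for
# image_utils.mask_background):
import torch

fg_probability = torch.tensor([[[[0.9, 0.2], [0.6, 0.4]]]])  # (1, 1, 2, 2)
image_rgb = torch.ones(1, 3, 2, 2)
depth_map = torch.full((1, 1, 2, 2), 2.0)
fg_mask = (fg_probability >= 0.5).type_as(fg_probability)  # hard 0/1 mask
image_rgb = image_rgb * fg_mask  # bg_color == (0, 0, 0)
depth_map = depth_map * fg_mask
# Pixels whose foreground probability is below the threshold are zeroed in
# both the image and the depth map.
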

def _apply_chunked(func, chunk_generator, tensor_collator):
    """
    Helper function to apply a function on a sequence of
    chunked inputs yielded by a generator and collate
    the result.
    """
    processed_chunks = [
        func(*chunk_args, **chunk_kwargs)
        for chunk_args, chunk_kwargs in chunk_generator
    ]

    return cat_dataclass(processed_chunks, tensor_collator)


def _tensor_collator(batch, new_dims) -> torch.Tensor:
    """
    Helper function to reshape the batch to the desired shape
    """
    return torch.cat(batch, dim=1).reshape(*new_dims, -1)


def _chunk_generator(
    chunk_size: int,
    ray_bundle: ImplicitronRayBundle,
    chunked_inputs: Dict[str, torch.Tensor],
    tqdm_trigger_threshold: int,
    *args,
    **kwargs,
):
    """
    Helper function which yields chunks of rays from the
    input ray_bundle, to be used when the number of rays is
    large and will not fit in memory for rendering.
    """
    (
        batch_size,
        *spatial_dim,
        n_pts_per_ray,
    ) = ray_bundle.lengths.shape  # B x ... x n_pts_per_ray
    if n_pts_per_ray > 0 and chunk_size % n_pts_per_ray != 0:
        raise ValueError(
            f"chunk_size_grid ({chunk_size}) should be divisible "
            f"by n_pts_per_ray ({n_pts_per_ray})"
        )

    n_rays = prod(spatial_dim)
    # special handling for raytracing-based methods
    n_chunks = -(-n_rays * max(n_pts_per_ray, 1) // chunk_size)
    chunk_size_in_rays = -(-n_rays // n_chunks)

    iter = range(0, n_rays, chunk_size_in_rays)
    if len(iter) >= tqdm_trigger_threshold:
        iter = tqdm.tqdm(iter)

    def _safe_slice(
        tensor: Optional[torch.Tensor], start_idx: int, end_idx: int
    ) -> Any:
        return tensor[start_idx:end_idx] if tensor is not None else None

    for start_idx in iter:
        end_idx = min(start_idx + chunk_size_in_rays, n_rays)
        ray_bundle_chunk = ImplicitronRayBundle(
            origins=ray_bundle.origins.reshape(batch_size, -1, 3)[:, start_idx:end_idx],
            directions=ray_bundle.directions.reshape(batch_size, -1, 3)[
                :, start_idx:end_idx
            ],
            lengths=ray_bundle.lengths.reshape(batch_size, n_rays, n_pts_per_ray)[
                :, start_idx:end_idx
            ],
            xys=ray_bundle.xys.reshape(batch_size, -1, 2)[:, start_idx:end_idx],
            camera_ids=_safe_slice(ray_bundle.camera_ids, start_idx, end_idx),
            camera_counts=_safe_slice(ray_bundle.camera_counts, start_idx, end_idx),
        )
        extra_args = kwargs.copy()
        for k, v in chunked_inputs.items():
            extra_args[k] = v.flatten(2)[:, :, start_idx:end_idx]
        yield [ray_bundle_chunk, *args], extra_args

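
# [Editor's note] Illustrative sketch, not part of the diff. How the three
# helpers above compose in `GenericModel._render` for FULL_GRID sampling:
# `_chunk_generator` slices the ray bundle, the renderer is applied per chunk,
# and the collator stitches the chunks back together. A toy version:
import torch

def render_in_chunks(fn, chunks, collate):
    # mirrors _apply_chunked: process each chunk independently, then collate
    return collate([fn(c) for c in chunks])

rays = torch.arange(10.0)
chunks = [rays[i : i + 4] for i in range(0, 10, 4)]  # chunk sizes 4, 4, 2
full = render_in_chunks(lambda chunk: chunk * 2.0, chunks, torch.cat)
assert torch.equal(full, rays * 2.0)  # chunking does not change the result
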
@@ -29,6 +29,8 @@ class Autodecoder(Configurable, torch.nn.Module):
    ignore_input: bool = False

    def __post_init__(self):
        super().__init__()

        if self.n_instances <= 0:
            raise ValueError(f"Invalid n_instances {self.n_instances}")


@@ -26,6 +26,9 @@ class GlobalEncoderBase(ReplaceableBase):
    (`SequenceAutodecoder`).
    """

    def __init__(self) -> None:
        super().__init__()

    def get_encoding_dim(self):
        """
        Returns the dimensionality of the returned encoding.
@@ -66,6 +69,7 @@ class SequenceAutodecoder(GlobalEncoderBase, torch.nn.Module):  # pyre-ignore: 1
    autodecoder: Autodecoder

    def __post_init__(self):
        super().__init__()
        run_auto_creation(self)

    def get_encoding_dim(self):
@@ -99,6 +103,7 @@ class HarmonicTimeEncoder(GlobalEncoderBase, torch.nn.Module):
    time_divisor: float = 1.0

    def __post_init__(self):
        super().__init__()
        self._harmonic_embedding = HarmonicEmbedding(
            n_harmonic_functions=self.n_harmonic_functions,
            append_input=self.append_input,

@@ -14,6 +14,9 @@ from pytorch3d.renderer.cameras import CamerasBase


class ImplicitFunctionBase(ABC, ReplaceableBase):
    def __init__(self):
        super().__init__()

    @abstractmethod
    def forward(
        self,

@@ -45,6 +45,9 @@ class DecoderFunctionBase(ReplaceableBase, torch.nn.Module):
    space and transforms it into the required quantity (for example density and color).
    """

    def __post_init__(self):
        super().__init__()

    def forward(
        self, features: torch.Tensor, z: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
@@ -80,6 +83,7 @@ class ElementwiseDecoder(DecoderFunctionBase):
    operation: DecoderActivation = DecoderActivation.IDENTITY

    def __post_init__(self):
        super().__post_init__()
        if self.operation not in [
            DecoderActivation.RELU,
            DecoderActivation.SOFTPLUS,
@@ -159,6 +163,8 @@ class MLPWithInputSkips(Configurable, torch.nn.Module):
    use_xavier_init: bool = True

    def __post_init__(self):
        super().__init__()

        try:
            last_activation = {
                DecoderActivation.RELU: torch.nn.ReLU(True),
@@ -278,6 +284,7 @@ class MLPDecoder(DecoderFunctionBase):
    network: MLPWithInputSkips

    def __post_init__(self):
        super().__post_init__()
        run_auto_creation(self)

    def forward(

@@ -66,6 +66,8 @@ class IdrFeatureField(ImplicitFunctionBase, torch.nn.Module):
    encoding_dim: int = 0

    def __post_init__(self):
        super().__init__()

        dims = [self.d_in] + list(self.dims) + [self.d_out + self.feature_vector_size]

        self.embed_fn = None

@@ -56,6 +56,7 @@ class NeuralRadianceFieldBase(ImplicitFunctionBase, torch.nn.Module):
    """

    def __post_init__(self):
        super().__init__()
        # The harmonic embedding layer converts input 3D coordinates
        # to a representation that is more suitable for
        # processing with a deep neural network.

@@ -44,6 +44,7 @@ class SRNRaymarchFunction(Configurable, torch.nn.Module):
    raymarch_function: Any = None

    def __post_init__(self):
        super().__init__()
        self._harmonic_embedding = HarmonicEmbedding(
            self.n_harmonic_functions, append_input=True
        )
@@ -134,6 +135,7 @@ class SRNPixelGenerator(Configurable, torch.nn.Module):
    ray_dir_in_camera_coords: bool = False

    def __post_init__(self):
        super().__init__()
        self._harmonic_embedding = HarmonicEmbedding(
            self.n_harmonic_functions, append_input=True
        )
@@ -247,6 +249,7 @@ class SRNRaymarchHyperNet(Configurable, torch.nn.Module):
    xyz_in_camera_coords: bool = False

    def __post_init__(self):
        super().__init__()
        raymarch_input_embedding_dim = (
            HarmonicEmbedding.get_output_dim_static(
                self.in_features,
@@ -332,6 +335,7 @@ class SRNImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
    pixel_generator: SRNPixelGenerator

    def __post_init__(self):
        super().__init__()
        run_auto_creation(self)

    def create_raymarch_function(self) -> None:
@@ -389,6 +393,7 @@ class SRNHyperNetImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
    pixel_generator: SRNPixelGenerator

    def __post_init__(self):
        super().__init__()
        run_auto_creation(self)

    def create_hypernet(self) -> None:

@@ -81,6 +81,7 @@ class VoxelGridBase(ReplaceableBase, torch.nn.Module):
    )

    def __post_init__(self):
        super().__init__()
        if 0 not in self.resolution_changes:
            raise ValueError("There has to be key `0` in `resolution_changes`.")

@@ -856,6 +857,7 @@ class VoxelGridModule(Configurable, torch.nn.Module):
    param_groups: Dict[str, str] = field(default_factory=lambda: {})

    def __post_init__(self):
        super().__init__()
        run_auto_creation(self)
        n_grids = 1  # Voxel grid objects are batched. We need only a single grid.
        shapes = self.voxel_grid.get_shapes(epoch=0)

@@ -186,6 +186,7 @@ class VoxelGridImplicitFunction(ImplicitFunctionBase, torch.nn.Module):
    volume_cropping_epochs: Tuple[int, ...] = ()

    def __post_init__(self) -> None:
        super().__init__()
        run_auto_creation(self)
        # pyre-ignore[16]
        self.voxel_grid_scaffold = self._create_voxel_grid_scaffold()

@@ -25,6 +25,9 @@ class RegularizationMetricsBase(ReplaceableBase, torch.nn.Module):
    depend on the model's parameters.
    """

    def __post_init__(self) -> None:
        super().__init__()

    def forward(
        self, model: Any, keys_prefix: str = "loss_", **kwargs
    ) -> Dict[str, Any]:
@@ -53,6 +56,9 @@ class ViewMetricsBase(ReplaceableBase, torch.nn.Module):
    `forward()` method produces losses and other metrics.
    """

    def __post_init__(self) -> None:
        super().__init__()

    def forward(
        self,
        raymarched: RendererOutput,

@@ -41,6 +41,9 @@ class ModelDBIR(ImplicitronModelBase):
    bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0)
    max_points: int = -1

    def __post_init__(self):
        super().__init__()

    def forward(
        self,
        *,  # force keyword-only arguments

@@ -1,664 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


# Note: The #noqa comments below are for unused imports of pluggable implementations
# which are part of implicitron. They ensure that the registry is prepopulated.

import functools
import logging
from dataclasses import field
from typing import Any, Callable, Dict, List, Optional, Tuple, TYPE_CHECKING, Union

import torch
from omegaconf import DictConfig

from pytorch3d.implicitron.models.base_model import (
    ImplicitronModelBase,
    ImplicitronRender,
)
from pytorch3d.implicitron.models.global_encoder.global_encoder import GlobalEncoderBase
from pytorch3d.implicitron.models.implicit_function.base import ImplicitFunctionBase
from pytorch3d.implicitron.models.metrics import (
    RegularizationMetricsBase,
    ViewMetricsBase,
)

from pytorch3d.implicitron.models.renderer.base import (
    BaseRenderer,
    EvaluationMode,
    ImplicitronRayBundle,
    RendererOutput,
    RenderSamplingMode,
)
from pytorch3d.implicitron.models.renderer.ray_sampler import RaySamplerBase
from pytorch3d.implicitron.models.utils import (
    apply_chunked,
    chunk_generator,
    log_loss_weights,
    preprocess_input,
    weighted_sum_losses,
)
from pytorch3d.implicitron.tools import vis_utils
from pytorch3d.implicitron.tools.config import (
    expand_args_fields,
    registry,
    run_auto_creation,
)

from pytorch3d.implicitron.tools.rasterize_mc import rasterize_sparse_ray_bundle
from pytorch3d.renderer import utils as rend_utils
from pytorch3d.renderer.cameras import CamerasBase


if TYPE_CHECKING:
    from visdom import Visdom

logger = logging.getLogger(__name__)

IMPLICIT_FUNCTION_ARGS_TO_REMOVE: List[str] = [
    "feature_vector_size",
    "encoding_dim",
    "latent_dim",
    "color_dim",
]


@registry.register
class OverfitModel(ImplicitronModelBase):  # pyre-ignore: 13
    """
    OverfitModel is a wrapper for the neural implicit
    rendering and reconstruction pipeline which consists
    of the following sequence of 4 steps:


        (1) Ray Sampling
        ------------------
        Rays are sampled from an image grid based on the target view(s).
                │
                ▼
        (2) Implicit Function Evaluation
        ------------------
        Evaluate the implicit function(s) at the sampled ray points
        (also optionally pass in a global encoding from global_encoder).
                │
                ▼
        (3) Rendering
        ------------------
        Render the image into the target cameras by raymarching along
        the sampled rays and aggregating the colors and densities
        output by the implicit function in (2).
                │
                ▼
        (4) Loss Computation
        ------------------
        Compute losses based on the predicted target image(s).


    The `forward` function of OverfitModel executes
    this sequence of steps. Currently, steps 1, 2, 3
    can be customized by initializing a subclass of the appropriate
    base class and adding the newly created module to the registry.
    Please see https://github.com/facebookresearch/pytorch3d/blob/main/projects/implicitron_trainer/README.md#custom-plugins
    for more details on how to create and register a custom component.

    In the config .yaml files for experiments, the parameters below are
    contained in the
    `model_factory_ImplicitronModelFactory_args.model_OverfitModel_args`
    node. As OverfitModel derives from ReplaceableBase, the input arguments are
    parsed by the run_auto_creation function to initialize the
    necessary member modules. Please see implicitron_trainer/README.md
    for more details on this process.

    Args:
        mask_images: Whether or not to mask the RGB image background given the
            foreground mask (the `fg_probability` argument of `GenericModel.forward`)
        mask_depths: Whether or not to mask the depth image background given the
            foreground mask (the `fg_probability` argument of `GenericModel.forward`)
        render_image_width: Width of the output image to render
        render_image_height: Height of the output image to render
        mask_threshold: If greater than 0.0, the foreground mask is
            thresholded by this value before being applied to the RGB/Depth images
        output_rasterized_mc: If True, visualize the Monte-Carlo pixel renders by
            splatting onto an image grid. Default: False.
        bg_color: RGB values for setting the background color of input image
            if mask_images=True. Defaults to (0.0, 0.0, 0.0). Each renderer has its own
            way to determine the background color of its output, unrelated to this.
        chunk_size_grid: The total number of points which can be rendered
            per chunk. This is used to compute the number of rays used
            per chunk when the chunked version of the renderer is used (in order
            to fit rendering on all rays in memory)
        render_features_dimensions: The number of output features to render.
            Defaults to 3, corresponding to RGB images.
        sampling_mode_training: The sampling method to use during training. Must be
            a value from the RenderSamplingMode Enum.
        sampling_mode_evaluation: Same as above but for evaluation.
        global_encoder_class_type: The name of the class to use for global_encoder,
            which must be available in the registry. Or `None` to disable global encoder.
        global_encoder: An instance of `GlobalEncoder`. This is used to generate an encoding
            of the image (referred to as the global_code) that can be used to model aspects of
            the scene such as multiple objects or morphing objects. It is up to the implicit
            function definition how to use it, but the most typical way is to broadcast and
            concatenate to the other inputs for the implicit function.
        raysampler_class_type: The name of the raysampler class which is available
            in the global registry.
        raysampler: An instance of RaySampler which is used to emit
            rays from the target view(s).
        renderer_class_type: The name of the renderer class which is available in the global
            registry.
        renderer: A renderer class which inherits from BaseRenderer. This is used to
            generate the images from the target view(s).
        share_implicit_function_across_passes: If set to True,
            coarse_implicit_function is automatically set as implicit_function
            (coarse_implicit_function=implicit_function). The
            implicit_functions are then run sequentially during the rendering.
        implicit_function_class_type: The type of implicit function to use which
            is available in the global registry.
        implicit_function: An instance of ImplicitFunctionBase.
        coarse_implicit_function_class_type: The type of implicit function to use which
            is available in the global registry.
        coarse_implicit_function: An instance of ImplicitFunctionBase.
            If set and `share_implicit_function_across_passes` is set to False,
            coarse_implicit_function is instantiated on its own. It
            is then used as the second pass during the rendering.
            If set to None, we only do a single pass with implicit_function.
        view_metrics: An instance of ViewMetricsBase used to compute loss terms which
            are independent of the model's parameters.
        view_metrics_class_type: The type of view metrics to use, must be available in
            the global registry.
        regularization_metrics: An instance of RegularizationMetricsBase used to compute
            regularization terms which can depend on the model's parameters.
        regularization_metrics_class_type: The type of regularization metrics to use,
            must be available in the global registry.
        loss_weights: A dictionary with a {loss_name: weight} mapping; see documentation
            for `ViewMetrics` class for available loss functions.
        log_vars: A list of variable names which should be logged.
            The names should correspond to a subset of the keys of the
            dict `preds` output by the `forward` function.
    """  # noqa: B950

    mask_images: bool = True
    mask_depths: bool = True
    render_image_width: int = 400
    render_image_height: int = 400
    mask_threshold: float = 0.5
    output_rasterized_mc: bool = False
    bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0)
    chunk_size_grid: int = 4096
    render_features_dimensions: int = 3
    tqdm_trigger_threshold: int = 16

    n_train_target_views: int = 1
    sampling_mode_training: str = "mask_sample"
    sampling_mode_evaluation: str = "full_grid"

    # ---- global encoder settings
    global_encoder_class_type: Optional[str] = None
    global_encoder: Optional[GlobalEncoderBase]

    # ---- raysampler
    raysampler_class_type: str = "AdaptiveRaySampler"
    raysampler: RaySamplerBase

    # ---- renderer configs
    renderer_class_type: str = "MultiPassEmissionAbsorptionRenderer"
    renderer: BaseRenderer

    # ---- implicit function settings
    share_implicit_function_across_passes: bool = False
    implicit_function_class_type: str = "NeuralRadianceFieldImplicitFunction"
    implicit_function: ImplicitFunctionBase
    coarse_implicit_function_class_type: Optional[str] = None
    coarse_implicit_function: Optional[ImplicitFunctionBase]

    # ----- metrics
    view_metrics: ViewMetricsBase
    view_metrics_class_type: str = "ViewMetrics"

    regularization_metrics: RegularizationMetricsBase
    regularization_metrics_class_type: str = "RegularizationMetrics"

    # ---- loss weights
    loss_weights: Dict[str, float] = field(
        default_factory=lambda: {
            "loss_rgb_mse": 1.0,
            "loss_prev_stage_rgb_mse": 1.0,
            "loss_mask_bce": 0.0,
            "loss_prev_stage_mask_bce": 0.0,
        }
    )

    # ---- variables to be logged (logger automatically ignores if not computed)
    log_vars: List[str] = field(
        default_factory=lambda: [
            "loss_rgb_psnr_fg",
            "loss_rgb_psnr",
            "loss_rgb_mse",
            "loss_rgb_huber",
            "loss_depth_abs",
            "loss_depth_abs_fg",
            "loss_mask_neg_iou",
            "loss_mask_bce",
            "loss_mask_beta_prior",
            "loss_eikonal",
            "loss_density_tv",
            "loss_depth_neg_penalty",
            "loss_autodecoder_norm",
            # metrics that are only logged in 2+stage renderers
            "loss_prev_stage_rgb_mse",
            "loss_prev_stage_rgb_psnr_fg",
            "loss_prev_stage_rgb_psnr",
            "loss_prev_stage_mask_bce",
            # basic metrics
            "objective",
            "epoch",
            "sec/it",
        ]
    )

    @classmethod
    def pre_expand(cls) -> None:
        # use try/finally to bypass cinder's lazy imports
        try:
            from pytorch3d.implicitron.models.implicit_function.idr_feature_field import (  # noqa: F401, B950
                IdrFeatureField,
            )
            from pytorch3d.implicitron.models.implicit_function.neural_radiance_field import (  # noqa: F401, B950
                NeuralRadianceFieldImplicitFunction,
            )
            from pytorch3d.implicitron.models.implicit_function.scene_representation_networks import (  # noqa: F401, B950
                SRNImplicitFunction,
            )
            from pytorch3d.implicitron.models.renderer.lstm_renderer import (  # noqa: F401
                LSTMRenderer,
            )
            from pytorch3d.implicitron.models.renderer.multipass_ea import (  # noqa: F401
                MultiPassEmissionAbsorptionRenderer,
            )
            from pytorch3d.implicitron.models.renderer.sdf_renderer import (  # noqa: F401
                SignedDistanceFunctionRenderer,
            )
        finally:
            pass

    def __post_init__(self):
        # The attribute will be filled by run_auto_creation
        run_auto_creation(self)
        log_loss_weights(self.loss_weights, logger)
        # We need to set it here since run_auto_creation
        # will create coarse_implicit_function before implicit_function
        if self.share_implicit_function_across_passes:
            self.coarse_implicit_function = self.implicit_function

    def forward(
        self,
        *,  # force keyword-only arguments
        image_rgb: Optional[torch.Tensor],
        camera: CamerasBase,
        fg_probability: Optional[torch.Tensor] = None,
        mask_crop: Optional[torch.Tensor] = None,
        depth_map: Optional[torch.Tensor] = None,
        sequence_name: Optional[List[str]] = None,
        frame_timestamp: Optional[torch.Tensor] = None,
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Args:
            image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images;
                the first `min(B, n_train_target_views)` images are considered targets and
                are used to supervise the renders; the rest correspond to the source
                viewpoints from which features will be extracted.
            camera: An instance of CamerasBase containing a batch of `B` cameras corresponding
                to the viewpoints of target images, from which the rays will be sampled,
                and source images, which will be used for intersecting with target rays.
            fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch of
                foreground masks.
            mask_crop: A binary tensor of shape `(B, 1, H, W)` denoting valid
                regions in the input images (i.e. regions that do not correspond
                to, e.g., zero-padding). When the `RaySampler`'s sampling mode is set to
                "mask_sample", rays will be sampled in the non-zero regions.
            depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps.
            sequence_name: A list of `B` strings corresponding to the sequence names
                from which images `image_rgb` were extracted. They are used to match
                target frames with relevant source frames.
            frame_timestamp: Optionally a tensor of shape `(B,)` containing a batch
                of frame timestamps.
            evaluation_mode: one of EvaluationMode.TRAINING or
                EvaluationMode.EVALUATION which determines the settings used for
                rendering.

        Returns:
            preds: A dictionary containing all outputs of the forward pass including the
                rendered images, depths, masks, losses and other metrics.
        """
        image_rgb, fg_probability, depth_map = preprocess_input(
            image_rgb,
            fg_probability,
            depth_map,
            self.mask_images,
            self.mask_depths,
            self.mask_threshold,
            self.bg_color,
        )

        # Determine the used ray sampling mode.
        sampling_mode = RenderSamplingMode(
            self.sampling_mode_training
            if evaluation_mode == EvaluationMode.TRAINING
            else self.sampling_mode_evaluation
        )

        # (1) Sample rendering rays with the ray sampler.
        # pyre-ignore[29]
        ray_bundle: ImplicitronRayBundle = self.raysampler(
            camera,
            evaluation_mode,
            mask=mask_crop
            if mask_crop is not None and sampling_mode == RenderSamplingMode.MASK_SAMPLE
            else None,
        )

        inputs_to_be_chunked = {}
        if fg_probability is not None and self.renderer.requires_object_mask():
            sampled_fb_prob = rend_utils.ndc_grid_sample(
                fg_probability, ray_bundle.xys, mode="nearest"
            )
            inputs_to_be_chunked["object_mask"] = sampled_fb_prob > 0.5

        # (2)-(3) Implicit function evaluation and Rendering
        implicit_functions: List[Union[Callable, ImplicitFunctionBase]] = [
            self.implicit_function
        ]
        if self.coarse_implicit_function is not None:
            implicit_functions += [self.coarse_implicit_function]

        if self.global_encoder is not None:
            global_code = self.global_encoder(  # pyre-fixme[29]
                sequence_name=sequence_name,
                frame_timestamp=frame_timestamp,
            )
            implicit_functions = [
                functools.partial(implicit_function, global_code=global_code)
                if isinstance(implicit_function, Callable)
                else functools.partial(
                    implicit_function.forward, global_code=global_code
                )
                for implicit_function in implicit_functions
            ]
        rendered = self._render(
            ray_bundle=ray_bundle,
            sampling_mode=sampling_mode,
            evaluation_mode=evaluation_mode,
            implicit_functions=implicit_functions,
            inputs_to_be_chunked=inputs_to_be_chunked,
        )

        # A dict to store losses as well as rendering results.
        preds: Dict[str, Any] = self.view_metrics(
            results={},
            raymarched=rendered,
            ray_bundle=ray_bundle,
            image_rgb=image_rgb,
            depth_map=depth_map,
            fg_probability=fg_probability,
            mask_crop=mask_crop,
        )

        preds.update(
            self.regularization_metrics(
                results=preds,
                model=self,
            )
        )

        if sampling_mode == RenderSamplingMode.MASK_SAMPLE:
            if self.output_rasterized_mc:
                # Visualize the monte-carlo pixel renders by splatting onto
                # an image grid.
                (
                    preds["images_render"],
                    preds["depths_render"],
                    preds["masks_render"],
                ) = rasterize_sparse_ray_bundle(
                    ray_bundle,
                    rendered.features,
                    (self.render_image_height, self.render_image_width),
                    rendered.depths,
                    masks=rendered.masks,
                )
        elif sampling_mode == RenderSamplingMode.FULL_GRID:
            preds["images_render"] = rendered.features.permute(0, 3, 1, 2)
            preds["depths_render"] = rendered.depths.permute(0, 3, 1, 2)
            preds["masks_render"] = rendered.masks.permute(0, 3, 1, 2)

            preds["implicitron_render"] = ImplicitronRender(
                image_render=preds["images_render"],
                depth_render=preds["depths_render"],
                mask_render=preds["masks_render"],
            )
        else:
            raise AssertionError("Unreachable state")

        # (4) Compute losses
        # finally get the optimization objective using self.loss_weights
        objective = self._get_objective(preds)
        if objective is not None:
            preds["objective"] = objective

        return preds

    def _get_objective(self, preds: Dict[str, torch.Tensor]) -> Optional[torch.Tensor]:
        """
        A helper function to compute the overall loss as the dot product
        of individual loss functions with the corresponding weights.
        """
        return weighted_sum_losses(preds, self.loss_weights)

    def visualize(
        self,
        viz: Optional["Visdom"],
        visdom_env_imgs: str,
        preds: Dict[str, Any],
        prefix: str,
    ) -> None:
        """
        Helper function to visualize the predictions generated
        in the forward pass.

        Args:
            viz: Visdom connection object
            visdom_env_imgs: name of visdom environment for the images.
            preds: predictions dict like returned by forward()
            prefix: prepended to the names of images
        """
        if viz is None or not viz.check_connection():
            logger.info("no visdom server! -> skipping batch vis")
            return

        idx_image = 0
        title = f"{prefix}_im{idx_image}"

        vis_utils.visualize_basics(viz, preds, visdom_env_imgs, title=title)

def _render(
|
||||
self,
|
||||
*,
|
||||
ray_bundle: ImplicitronRayBundle,
|
||||
inputs_to_be_chunked: Dict[str, torch.Tensor],
|
||||
sampling_mode: RenderSamplingMode,
|
||||
**kwargs,
|
||||
) -> RendererOutput:
|
||||
"""
|
||||
Args:
|
||||
ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
|
||||
sampled rendering rays.
|
||||
inputs_to_be_chunked: A collection of tensor of shape `(B, _, H, W)`. E.g.
|
||||
SignedDistanceFunctionRenderer requires "object_mask", shape
|
||||
(B, 1, H, W), the silhouette of the object in the image. When
|
||||
chunking, they are passed to the renderer as shape
|
||||
`(B, _, chunksize)`.
|
||||
sampling_mode: The sampling method to use. Must be a value from the
|
||||
RenderSamplingMode Enum.
|
||||
|
||||
Returns:
|
||||
An instance of RendererOutput
|
||||
"""
|
||||
if sampling_mode == RenderSamplingMode.FULL_GRID and self.chunk_size_grid > 0:
|
||||
return apply_chunked(
|
||||
self.renderer,
|
||||
chunk_generator(
|
||||
self.chunk_size_grid,
|
||||
ray_bundle,
|
||||
inputs_to_be_chunked,
|
||||
self.tqdm_trigger_threshold,
|
||||
**kwargs,
|
||||
),
|
||||
lambda batch: torch.cat(batch, dim=1).reshape(
|
||||
*ray_bundle.lengths.shape[:-1], -1
|
||||
),
|
||||
)
|
||||
else:
|
||||
# pyre-fixme[29]: `BaseRenderer` is not a function.
|
||||
return self.renderer(
|
||||
ray_bundle=ray_bundle,
|
||||
**inputs_to_be_chunked,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def raysampler_tweak_args(cls, type, args: DictConfig) -> None:
|
||||
"""
|
||||
We don't expose certain fields of the raysampler because we want to set
|
||||
them from our own members.
|
||||
"""
|
||||
del args["sampling_mode_training"]
|
||||
del args["sampling_mode_evaluation"]
|
||||
del args["image_width"]
|
||||
del args["image_height"]
|
||||
|
||||
def create_raysampler(self):
|
||||
extra_args = {
|
||||
"sampling_mode_training": self.sampling_mode_training,
|
||||
"sampling_mode_evaluation": self.sampling_mode_evaluation,
|
||||
"image_width": self.render_image_width,
|
||||
"image_height": self.render_image_height,
|
||||
}
|
||||
raysampler_args = getattr(
|
||||
self, "raysampler_" + self.raysampler_class_type + "_args"
|
||||
)
|
||||
self.raysampler = registry.get(RaySamplerBase, self.raysampler_class_type)(
|
||||
**raysampler_args, **extra_args
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def renderer_tweak_args(cls, type, args: DictConfig) -> None:
|
||||
"""
|
||||
We don't expose certain fields of the renderer because we want to set
|
||||
them based on other inputs.
|
||||
"""
|
||||
args.pop("render_features_dimensions", None)
|
||||
args.pop("object_bounding_sphere", None)
|
||||
|
||||
def create_renderer(self):
|
||||
extra_args = {}
|
||||
|
||||
if self.renderer_class_type == "SignedDistanceFunctionRenderer":
|
||||
extra_args["render_features_dimensions"] = self.render_features_dimensions
|
||||
if not hasattr(self.raysampler, "scene_extent"):
|
||||
                raise ValueError(
                    "SignedDistanceFunctionRenderer requires"
                    + " a raysampler that defines the 'scene_extent' field"
                    + " (this field is supported by, e.g., the adaptive raysampler - "
                    + "self.raysampler_class_type='AdaptiveRaySampler')."
                )
            extra_args["object_bounding_sphere"] = self.raysampler.scene_extent

        renderer_args = getattr(self, "renderer_" + self.renderer_class_type + "_args")
        self.renderer = registry.get(BaseRenderer, self.renderer_class_type)(
            **renderer_args, **extra_args
        )

    @classmethod
    def implicit_function_tweak_args(cls, type, args: DictConfig) -> None:
        """
        We don't expose certain implicit_function fields because we want to set
        them based on other inputs.
        """
        for arg in IMPLICIT_FUNCTION_ARGS_TO_REMOVE:
            args.pop(arg, None)

    @classmethod
    def coarse_implicit_function_tweak_args(cls, type, args: DictConfig) -> None:
        """
        We don't expose certain implicit_function fields because we want to set
        them based on other inputs.
        """
        for arg in IMPLICIT_FUNCTION_ARGS_TO_REMOVE:
            args.pop(arg, None)

    def _create_extra_args_for_implicit_function(self) -> Dict[str, Any]:
        extra_args = {}
        global_encoder_dim = (
            0 if self.global_encoder is None else self.global_encoder.get_encoding_dim()
        )
        if self.implicit_function_class_type in (
            "NeuralRadianceFieldImplicitFunction",
            "NeRFormerImplicitFunction",
        ):
            extra_args["latent_dim"] = global_encoder_dim
            extra_args["color_dim"] = self.render_features_dimensions

        if self.implicit_function_class_type == "IdrFeatureField":
            extra_args["feature_work_size"] = global_encoder_dim
            extra_args["feature_vector_size"] = self.render_features_dimensions

        if self.implicit_function_class_type == "SRNImplicitFunction":
            extra_args["latent_dim"] = global_encoder_dim
        return extra_args

    def create_implicit_function(self) -> None:
        implicit_function_type = registry.get(
            ImplicitFunctionBase, self.implicit_function_class_type
        )
        expand_args_fields(implicit_function_type)

        config_name = f"implicit_function_{self.implicit_function_class_type}_args"
        config = getattr(self, config_name, None)
        if config is None:
            raise ValueError(f"{config_name} not present")

        extra_args = self._create_extra_args_for_implicit_function()
        self.implicit_function = implicit_function_type(**config, **extra_args)

    def create_coarse_implicit_function(self) -> None:
        # If coarse_implicit_function_class_type has been defined,
        # then we init a module based on its arguments.
        if (
            self.coarse_implicit_function_class_type is not None
            and not self.share_implicit_function_across_passes
        ):
            config_name = "coarse_implicit_function_{0}_args".format(
                self.coarse_implicit_function_class_type
            )
            config = getattr(self, config_name, {})

            implicit_function_type = registry.get(
                ImplicitFunctionBase,
                # pyre-ignore: checked above that this is not None.
                self.coarse_implicit_function_class_type,
            )
            expand_args_fields(implicit_function_type)

            extra_args = self._create_extra_args_for_implicit_function()
            self.coarse_implicit_function = implicit_function_type(
                **config, **extra_args
            )
        elif self.share_implicit_function_across_passes:
            # Since coarse_implicit_function is initialised before
            # implicit_function, we handle this case in the post_init.
            pass
        else:
            self.coarse_implicit_function = None
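
For orientation, a minimal hedged sketch of how the creation logic above plays out for the two weight-sharing modes of OverfitModel (class and argument names as in this diff; defaults may differ between versions):

    from pytorch3d.implicitron.models.overfit_model import OverfitModel
    from pytorch3d.implicitron.tools.config import expand_args_fields

    expand_args_fields(OverfitModel)

    # Separate modules: create_coarse_implicit_function builds a second network.
    model = OverfitModel(
        share_implicit_function_across_passes=False,
        coarse_implicit_function_class_type="NeuralRadianceFieldImplicitFunction",
    )

    # Shared weights: the coarse pass reuses the fine implicit function,
    # which is wired up after init (see the comment in the code above).
    shared = OverfitModel(share_implicit_function_across_passes=True)
    for p1, p2 in zip(
        shared.implicit_function.parameters(),
        shared.coarse_implicit_function.parameters(),
    ):
        assert id(p1) == id(p2)
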
@@ -141,6 +141,9 @@ class BaseRenderer(ABC, ReplaceableBase):
    Base class for all Renderer implementations.
    """

    def __init__(self) -> None:
        super().__init__()

    def requires_object_mask(self) -> bool:
        """
        Whether `forward` needs the object_mask.

@@ -57,6 +57,7 @@ class LSTMRenderer(BaseRenderer, torch.nn.Module):
    verbose: bool = False

    def __post_init__(self):
        super().__init__()
        self._lstm = torch.nn.LSTMCell(
            input_size=self.n_feature_channels,
            hidden_size=self.hidden_size,

@@ -90,6 +90,7 @@ class MultiPassEmissionAbsorptionRenderer(  # pyre-ignore: 13
    return_weights: bool = False

    def __post_init__(self):
        super().__init__()
        self._refiners = {
            EvaluationMode.TRAINING: RayPointRefiner(
                n_pts_per_ray=self.n_pts_per_ray_fine_training,

@@ -38,6 +38,9 @@ class RayPointRefiner(Configurable, torch.nn.Module):
    random_sampling: bool
    add_input_samples: bool = True

    def __post_init__(self) -> None:
        super().__init__()

    def forward(
        self,
        input_ray_bundle: ImplicitronRayBundle,

@@ -20,6 +20,9 @@ class RaySamplerBase(ReplaceableBase):
    Base class for ray samplers.
    """

    def __init__(self):
        super().__init__()

    def forward(
        self,
        cameras: CamerasBase,
@@ -99,6 +102,8 @@ class AbstractMaskRaySampler(RaySamplerBase, torch.nn.Module):
    stratified_point_sampling_evaluation: bool = False

    def __post_init__(self):
        super().__init__()

        if (self.n_rays_per_image_sampled_from_mask is not None) and (
            self.n_rays_total_training is not None
        ):

@@ -43,6 +43,9 @@ class RayTracing(Configurable, nn.Module):
    n_steps: int = 100
    n_secant_steps: int = 8

    def __post_init__(self):
        super().__init__()

    def forward(
        self,
        sdf: Callable[[torch.Tensor], torch.Tensor],

@@ -22,6 +22,9 @@ class RaymarcherBase(ReplaceableBase):
    and marching along them in order to generate a feature render.
    """

    def __init__(self):
        super().__init__()

    def forward(
        self,
        rays_densities: torch.Tensor,
@@ -95,6 +98,8 @@ class AccumulativeRaymarcherBase(RaymarcherBase, torch.nn.Module):
            surface_thickness: Denotes the overlap between the absorption
                function and the density function.
        """
        super().__init__()

        bg_color = torch.tensor(self.bg_color)
        if bg_color.ndim != 1:
            raise ValueError(f"bg_color (shape {bg_color.shape}) should be a 1D tensor")

@@ -35,6 +35,7 @@ class SignedDistanceFunctionRenderer(BaseRenderer, torch.nn.Module):  # pyre-ign
    def __post_init__(
        self,
    ):
        super().__init__()
        render_features_dimensions = self.render_features_dimensions
        if len(self.bg_color) not in [1, render_features_dimensions]:
            raise ValueError(

@@ -1,195 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


# Note: The #noqa comments below are for unused imports of pluggable implementations
# which are part of implicitron. They ensure that the registry is prepopulated.

import warnings
from logging import Logger
from typing import Any, Dict, Optional, Tuple

import torch
import tqdm
from pytorch3d.common.compat import prod

from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle

from pytorch3d.implicitron.tools import image_utils

from pytorch3d.implicitron.tools.utils import cat_dataclass


def preprocess_input(
    image_rgb: Optional[torch.Tensor],
    fg_probability: Optional[torch.Tensor],
    depth_map: Optional[torch.Tensor],
    mask_images: bool,
    mask_depths: bool,
    mask_threshold: float,
    bg_color: Tuple[float, float, float],
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
    """
    Helper function to preprocess the input images and optional depth maps
    to apply masking if required.

    Args:
        image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images
            corresponding to the source viewpoints from which features will be extracted
        fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch
            of foreground masks with values in [0, 1].
        depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps.
        mask_images: Whether or not to mask the RGB image background given the
            foreground mask (the `fg_probability` argument of `GenericModel.forward`)
        mask_depths: Whether or not to mask the depth image background given the
            foreground mask (the `fg_probability` argument of `GenericModel.forward`)
        mask_threshold: If greater than 0.0, the foreground mask is
            thresholded by this value before being applied to the RGB/Depth images
        bg_color: RGB values for setting the background color of input image
            if mask_images=True. Defaults to (0.0, 0.0, 0.0). Each renderer has its own
            way to determine the background color of its output, unrelated to this.

    Returns:
        Modified image_rgb, fg_mask, depth_map
    """
    if image_rgb is not None and image_rgb.ndim == 3:
        # The FrameData object is used for both frames and batches of frames,
        # and a user might get this error if those were confused.
        # Perhaps a user has a FrameData `fd` representing a single frame and
        # wrote something like `model(**fd)` instead of
        # `model(**fd.collate([fd]))`.
        raise ValueError(
            "Model received unbatched inputs. "
            + "Perhaps they came from a FrameData which had not been collated."
        )

    fg_mask = fg_probability
    if fg_mask is not None and mask_threshold > 0.0:
        # threshold masks
        warnings.warn("Thresholding masks!")
        fg_mask = (fg_mask >= mask_threshold).type_as(fg_mask)

    if mask_images and fg_mask is not None and image_rgb is not None:
        # mask the image
        warnings.warn("Masking images!")
        image_rgb = image_utils.mask_background(
            image_rgb, fg_mask, dim_color=1, bg_color=torch.tensor(bg_color)
        )

    if mask_depths and fg_mask is not None and depth_map is not None:
        # mask the depths
        assert (
            mask_threshold > 0.0
        ), "Depths should be masked only with thresholded masks"
        warnings.warn("Masking depths!")
        depth_map = depth_map * fg_mask

    return image_rgb, fg_mask, depth_map
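
A minimal usage sketch of preprocess_input (synthetic tensors with the shapes documented above):

    import torch

    B, H, W = 2, 8, 8
    image_rgb = torch.rand(B, 3, H, W)
    fg_probability = torch.rand(B, 1, H, W)
    depth_map = torch.rand(B, 1, H, W)
    # Threshold the mask at 0.5, black out the image background, and mask the depths.
    image_rgb, fg_mask, depth_map = preprocess_input(
        image_rgb,
        fg_probability,
        depth_map,
        mask_images=True,
        mask_depths=True,
        mask_threshold=0.5,
        bg_color=(0.0, 0.0, 0.0),
    )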


def log_loss_weights(loss_weights: Dict[str, float], logger: Logger) -> None:
    """
    Print a table of the loss weights.
    """
    loss_weights_message = (
        "-------\nloss_weights:\n"
        + "\n".join(f"{k:40s}: {w:1.2e}" for k, w in loss_weights.items())
        + "-------"
    )
    logger.info(loss_weights_message)


def weighted_sum_losses(
    preds: Dict[str, torch.Tensor], loss_weights: Dict[str, float]
) -> Optional[torch.Tensor]:
    """
    A helper function to compute the overall loss as the dot product
    of individual loss functions with the corresponding weights.
    """
    losses_weighted = [
        preds[k] * float(w)
        for k, w in loss_weights.items()
        if (k in preds and w != 0.0)
    ]
    if len(losses_weighted) == 0:
        warnings.warn("No main objective found.")
        return None
    loss = sum(losses_weighted)
    assert torch.is_tensor(loss)
    # pyre-fixme[7]: Expected `Optional[Tensor]` but got `int`.
    return loss


def apply_chunked(func, chunk_generator, tensor_collator):
    """
    Helper function to apply a function on a sequence of
    chunked inputs yielded by a generator and collate
    the result.
    """
    processed_chunks = [
        func(*chunk_args, **chunk_kwargs)
        for chunk_args, chunk_kwargs in chunk_generator
    ]

    return cat_dataclass(processed_chunks, tensor_collator)


def chunk_generator(
    chunk_size: int,
    ray_bundle: ImplicitronRayBundle,
    chunked_inputs: Dict[str, torch.Tensor],
    tqdm_trigger_threshold: int,
    *args,
    **kwargs,
):
    """
    Helper function which yields chunks of rays from the
    input ray_bundle, to be used when the number of rays is
    large and will not fit in memory for rendering.
    """
    (
        batch_size,
        *spatial_dim,
        n_pts_per_ray,
    ) = ray_bundle.lengths.shape  # B x ... x n_pts_per_ray
    if n_pts_per_ray > 0 and chunk_size % n_pts_per_ray != 0:
        raise ValueError(
            f"chunk_size_grid ({chunk_size}) should be divisible "
            f"by n_pts_per_ray ({n_pts_per_ray})"
        )

    n_rays = prod(spatial_dim)
    # special handling for raytracing-based methods
    n_chunks = -(-n_rays * max(n_pts_per_ray, 1) // chunk_size)
    chunk_size_in_rays = -(-n_rays // n_chunks)

    iter = range(0, n_rays, chunk_size_in_rays)
    if len(iter) >= tqdm_trigger_threshold:
        iter = tqdm.tqdm(iter)

    def _safe_slice(
        tensor: Optional[torch.Tensor], start_idx: int, end_idx: int
    ) -> Any:
        return tensor[start_idx:end_idx] if tensor is not None else None

    for start_idx in iter:
        end_idx = min(start_idx + chunk_size_in_rays, n_rays)
        ray_bundle_chunk = ImplicitronRayBundle(
            origins=ray_bundle.origins.reshape(batch_size, -1, 3)[:, start_idx:end_idx],
            directions=ray_bundle.directions.reshape(batch_size, -1, 3)[
                :, start_idx:end_idx
            ],
            lengths=ray_bundle.lengths.reshape(batch_size, n_rays, n_pts_per_ray)[
                :, start_idx:end_idx
            ],
            xys=ray_bundle.xys.reshape(batch_size, -1, 2)[:, start_idx:end_idx],
            camera_ids=_safe_slice(ray_bundle.camera_ids, start_idx, end_idx),
            camera_counts=_safe_slice(ray_bundle.camera_counts, start_idx, end_idx),
        )
        extra_args = kwargs.copy()
        for k, v in chunked_inputs.items():
            extra_args[k] = v.flatten(2)[:, :, start_idx:end_idx]
        yield [ray_bundle_chunk, *args], extra_args
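
Taken together, apply_chunked and chunk_generator are used roughly as follows (a sketch: `render_fn`, the pre-built `ray_bundle`, and the torch.cat collator are illustrative assumptions, not part of this file):

    import functools
    import torch

    # render_fn: any callable taking a ray-bundle chunk and returning a
    # dataclass of tensors; the chunks are concatenated along the ray dim.
    out = apply_chunked(
        render_fn,
        chunk_generator(
            chunk_size=4096,
            ray_bundle=ray_bundle,
            chunked_inputs={},
            tqdm_trigger_threshold=16,
        ),
        tensor_collator=functools.partial(torch.cat, dim=1),
    )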
@@ -118,6 +118,9 @@ class IdentityFeatureAggregator(torch.nn.Module, FeatureAggregatorBase):
    the outputs.
    """

    def __post_init__(self):
        super().__init__()

    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):
@@ -178,6 +181,9 @@ class ReductionFeatureAggregator(torch.nn.Module, FeatureAggregatorBase):
            ReductionFunction.STD,
        )

    def __post_init__(self):
        super().__init__()

    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):
@@ -269,6 +275,9 @@ class AngleWeightedReductionFeatureAggregator(torch.nn.Module, FeatureAggregator
    weight_by_ray_angle_gamma: float = 1.0
    min_ray_angle_weight: float = 0.1

    def __post_init__(self):
        super().__init__()

    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):
@@ -368,6 +377,9 @@ class AngleWeightedIdentityFeatureAggregator(torch.nn.Module, FeatureAggregatorB
    weight_by_ray_angle_gamma: float = 1.0
    min_ray_angle_weight: float = 0.1

    def __post_init__(self):
        super().__init__()

    def get_aggregated_feature_dim(
        self, feats_or_feats_dim: Union[Dict[str, torch.Tensor], int]
    ):

@@ -38,6 +38,7 @@ class ViewPooler(Configurable, torch.nn.Module):
    feature_aggregator: FeatureAggregatorBase

    def __post_init__(self):
        super().__init__()
        run_auto_creation(self)

    def get_aggregated_feature_dim(self, feats: Union[Dict[str, torch.Tensor], int]):

@@ -29,6 +29,9 @@ class ViewSampler(Configurable, torch.nn.Module):
    masked_sampling: bool = False
    sampling_mode: str = "bilinear"

    def __post_init__(self):
        super().__init__()

    def forward(
        self,
        *,  # force kw args

@@ -12,21 +12,10 @@ import warnings
from collections import Counter, defaultdict
from enum import Enum
from functools import partial
from typing import (
    Any,
    Callable,
    Dict,
    get_args,
    get_origin,
    List,
    Optional,
    Tuple,
    Type,
    TypeVar,
    Union,
)
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union

from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch3d.common.datatypes import get_args, get_origin


"""
@@ -184,8 +173,6 @@ ENABLED_SUFFIX: str = "_enabled"
CREATE_PREFIX: str = "create_"
IMPL_SUFFIX: str = "_impl"
TWEAK_SUFFIX: str = "_tweak_args"
_DATACLASS_INIT: str = "__dataclass_own_init__"
PRE_EXPAND_NAME: str = "pre_expand"


class ReplaceableBase:
@@ -836,12 +823,6 @@ def expand_args_fields(
    then the default_factory of x_args will also have a call to x_tweak_args(X, x_args) and
    the default_factory of x_Y_args will also have a call to x_tweak_args(Y, x_Y_args).

    In addition, if the class inherits torch.nn.Module, the generated __init__ will
    call torch.nn.Module's __init__ before doing anything else.

    Before any transformation of the class, if the class has a classmethod called
    `pre_expand`, it will be called with no arguments.

    Note that although the *_args members are intended to have type DictConfig, they
    are actually internally annotated as dicts. OmegaConf is happy to see a DictConfig
    in place of a dict, but not vice-versa. Allowing dict lets a class user specify
@@ -862,9 +843,6 @@ def expand_args_fields(
    if _is_actually_dataclass(some_class):
        return some_class

    if hasattr(some_class, PRE_EXPAND_NAME):
        getattr(some_class, PRE_EXPAND_NAME)()

    # The functions this class's run_auto_creation will run.
    creation_functions: List[str] = []
    # The classes which this type knows about from the registry
@@ -923,40 +901,9 @@ def expand_args_fields(
    some_class._known_implementations = known_implementations

    dataclasses.dataclass(eq=False)(some_class)
    _fixup_class_init(some_class)
    return some_class


def _fixup_class_init(some_class) -> None:
    """
    In-place modification of the some_class class which happens
    after dataclass processing.

    If the dataclass some_class inherits torch.nn.Module, then
    makes torch.nn.Module's __init__ be called before anything else
    on instantiation of some_class.
    This is a bit like attr's __pre_init__.
    """

    assert _is_actually_dataclass(some_class)
    try:
        import torch
    except ModuleNotFoundError:
        return

    if not issubclass(some_class, torch.nn.Module):
        return

    def init(self, *args, **kwargs) -> None:
        torch.nn.Module.__init__(self)
        getattr(self, _DATACLASS_INIT)(*args, **kwargs)

    assert _DATACLASS_INIT not in some_class.__dict__

    setattr(some_class, _DATACLASS_INIT, some_class.__init__)
    some_class.__init__ = init
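
The net effect is the pattern the hunks earlier in this diff converge on: a Configurable dataclass that inherits torch.nn.Module can create submodules in __post_init__ once the Module machinery has been initialized. A hedged sketch (MyModule is hypothetical):

    import torch
    from pytorch3d.implicitron.tools.config import Configurable, expand_args_fields

    class MyModule(Configurable, torch.nn.Module):
        hidden: int = 4

        def __post_init__(self):
            # super().__init__() ensures torch.nn.Module state exists
            # before submodules are assigned (cf. the hunks above).
            super().__init__()
            self.linear = torch.nn.Linear(self.hidden, self.hidden)

    expand_args_fields(MyModule)
    m = MyModule()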


def get_default_args_field(
    C,
    *,

@@ -147,7 +147,6 @@ def find_last_checkpoint(
        # pyre-fixme[61]: `fls` is undefined, or not always defined.
        fl = [f[0 : -len(ext)] + ".pth" for f in fls]
    else:
        # pyre-fixme[61]: `ext` is undefined, or not always defined.
        fl = fls[-1][0 : -len(ext)] + ".pth"

    return fl

@@ -27,33 +27,13 @@ def get_rgbd_point_cloud(
    mask: Optional[torch.Tensor] = None,
    mask_thr: float = 0.5,
    mask_points: bool = True,
    euclidean: bool = False,
) -> Pointclouds:
    """
    Given a batch of images, depths, masks and cameras, generate a single colored
    point cloud by unprojecting depth maps and coloring with the source
    Given a batch of images, depths, masks and cameras, generate a colored
    point cloud by unprojecting depth maps and coloring with the source
    pixel colors.

    Arguments:
        camera: Batch of N cameras
        image_rgb: Batch of N images of shape (N, C, H, W).
            For RGB images C=3.
        depth_map: Batch of N depth maps of shape (N, 1, H', W').
            Only positive values here are used to generate points.
            If euclidean=False (default) this contains perpendicular distances
            from each point to the camera plane (z-values).
            If euclidean=True, this contains distances from each point to
            the camera center.
        mask: If provided, batch of N masks of the same shape as depth_map.
            If provided, values in depth_map are ignored if the corresponding
            element of mask is smaller than mask_thr.
        mask_thr: used in interpreting mask
        euclidean: used in interpreting depth_map.

    Returns:
        Pointclouds object containing one point cloud.
    """
    imh, imw = depth_map.shape[2:]
    imh, imw = image_rgb.shape[2:]

    # convert the depth maps to point clouds using the grid ray sampler
    pts_3d = ray_bundle_to_ray_points(
@@ -63,7 +43,6 @@ def get_rgbd_point_cloud(
            n_pts_per_ray=1,
            min_depth=1.0,
            max_depth=1.0,
            unit_directions=euclidean,
        )(camera)._replace(lengths=depth_map[:, 0, ..., None])
    )


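
A hedged usage sketch of get_rgbd_point_cloud (the leading arguments are assumed to be camera, image_rgb, depth_map, per the docstring above; inputs are synthetic):

    import torch
    from pytorch3d.renderer import PerspectiveCameras

    N, H, W = 1, 64, 64
    camera = PerspectiveCameras(R=torch.eye(3)[None], T=torch.zeros(1, 3))
    image_rgb = torch.rand(N, 3, H, W)
    depth_map = torch.ones(N, 1, H, W)  # z-values, since euclidean=False by default
    point_cloud = get_rgbd_point_cloud(camera, image_rgb, depth_map)
    # -> a Pointclouds object containing a single point cloud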
@@ -7,9 +7,8 @@
import math
from typing import Optional, Tuple

import pytorch3d

import torch
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
from pytorch3d.ops import packed_to_padded
from pytorch3d.renderer import PerspectiveCameras
from pytorch3d.structures import Pointclouds
@@ -19,7 +18,7 @@ from .point_cloud_utils import render_point_cloud_pytorch3d

@torch.no_grad()
def rasterize_sparse_ray_bundle(
    ray_bundle: "pytorch3d.implicitron.models.renderer.base.ImplicitronRayBundle",
    ray_bundle: ImplicitronRayBundle,
    features: torch.Tensor,
    image_size_hw: Tuple[int, int],
    depth: torch.Tensor,

@@ -86,8 +86,6 @@ class VideoWriter:
            or a 2-tuple defining the size of the output image.
        """

        # pyre-fixme[6]: For 1st argument expected `Union[PathLike[str], str]` but
        # got `Optional[str]`.
        outfile = os.path.join(self.cache_dir, self.regexp % self.frame_num)

        if isinstance(frame, matplotlib.figure.Figure):
@@ -126,14 +124,9 @@ class VideoWriter:
            quiet: If `True`, suppresses logging messages.

        Returns:
            video_path: The path to the generated video if any frames were added.
                Otherwise returns an empty string.
            video_path: The path to the generated video.
        """
        if self.frame_num == 0:
            return ""

        # pyre-fixme[6]: For 1st argument expected `Union[PathLike[str], str]` but
        # got `Optional[str]`.
        regexp = os.path.join(self.cache_dir, self.regexp)

        if shutil.which(self.ffmpeg_bin) is None:

@@ -128,8 +128,8 @@ def visualize_basics(
    # TODO: handle errors on the outside
    try:
        imout = {"all": torch.cat(list(imout.values()), dim=2)}
    except RuntimeError as e:
        print("cant cat!", e.args)
    except:
        print("cant cat!")

    for k, v in imout.items():
        viz.images(

@@ -215,8 +215,6 @@ def load_obj(
    """
    data_dir = "./"
    if isinstance(f, (str, bytes, Path)):
        # pyre-fixme[6]: For 1st argument expected `PathLike[Variable[AnyStr <:
        # [str, bytes]]]` but got `Union[Path, bytes, str]`.
        data_dir = os.path.dirname(f)
    if path_manager is None:
        path_manager = PathManager()

@@ -7,7 +7,7 @@
import contextlib
import pathlib
import warnings
from typing import cast, ContextManager, IO, Optional, Union
from typing import ContextManager, IO, Optional, Union

import numpy as np
import torch
@@ -17,6 +17,14 @@ from PIL import Image
from ..common.datatypes import Device


@contextlib.contextmanager
def nullcontext(x):
    """
    This is just like contextlib.nullcontext but also works in Python 3.6.
    """
    yield x
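
In use, the helper behaves exactly like the stdlib version (the file name below is illustrative):

    with nullcontext(open("mesh.obj")) as f:  # hypothetical file
        data = f.read()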


PathOrStr = Union[pathlib.Path, str]


@@ -28,7 +36,7 @@ def _open_file(f, path_manager: PathManager, mode: str = "r") -> ContextManager[
        f = f.open(mode)
        return contextlib.closing(f)
    else:
        return contextlib.nullcontext(cast(IO, f))
        return nullcontext(f)


def _make_tensor(

@@ -124,7 +124,6 @@ def mesh_laplacian_smoothing(meshes, method: str = "uniform"):
    if method == "uniform":
        loss = L.mm(verts_packed)
    elif method == "cot":
        # pyre-fixme[61]: `norm_w` is undefined, or not always defined.
        loss = L.mm(verts_packed) * norm_w - verts_packed
    elif method == "cotcurv":
        # pyre-fixme[61]: `norm_w` may not be initialized here.

@@ -432,7 +432,7 @@ class CamerasBase(TensorProperties):
                f"Boolean index of shape {index.shape} does not match cameras"
            )
        elif max(index) >= len(self):
            raise IndexError(f"Index {max(index)} is out of bounds for select cameras")
            raise ValueError(f"Index {max(index)} is out of bounds for select cameras")

        for field in self._FIELDS:
            val = getattr(self, field, None)

@@ -198,6 +198,9 @@ class MeshRasterizer(nn.Module):
        verts_view = cameras.get_world_to_view_transform(**kwargs).transform_points(
            verts_world, eps=eps
        )
        # Call transform_points instead of explicitly composing transforms to handle
        # the case where the camera class does not have a projection matrix form.
        verts_proj = cameras.transform_points(verts_world, eps=eps)
        to_ndc_transform = cameras.get_ndc_camera_transform(**kwargs)
        projection_transform = try_get_projection_transform(cameras, kwargs)
        if projection_transform is not None:

@@ -5,11 +5,7 @@
# LICENSE file in the root directory of this source tree.

from .meshes import join_meshes_as_batch, join_meshes_as_scene, Meshes
from .pointclouds import (
    join_pointclouds_as_batch,
    join_pointclouds_as_scene,
    Pointclouds,
)
from .pointclouds import Pointclouds
from .utils import list_to_packed, list_to_padded, packed_to_list, padded_to_list
from .volumes import Volumes


@@ -124,14 +124,12 @@ class Pointclouds:
        normals:
            Can be either

            - None
            - List where each element is a tensor of shape (num_points, 3)
              containing the normal vector for each point.
            - Padded float tensor of shape (num_clouds, num_points, 3).
        features:
            Can be either

            - None
            - List where each element is a tensor of shape (num_points, C)
              containing the features for the points in the cloud.
            - Padded float tensor of shape (num_clouds, num_points, C).
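
The documented variants in use (a minimal sketch with synthetic data):

    import torch
    from pytorch3d.structures import Pointclouds

    points = [torch.rand(10, 3), torch.rand(5, 3)]     # list variant, ragged clouds
    normals = [torch.rand(10, 3), torch.rand(5, 3)]    # per-point normal vectors
    features = [torch.rand(10, 4), torch.rand(5, 4)]   # C=4 features per point
    clouds = Pointclouds(points=points, normals=normals, features=features)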
@@ -1262,42 +1260,6 @@ def join_pointclouds_as_batch(pointclouds: Sequence[Pointclouds]) -> Pointclouds
            field_list = None
        else:
            field_list = [p for points in field_list for p in points]
            if field == "features" and any(
                p.shape[1] != field_list[0].shape[1] for p in field_list[1:]
            ):
                raise ValueError("Pointclouds must have the same number of features")
        kwargs[field] = field_list

    return Pointclouds(**kwargs)


def join_pointclouds_as_scene(
    pointclouds: Union[Pointclouds, List[Pointclouds]]
) -> Pointclouds:
    """
    Joins a batch of point clouds, given as a Pointclouds object or a list of
    Pointclouds objects, into a single point cloud. If the input is a list, the
    Pointclouds objects in the list must all be on the same device, and they must
    either all or none have features and all or none have normals.

    Args:
        pointclouds: Pointclouds object that contains a batch of point clouds, or a
            list of Pointclouds objects.

    Returns:
        new Pointclouds object containing a single point cloud
    """
    if isinstance(pointclouds, list):
        pointclouds = join_pointclouds_as_batch(pointclouds)

    if len(pointclouds) == 1:
        return pointclouds
    points = pointclouds.points_packed()
    features = pointclouds.features_packed()
    normals = pointclouds.normals_packed()
    pointcloud = Pointclouds(
        points=points[None],
        features=None if features is None else features[None],
        normals=None if normals is None else normals[None],
    )
    return pointcloud

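
Usage sketch for join_pointclouds_as_scene as defined above (two small synthetic clouds; all-or-none features per the docstring):

    import torch
    from pytorch3d.structures import Pointclouds

    a = Pointclouds(points=[torch.rand(10, 3)])
    b = Pointclouds(points=[torch.rand(20, 3)])
    scene = join_pointclouds_as_scene([a, b])
    assert len(scene) == 1
    assert scene.points_packed().shape[0] == 30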
@@ -42,11 +42,12 @@ def cameras_from_opencv_projection(
    followed by the homogenization of `x_screen_opencv`.

    Note:
        The parameters `R, tvec, camera_matrix` correspond to the inputs of
        `cv2.projectPoints(x_world, rvec, tvec, camera_matrix, [])`,
        where `rvec` is an axis-angle vector that can be obtained from
        the rotation matrix `R` expected here by calling the `so3_log_map` function.
        Correspondingly, `R` can be obtained from `rvec` by calling `so3_exp_map`.
        The parameters `R, tvec, camera_matrix` correspond to the outputs of
        `cv2.decomposeProjectionMatrix`.

        The `rvec` parameter of the `cv2.projectPoints` is an axis-angle vector
        that can be converted to the rotation matrix `R` expected here by
        calling the `so3_exp_map` function.

    Args:
        R: A batch of rotation matrices of shape `(N, 3, 3)`.
@@ -72,11 +73,12 @@ def opencv_from_cameras_projection(
    of `cameras_from_opencv_projection`.

    Note:
        The outputs `R, tvec, camera_matrix` correspond to the inputs of
        `cv2.projectPoints(x_world, rvec, tvec, camera_matrix, [])`,
        where `rvec` is an axis-angle vector that can be obtained from
        the rotation matrix `R` output here by calling the `so3_log_map` function.
        Correspondingly, `R` can be obtained from `rvec` by calling `so3_exp_map`.
        The outputs `R, tvec, camera_matrix` correspond to the outputs of
        `cv2.decomposeProjectionMatrix`.

        The `rvec` parameter of the `cv2.projectPoints` is an axis-angle vector
        that can be converted from the returned rotation matrix `R` here by
        calling the `so3_log_map` function.

    Args:
        cameras: A batch of `N` cameras in the PyTorch3D convention.

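
A round-trip sketch of the two conversions (synthetic inputs; imports as in released PyTorch3D versions):

    import torch
    from pytorch3d.utils import (
        cameras_from_opencv_projection,
        opencv_from_cameras_projection,
    )

    R = torch.eye(3)[None]                 # (N, 3, 3)
    tvec = torch.zeros(1, 3)               # (N, 3)
    camera_matrix = torch.tensor(
        [[[100.0, 0.0, 32.0], [0.0, 100.0, 32.0], [0.0, 0.0, 1.0]]]
    )                                      # (N, 3, 3)
    image_size = torch.tensor([[64, 64]])  # (N, 2) as (height, width)
    cameras = cameras_from_opencv_projection(R, tvec, camera_matrix, image_size)
    R2, tvec2, camera_matrix2 = opencv_from_cameras_projection(cameras, image_size)
    assert torch.allclose(R, R2, atol=1e-5)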
@@ -3,19 +3,3 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import warnings


try:
    from .plotly_vis import get_camera_wireframe, plot_batch_individually, plot_scene
except ModuleNotFoundError as err:
    if "plotly" in str(err):
        warnings.warn(
            "Cannot import plotly-based visualization code."
            " Please install plotly to enable (pip install plotly)."
        )
    else:
        raise

from .texture_vis import texturesuv_image_matplotlib, texturesuv_image_PIL

@@ -100,7 +100,6 @@ class Lighting(NamedTuple):  # pragma: no cover
    vertexnormalsepsilon: float = 1e-12


@torch.no_grad()
def plot_scene(
    plots: Dict[str, Dict[str, Struct]],
    *,
@@ -408,7 +407,6 @@ def plot_scene(
    return fig


@torch.no_grad()
def plot_batch_individually(
    batched_structs: Union[
        List[Struct],
@@ -890,12 +888,8 @@ def _add_ray_bundle_trace(
    )

    # make the ray lines for plotly plotting
    nan_tensor = torch.tensor(
        [[float("NaN")] * 3],
        device=ray_lines_endpoints.device,
        dtype=ray_lines_endpoints.dtype,
    )
    ray_lines = torch.empty(size=(1, 3), device=ray_lines_endpoints.device)
    nan_tensor = torch.Tensor([[float("NaN")] * 3])
    ray_lines = torch.empty(size=(1, 3))
    for ray_line in ray_lines_endpoints:
        # We combine the ray lines into a single tensor to plot them in a
        # single trace. The NaNs are inserted between sets of ray lines
@@ -958,7 +952,7 @@ def _add_ray_bundle_trace(
    current_layout = fig["layout"][plot_scene]

    # update the bounds of the axes for the current trace
    all_ray_points = ray_bundle_to_ray_points(ray_bundle).reshape(-1, 3)
    all_ray_points = ray_bundle_to_ray_points(ray_bundle).view(-1, 3)
    ray_points_center = all_ray_points.mean(dim=0)
    max_expand = (all_ray_points.max(0)[0] - all_ray_points.min(0)[0]).max().item()
    _update_axes_bounds(ray_points_center, float(max_expand), current_layout)
@@ -1008,7 +1002,6 @@ def _update_axes_bounds(
        max_expand: the maximum spread in any dimension of the trace's vertices.
        current_layout: the plotly figure layout scene corresponding to the referenced trace.
    """
    verts_center = verts_center.detach().cpu()
    verts_min = verts_center - max_expand
    verts_max = verts_center + max_expand
    bounds = torch.t(torch.stack((verts_min, verts_max)))

@@ -90,10 +90,8 @@ dataset_map_provider_RenderedMeshDatasetMapProvider_args:
  num_views: 40
  data_file: null
  azimuth_range: 180.0
  distance: 2.7
  resolution: 128
  use_point_light: true
  gpu_idx: 0
  path_manager_factory_class_type: PathManagerFactory
  path_manager_factory_PathManagerFactory_args:
    silence_logs: true

@@ -1,5 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
@@ -1,217 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from typing import Any, Dict
from unittest.mock import patch

import torch
from pytorch3d.implicitron.models.generic_model import GenericModel
from pytorch3d.implicitron.models.overfit_model import OverfitModel
from pytorch3d.implicitron.models.renderer.base import EvaluationMode
from pytorch3d.implicitron.tools.config import expand_args_fields
from pytorch3d.renderer.cameras import look_at_view_transform, PerspectiveCameras

DEVICE = torch.device("cuda:0")


def _generate_fake_inputs(N: int, H: int, W: int) -> Dict[str, Any]:
    R, T = look_at_view_transform(azim=torch.rand(N) * 360)
    return {
        "camera": PerspectiveCameras(R=R, T=T, device=DEVICE),
        "fg_probability": torch.randint(
            high=2, size=(N, 1, H, W), device=DEVICE
        ).float(),
        "depth_map": torch.rand((N, 1, H, W), device=DEVICE) + 0.1,
        "mask_crop": torch.randint(high=2, size=(N, 1, H, W), device=DEVICE).float(),
        "sequence_name": ["sequence"] * N,
        "image_rgb": torch.rand((N, 1, H, W), device=DEVICE),
    }


def mock_safe_multinomial(input: torch.Tensor, num_samples: int) -> torch.Tensor:
    """Return deterministic indices to mock safe_multinomial.

    Args:
        input: tensor of shape [B, n] containing non-negative values;
            rows are interpreted as unnormalized event probabilities
            in categorical distributions.
        num_samples: number of samples to take.

    Returns:
        Tensor of shape [B, num_samples]
    """
    batch_size = input.shape[0]
    return torch.arange(num_samples).repeat(batch_size, 1).to(DEVICE)


class TestOverfitModel(unittest.TestCase):
    def setUp(self):
        torch.manual_seed(42)

    def test_overfit_model_vs_generic_model_with_batch_size_one(self):
        """In this test we compare OverfitModel to GenericModel behavior.

        We use a NeRF setup (2 rendering passes).

        OverfitModel is a specific case of GenericModel. Hence, with the same inputs,
        they should provide the exact same results.
        """
        expand_args_fields(OverfitModel)
        expand_args_fields(GenericModel)
        batch_size, image_height, image_width = 1, 80, 80
        assert batch_size == 1
        overfit_model = OverfitModel(
            render_image_height=image_height,
            render_image_width=image_width,
            coarse_implicit_function_class_type="NeuralRadianceFieldImplicitFunction",
            # To compare the outputs of the two models without randomization,
            # we deactivate stratified_point_sampling_training.
            raysampler_AdaptiveRaySampler_args={
                "stratified_point_sampling_training": False
            },
            global_encoder_class_type="SequenceAutodecoder",
            global_encoder_SequenceAutodecoder_args={
                "autodecoder_args": {
                    "n_instances": 1000,
                    "init_scale": 1.0,
                    "encoding_dim": 64,
                }
            },
        )
        generic_model = GenericModel(
            render_image_height=image_height,
            render_image_width=image_width,
            n_train_target_views=batch_size,
            num_passes=2,
            # To compare the outputs of the two models without randomization,
            # we deactivate stratified_point_sampling_training.
            raysampler_AdaptiveRaySampler_args={
                "stratified_point_sampling_training": False
            },
            global_encoder_class_type="SequenceAutodecoder",
            global_encoder_SequenceAutodecoder_args={
                "autodecoder_args": {
                    "n_instances": 1000,
                    "init_scale": 1.0,
                    "encoding_dim": 64,
                }
            },
        )

        # Check that the two models have the same number of parameters
        num_params_mvm = sum(p.numel() for p in overfit_model.parameters())
        num_params_gm = sum(p.numel() for p in generic_model.parameters())
        self.assertEqual(num_params_mvm, num_params_gm)

        # Adapt the state dict mapping from the generic model to the overfit model
        mapping_om_from_gm = {
            key.replace("_implicit_functions.0._fn", "implicit_function").replace(
                "_implicit_functions.1._fn", "coarse_implicit_function"
            ): val
            for key, val in generic_model.state_dict().items()
        }
        # Copy parameters from generic_model to overfit_model
        overfit_model.load_state_dict(mapping_om_from_gm)

        overfit_model.to(DEVICE)
        generic_model.to(DEVICE)
        inputs_ = _generate_fake_inputs(batch_size, image_height, image_width)

        # training forward pass
        overfit_model.train()
        generic_model.train()

        with patch(
            "pytorch3d.renderer.implicit.raysampling._safe_multinomial",
            side_effect=mock_safe_multinomial,
        ):
            train_preds_om = overfit_model(
                **inputs_,
                evaluation_mode=EvaluationMode.TRAINING,
            )
            train_preds_gm = generic_model(
                **inputs_,
                evaluation_mode=EvaluationMode.TRAINING,
            )

        self.assertTrue(len(train_preds_om) == len(train_preds_gm))

        self.assertTrue(train_preds_om["objective"].isfinite().item())
        # With randomization disabled and identical weights,
        # the objective should be the same.
        self.assertTrue(
            torch.allclose(train_preds_om["objective"], train_preds_gm["objective"])
        )

        # Test that evaluation works
        overfit_model.eval()
        generic_model.eval()
        with torch.no_grad():
            eval_preds_om = overfit_model(
                **inputs_,
                evaluation_mode=EvaluationMode.EVALUATION,
            )
            eval_preds_gm = generic_model(
                **inputs_,
                evaluation_mode=EvaluationMode.EVALUATION,
            )

        self.assertEqual(
            eval_preds_om["images_render"].shape,
            (batch_size, 3, image_height, image_width),
        )
        self.assertTrue(
            torch.allclose(eval_preds_om["objective"], eval_preds_gm["objective"])
        )
        self.assertTrue(
            torch.allclose(
                eval_preds_om["images_render"], eval_preds_gm["images_render"]
            )
        )

    def test_overfit_model_check_share_weights(self):
        model = OverfitModel(share_implicit_function_across_passes=True)
        for p1, p2 in zip(
            model.implicit_function.parameters(),
            model.coarse_implicit_function.parameters(),
        ):
            self.assertEqual(id(p1), id(p2))

        model.to(DEVICE)
        inputs_ = _generate_fake_inputs(2, 80, 80)
        model(**inputs_, evaluation_mode=EvaluationMode.TRAINING)

    def test_overfit_model_check_no_share_weights(self):
        model = OverfitModel(
            share_implicit_function_across_passes=False,
            coarse_implicit_function_class_type="NeuralRadianceFieldImplicitFunction",
            coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args={
                "transformer_dim_down_factor": 1.0,
                "n_hidden_neurons_xyz": 256,
                "n_layers_xyz": 8,
                "append_xyz": (5,),
            },
        )
        for p1, p2 in zip(
            model.implicit_function.parameters(),
            model.coarse_implicit_function.parameters(),
        ):
            self.assertNotEqual(id(p1), id(p2))

        model.to(DEVICE)
        inputs_ = _generate_fake_inputs(2, 80, 80)
        model(**inputs_, evaluation_mode=EvaluationMode.TRAINING)

    def test_overfit_model_coarse_implicit_function_is_none(self):
        model = OverfitModel(
            share_implicit_function_across_passes=False,
            coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args=None,
        )
        self.assertIsNone(model.coarse_implicit_function)
        model.to(DEVICE)
        inputs_ = _generate_fake_inputs(2, 80, 80)
        model(**inputs_, evaluation_mode=EvaluationMode.TRAINING)
@@ -1,66 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


import unittest

import torch

from pytorch3d.implicitron.models.utils import preprocess_input, weighted_sum_losses


class TestUtils(unittest.TestCase):
    def test_prepare_inputs_wrong_num_dim(self):
        img = torch.randn(3, 3, 3)
        with self.assertRaises(ValueError) as context:
            img, fg_prob, depth_map = preprocess_input(
                img, None, None, True, True, 0.5, (0.0, 0.0, 0.0)
            )
        self.assertEqual(
            "Model received unbatched inputs. "
            + "Perhaps they came from a FrameData which had not been collated.",
            str(context.exception),
        )

    def test_prepare_inputs_mask_image_true(self):
        batch, channels, height, width = 2, 3, 10, 10
        img = torch.ones(batch, channels, height, width)
        # Create a mask on the lower triangular matrix
        fg_prob = torch.tril(torch.ones(batch, 1, height, width)) * 0.3

        out_img, out_fg_prob, out_depth_map = preprocess_input(
            img, fg_prob, None, True, False, 0.3, (0.0, 0.0, 0.0)
        )

        self.assertTrue(torch.equal(out_img, torch.tril(img)))
        self.assertTrue(torch.equal(out_fg_prob, fg_prob >= 0.3))
        self.assertIsNone(out_depth_map)

    def test_prepare_inputs_mask_depth_true(self):
        batch, channels, height, width = 2, 3, 10, 10
        img = torch.ones(batch, channels, height, width)
        depth_map = torch.randn(batch, channels, height, width)
        # Create a mask on the lower triangular matrix
        fg_prob = torch.tril(torch.ones(batch, 1, height, width)) * 0.3

        out_img, out_fg_prob, out_depth_map = preprocess_input(
            img, fg_prob, depth_map, False, True, 0.3, (0.0, 0.0, 0.0)
        )

        self.assertTrue(torch.equal(out_img, img))
        self.assertTrue(torch.equal(out_fg_prob, fg_prob >= 0.3))
        self.assertTrue(torch.equal(out_depth_map, torch.tril(depth_map)))

    def test_weighted_sum_losses(self):
        preds = {"a": torch.tensor(2), "b": torch.tensor(2)}
        weights = {"a": 2.0, "b": 0.0}
        loss = weighted_sum_losses(preds, weights)
        self.assertEqual(loss, 4.0)

    def test_weighted_sum_losses_raise_warning(self):
        preds = {"a": torch.tensor(2), "b": torch.tensor(2)}
        weights = {"c": 2.0, "d": 2.0}
        self.assertIsNone(weighted_sum_losses(preds, weights))
@@ -17,8 +17,7 @@ from pytorch3d.implicitron.dataset.data_loader_map_provider import (
    DoublePoolBatchSampler,
)

from pytorch3d.implicitron.dataset.dataset_base import DatasetBase
from pytorch3d.implicitron.dataset.frame_data import FrameData
from pytorch3d.implicitron.dataset.dataset_base import DatasetBase, FrameData
from pytorch3d.implicitron.dataset.scene_batch_sampler import SceneBatchSampler


@@ -49,8 +48,7 @@ class MockDataset(DatasetBase):
        for i in idx:
            self.frame_annots[i]["frame_annotation"].sequence_name = seq_name

    def get_frame_numbers_and_timestamps(self, idxs, subset_filter=None):
        assert subset_filter is None
    def get_frame_numbers_and_timestamps(self, idxs):
        out = []
        for idx in idxs:
            frame_annotation = self.frame_annots[idx]["frame_annotation"]

@@ -9,19 +9,11 @@ import unittest
import numpy as np

import torch

from pytorch3d.implicitron.dataset.utils import (
    bbox_xywh_to_xyxy,
    bbox_xyxy_to_xywh,
    clamp_box_to_image_bounds_and_round,
    crop_around_box,
    get_1d_bounds,
    get_bbox_from_mask,
    get_clamp_bbox,
    rescale_bbox,
    resize_image,
from pytorch3d.implicitron.dataset.json_index_dataset import (
    _bbox_xywh_to_xyxy,
    _bbox_xyxy_to_xywh,
    _get_bbox_from_mask,
)

from tests.common_testing import TestCaseMixin


@@ -39,9 +31,9 @@ class TestBBox(TestCaseMixin, unittest.TestCase):
            ]
        )
        for bbox_xywh in bbox_xywh_list:
            bbox_xyxy = bbox_xywh_to_xyxy(bbox_xywh)
            bbox_xywh_ = bbox_xyxy_to_xywh(bbox_xyxy)
            bbox_xyxy_ = bbox_xywh_to_xyxy(bbox_xywh_)
            bbox_xyxy = _bbox_xywh_to_xyxy(bbox_xywh)
            bbox_xywh_ = _bbox_xyxy_to_xywh(bbox_xyxy)
            bbox_xyxy_ = _bbox_xywh_to_xyxy(bbox_xywh_)
            self.assertClose(bbox_xywh_, bbox_xywh)
            self.assertClose(bbox_xyxy, bbox_xyxy_)

@@ -55,8 +47,8 @@ class TestBBox(TestCaseMixin, unittest.TestCase):
            ]
        )
        for bbox_xywh, bbox_xyxy_expected in bbox_xywh_to_xyxy_expected:
            self.assertClose(bbox_xywh_to_xyxy(bbox_xywh), bbox_xyxy_expected)
            self.assertClose(bbox_xyxy_to_xywh(bbox_xyxy_expected), bbox_xywh)
            self.assertClose(_bbox_xywh_to_xyxy(bbox_xywh), bbox_xyxy_expected)
            self.assertClose(_bbox_xyxy_to_xywh(bbox_xyxy_expected), bbox_xywh)

        clamp_amnt = 3
        bbox_xywh_to_xyxy_clamped_expected = torch.LongTensor(
@@ -69,7 +61,7 @@ class TestBBox(TestCaseMixin, unittest.TestCase):
        )
        for bbox_xywh, bbox_xyxy_expected in bbox_xywh_to_xyxy_clamped_expected:
            self.assertClose(
                bbox_xywh_to_xyxy(bbox_xywh, clamp_size=clamp_amnt),
                _bbox_xywh_to_xyxy(bbox_xywh, clamp_size=clamp_amnt),
                bbox_xyxy_expected,
            )

@@ -82,61 +74,5 @@ class TestBBox(TestCaseMixin, unittest.TestCase):
            ]
        ).astype(np.float32)
        expected_bbox_xywh = [2, 1, 2, 1]
        bbox_xywh = get_bbox_from_mask(mask, 0.5)
        bbox_xywh = _get_bbox_from_mask(mask, 0.5)
        self.assertClose(bbox_xywh, expected_bbox_xywh)

    def test_crop_around_box(self):
        bbox = torch.LongTensor([0, 1, 2, 3])  # (x_min, y_min, x_max, y_max)
        image = torch.LongTensor(
            [
                [0, 0, 10, 20],
                [10, 20, 5, 1],
                [10, 20, 1, 1],
                [5, 4, 0, 1],
            ]
        )
        cropped = crop_around_box(image, bbox)
        self.assertClose(cropped, image[1:3, 0:2])

    def test_clamp_box_to_image_bounds_and_round(self):
        bbox = torch.LongTensor([0, 1, 10, 12])
        image_size = (5, 6)
        expected_clamped_bbox = torch.LongTensor([0, 1, image_size[1], image_size[0]])
        clamped_bbox = clamp_box_to_image_bounds_and_round(bbox, image_size)
        self.assertClose(clamped_bbox, expected_clamped_bbox)

    def test_get_clamp_bbox(self):
        bbox_xywh = torch.LongTensor([1, 1, 4, 5])
        clamped_bbox_xyxy = get_clamp_bbox(bbox_xywh, box_crop_context=2)
        # size multiplied by 2 and added coordinates
        self.assertClose(clamped_bbox_xyxy, torch.Tensor([-3, -4, 9, 11]))

    def test_rescale_bbox(self):
        bbox = torch.Tensor([0.0, 1.0, 3.0, 4.0])
        original_resolution = (4, 4)
        new_resolution = (8, 8)  # twice bigger
        rescaled_bbox = rescale_bbox(bbox, original_resolution, new_resolution)
        self.assertClose(bbox * 2, rescaled_bbox)

    def test_get_1d_bounds(self):
        array = [0, 1, 2]
        bounds = get_1d_bounds(array)
        # make nonzero 1d bounds of image
        self.assertClose(bounds, [1, 3])

    def test_resize_image(self):
        image = np.random.rand(3, 300, 500)  # rgb image 300x500
        expected_shape = (150, 250)

        resized_image, scale, mask_crop = resize_image(
            image, image_height=expected_shape[0], image_width=expected_shape[1]
        )

        original_shape = image.shape[-2:]
        expected_scale = min(
            expected_shape[0] / original_shape[0], expected_shape[1] / original_shape[1]
        )

        self.assertEqual(scale, expected_scale)
        self.assertEqual(resized_image.shape[-2:], expected_shape)
        self.assertEqual(mask_crop.shape[-2:], expected_shape)

@@ -10,7 +10,6 @@ import unittest
from dataclasses import dataclass, field, is_dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from unittest.mock import Mock

from omegaconf import DictConfig, ListConfig, OmegaConf, ValidationError
from pytorch3d.implicitron.tools.config import (
@@ -806,39 +805,6 @@ class TestConfig(unittest.TestCase):

        self.assertEqual(control_args, ["Orange", "Orange", True, True])

    def test_pre_expand(self):
        # Check that the pre_expand method of a class is called
        # once when expand_args_fields is called on the class.

        class A(Configurable):
            n: int = 9

            @classmethod
            def pre_expand(cls):
                pass

        A.pre_expand = Mock()
        expand_args_fields(A)
        A.pre_expand.assert_called()

    def test_pre_expand_replaceable(self):
        # Check that the pre_expand method of a class is called
        # once when expand_args_fields is called on the class.

        class A(ReplaceableBase):
            pass

            @classmethod
            def pre_expand(cls):
                pass

        class A1(A):
            n: 9

        A.pre_expand = Mock()
        expand_args_fields(A1)
        A.pre_expand.assert_called()


@dataclass(eq=False)
class MockDataclass:

@@ -8,7 +8,7 @@ import os
import unittest

import torch
from pytorch3d.implicitron.dataset.frame_data import FrameData
from pytorch3d.implicitron.dataset.dataset_base import FrameData
from pytorch3d.implicitron.dataset.rendered_mesh_dataset_map_provider import (
    RenderedMeshDatasetMapProvider,
)

Some files were not shown because too many files have changed in this diff.