Mirror of https://github.com/facebookresearch/pytorch3d.git, synced 2025-12-22 07:10:34 +08:00.

Compare commits: v0.7.5...bottler/ac (71 commits)
Commits (SHA1):

- 9c586b1351
- e13848265d
- 58566963d6
- e17ed5cd50
- 8ed0c7a002
- 2da913c7e6
- fca83e6369
- 75ebeeaea0
- ab793177c6
- 9acdd67b83
- 3f428d9981
- 05cbea115a
- 38afdcfc68
- 1e0b1d9c72
- 44702fdb4b
- 7edaee71a9
- d0d0e02007
- 4df110b0a9
- 51fd114d8b
- 89653419d0
- 7980854d44
- 51d7c06ddd
- 00c36ec01c
- b0462d8079
- b66d17a324
- 717493cb79
- 302da69461
- 4ae25bfce7
- bd52f4a408
- 17117106e4
- aec76bb4c8
- 47d5dc8824
- fe0b1bae49
- ccf22911d4
- 128be02fc0
- 31e3488a51
- b215776f2d
- 38cf0dc1c5
- 7566530669
- a27755db41
- 3da7703c5a
- f34104cf6e
- f247c86dc0
- ae9d8787ce
- 8772fe0de8
- c292c71c1a
- d0d9cae9cd
- 1f92c4e9d2
- 9b981f2c7e
- 85eccbbf77
- b80ab0caf0
- 1e817914b3
- 799c1cd21b
- 292acc71a3
- 3621a36494
- 3087ab7f62
- e46ab49a34
- 8a27590c5f
- 06cdc313a7
- 94da8841af
- fbc6725f03
- 6b8766080d
- c373a84400
- 7606854ff7
- 83bacda8fb
- f74fc450e8
- 3b4f8a4980
- 79b46734cb
- 55638f3bae
- f4f2209271
- f613682551
@@ -162,90 +162,6 @@ workflows:
jobs:
# - main:
# context: DOCKERHUB_TOKEN
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda113
    context: DOCKERHUB_TOKEN
    cu_version: cu113
    name: linux_conda_py38_cu113_pyt1120
    python_version: '3.8'
    pytorch_version: 1.12.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py38_cu116_pyt1120
    python_version: '3.8'
    pytorch_version: 1.12.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda113
    context: DOCKERHUB_TOKEN
    cu_version: cu113
    name: linux_conda_py38_cu113_pyt1121
    python_version: '3.8'
    pytorch_version: 1.12.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py38_cu116_pyt1121
    python_version: '3.8'
    pytorch_version: 1.12.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py38_cu116_pyt1130
    python_version: '3.8'
    pytorch_version: 1.13.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py38_cu117_pyt1130
    python_version: '3.8'
    pytorch_version: 1.13.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py38_cu116_pyt1131
    python_version: '3.8'
    pytorch_version: 1.13.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py38_cu117_pyt1131
    python_version: '3.8'
    pytorch_version: 1.13.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py38_cu117_pyt200
    python_version: '3.8'
    pytorch_version: 2.0.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py38_cu118_pyt200
    python_version: '3.8'
    pytorch_version: 2.0.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py38_cu117_pyt201
    python_version: '3.8'
    pytorch_version: 2.0.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py38_cu118_pyt201
    python_version: '3.8'
    pytorch_version: 2.0.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
@@ -261,89 +177,103 @@ workflows:
    python_version: '3.8'
    pytorch_version: 2.1.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda113
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu113
    name: linux_conda_py39_cu113_pyt1120
    python_version: '3.9'
    pytorch_version: 1.12.0
    cu_version: cu118
    name: linux_conda_py38_cu118_pyt211
    python_version: '3.8'
    pytorch_version: 2.1.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py39_cu116_pyt1120
    python_version: '3.9'
    pytorch_version: 1.12.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda113
    context: DOCKERHUB_TOKEN
    cu_version: cu113
    name: linux_conda_py39_cu113_pyt1121
    python_version: '3.9'
    pytorch_version: 1.12.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py39_cu116_pyt1121
    python_version: '3.9'
    pytorch_version: 1.12.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py39_cu116_pyt1130
    python_version: '3.9'
    pytorch_version: 1.13.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py39_cu117_pyt1130
    python_version: '3.9'
    pytorch_version: 1.13.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py39_cu116_pyt1131
    python_version: '3.9'
    pytorch_version: 1.13.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py39_cu117_pyt1131
    python_version: '3.9'
    pytorch_version: 1.13.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py39_cu117_pyt200
    python_version: '3.9'
    pytorch_version: 2.0.0
    cu_version: cu121
    name: linux_conda_py38_cu121_pyt211
    python_version: '3.8'
    pytorch_version: 2.1.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py39_cu118_pyt200
    python_version: '3.9'
    pytorch_version: 2.0.0
    name: linux_conda_py38_cu118_pyt212
    python_version: '3.8'
    pytorch_version: 2.1.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py39_cu117_pyt201
    python_version: '3.9'
    pytorch_version: 2.0.1
    cu_version: cu121
    name: linux_conda_py38_cu121_pyt212
    python_version: '3.8'
    pytorch_version: 2.1.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py39_cu118_pyt201
    python_version: '3.9'
    pytorch_version: 2.0.1
    name: linux_conda_py38_cu118_pyt220
    python_version: '3.8'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py38_cu121_pyt220
    python_version: '3.8'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py38_cu118_pyt222
    python_version: '3.8'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py38_cu121_pyt222
    python_version: '3.8'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py38_cu118_pyt231
    python_version: '3.8'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py38_cu121_pyt231
    python_version: '3.8'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py38_cu118_pyt240
    python_version: '3.8'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py38_cu121_pyt240
    python_version: '3.8'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py38_cu118_pyt241
    python_version: '3.8'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py38_cu121_pyt241
    python_version: '3.8'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
@@ -359,89 +289,103 @@ workflows:
    python_version: '3.9'
    pytorch_version: 2.1.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda113
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu113
    name: linux_conda_py310_cu113_pyt1120
    python_version: '3.10'
    pytorch_version: 1.12.0
    cu_version: cu118
    name: linux_conda_py39_cu118_pyt211
    python_version: '3.9'
    pytorch_version: 2.1.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py310_cu116_pyt1120
    python_version: '3.10'
    pytorch_version: 1.12.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda113
    context: DOCKERHUB_TOKEN
    cu_version: cu113
    name: linux_conda_py310_cu113_pyt1121
    python_version: '3.10'
    pytorch_version: 1.12.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py310_cu116_pyt1121
    python_version: '3.10'
    pytorch_version: 1.12.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py310_cu116_pyt1130
    python_version: '3.10'
    pytorch_version: 1.13.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py310_cu117_pyt1130
    python_version: '3.10'
    pytorch_version: 1.13.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda116
    context: DOCKERHUB_TOKEN
    cu_version: cu116
    name: linux_conda_py310_cu116_pyt1131
    python_version: '3.10'
    pytorch_version: 1.13.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py310_cu117_pyt1131
    python_version: '3.10'
    pytorch_version: 1.13.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py310_cu117_pyt200
    python_version: '3.10'
    pytorch_version: 2.0.0
    cu_version: cu121
    name: linux_conda_py39_cu121_pyt211
    python_version: '3.9'
    pytorch_version: 2.1.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt200
    python_version: '3.10'
    pytorch_version: 2.0.0
    name: linux_conda_py39_cu118_pyt212
    python_version: '3.9'
    pytorch_version: 2.1.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda117
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu117
    name: linux_conda_py310_cu117_pyt201
    python_version: '3.10'
    pytorch_version: 2.0.1
    cu_version: cu121
    name: linux_conda_py39_cu121_pyt212
    python_version: '3.9'
    pytorch_version: 2.1.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt201
    python_version: '3.10'
    pytorch_version: 2.0.1
    name: linux_conda_py39_cu118_pyt220
    python_version: '3.9'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py39_cu121_pyt220
    python_version: '3.9'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py39_cu118_pyt222
    python_version: '3.9'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py39_cu121_pyt222
    python_version: '3.9'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py39_cu118_pyt231
    python_version: '3.9'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py39_cu121_pyt231
    python_version: '3.9'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py39_cu118_pyt240
    python_version: '3.9'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py39_cu121_pyt240
    python_version: '3.9'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py39_cu118_pyt241
    python_version: '3.9'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py39_cu121_pyt241
    python_version: '3.9'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
@@ -456,6 +400,104 @@ workflows:
    name: linux_conda_py310_cu121_pyt210
    python_version: '3.10'
    pytorch_version: 2.1.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt211
    python_version: '3.10'
    pytorch_version: 2.1.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py310_cu121_pyt211
    python_version: '3.10'
    pytorch_version: 2.1.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt212
    python_version: '3.10'
    pytorch_version: 2.1.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py310_cu121_pyt212
    python_version: '3.10'
    pytorch_version: 2.1.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt220
    python_version: '3.10'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py310_cu121_pyt220
    python_version: '3.10'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt222
    python_version: '3.10'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py310_cu121_pyt222
    python_version: '3.10'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt231
    python_version: '3.10'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py310_cu121_pyt231
    python_version: '3.10'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt240
    python_version: '3.10'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py310_cu121_pyt240
    python_version: '3.10'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py310_cu118_pyt241
    python_version: '3.10'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py310_cu121_pyt241
    python_version: '3.10'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
@@ -470,6 +512,174 @@ workflows:
    name: linux_conda_py311_cu121_pyt210
    python_version: '3.11'
    pytorch_version: 2.1.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py311_cu118_pyt211
    python_version: '3.11'
    pytorch_version: 2.1.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py311_cu121_pyt211
    python_version: '3.11'
    pytorch_version: 2.1.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py311_cu118_pyt212
    python_version: '3.11'
    pytorch_version: 2.1.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py311_cu121_pyt212
    python_version: '3.11'
    pytorch_version: 2.1.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py311_cu118_pyt220
    python_version: '3.11'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py311_cu121_pyt220
    python_version: '3.11'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py311_cu118_pyt222
    python_version: '3.11'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py311_cu121_pyt222
    python_version: '3.11'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py311_cu118_pyt231
    python_version: '3.11'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py311_cu121_pyt231
    python_version: '3.11'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py311_cu118_pyt240
    python_version: '3.11'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py311_cu121_pyt240
    python_version: '3.11'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py311_cu118_pyt241
    python_version: '3.11'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py311_cu121_pyt241
    python_version: '3.11'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py312_cu118_pyt220
    python_version: '3.12'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py312_cu121_pyt220
    python_version: '3.12'
    pytorch_version: 2.2.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py312_cu118_pyt222
    python_version: '3.12'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py312_cu121_pyt222
    python_version: '3.12'
    pytorch_version: 2.2.2
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py312_cu118_pyt231
    python_version: '3.12'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py312_cu121_pyt231
    python_version: '3.12'
    pytorch_version: 2.3.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py312_cu118_pyt240
    python_version: '3.12'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py312_cu121_pyt240
    python_version: '3.12'
    pytorch_version: 2.4.0
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda118
    context: DOCKERHUB_TOKEN
    cu_version: cu118
    name: linux_conda_py312_cu118_pyt241
    python_version: '3.12'
    pytorch_version: 2.4.1
- binary_linux_conda:
    conda_docker_image: pytorch/conda-builder:cuda121
    context: DOCKERHUB_TOKEN
    cu_version: cu121
    name: linux_conda_py312_cu121_pyt241
    python_version: '3.12'
    pytorch_version: 2.4.1
- binary_linux_conda_cuda:
    name: testrun_conda_cuda_py310_cu117_pyt201
    context: DOCKERHUB_TOKEN
@@ -19,19 +19,18 @@ from packaging import version
# The CUDA versions which have pytorch conda packages available for linux for each
# version of pytorch.
CONDA_CUDA_VERSIONS = {
    "1.12.0": ["cu113", "cu116"],
    "1.12.1": ["cu113", "cu116"],
    "1.13.0": ["cu116", "cu117"],
    "1.13.1": ["cu116", "cu117"],
    "2.0.0": ["cu117", "cu118"],
    "2.0.1": ["cu117", "cu118"],
    "2.1.0": ["cu118", "cu121"],
    "2.1.1": ["cu118", "cu121"],
    "2.1.2": ["cu118", "cu121"],
    "2.2.0": ["cu118", "cu121"],
    "2.2.2": ["cu118", "cu121"],
    "2.3.1": ["cu118", "cu121"],
    "2.4.0": ["cu118", "cu121"],
    "2.4.1": ["cu118", "cu121"],
}


def conda_docker_image_for_cuda(cuda_version):
    if cuda_version in ("cu101", "cu102", "cu111"):
        return None
    if len(cuda_version) != 5:
        raise ValueError("Unknown cuda version")
    return "pytorch/conda-builder:cuda" + cuda_version[2:]
@@ -52,12 +51,18 @@ def pytorch_versions_for_python(python_version):
        for i in CONDA_CUDA_VERSIONS
        if version.Version(i) >= version.Version("2.1.0")
    ]
    if python_version == "3.12":
        return [
            i
            for i in CONDA_CUDA_VERSIONS
            if version.Version(i) >= version.Version("2.2.0")
        ]


def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
    w = []
    for btype in ["conda"]:
        for python_version in ["3.8", "3.9", "3.10", "3.11"]:
        for python_version in ["3.8", "3.9", "3.10", "3.11", "3.12"]:
            for pytorch_version in pytorch_versions_for_python(python_version):
                for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
                    w += workflow_pair(
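To make the matrix generation above concrete, here is a small, self-contained sketch (not code from the repository) that mirrors how CONDA_CUDA_VERSIONS and conda_docker_image_for_cuda combine inside workflows(): each supported PyTorch release expands into one job per available CUDA variant, and the CUDA tag is mapped to a conda-builder Docker image.

```python
# Illustrative sketch only; it mimics the loop structure shown above and is not
# part of the repository. The dict here is a one-entry excerpt of the full table.
CONDA_CUDA_VERSIONS = {"2.4.1": ["cu118", "cu121"]}


def conda_docker_image_for_cuda(cuda_version):
    # "cu121" -> "pytorch/conda-builder:cuda121"
    return "pytorch/conda-builder:cuda" + cuda_version[2:]


for pytorch_version, cu_versions in CONDA_CUDA_VERSIONS.items():
    for cu_version in cu_versions:
        # Prints: 2.4.1 cu118 pytorch/conda-builder:cuda118, then the cu121 variant.
        print(pytorch_version, cu_version, conda_docker_image_for_cuda(cu_version))
```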
.flake8 (5 lines changed)

@@ -1,5 +1,8 @@
[flake8]
ignore = E203, E266, E501, W503, E221
# B028 No explicit stacklevel argument found.
# B907 'foo' is manually surrounded by quotes, consider using the `!r` conversion flag.
# B905 `zip()` without an explicit `strict=` parameter.
ignore = E203, E266, E501, W503, E221, B028, B905, B907
max-line-length = 88
max-complexity = 18
select = B,C,E,F,W,T4,B9
.github/workflows/build.yml (new file, 20 lines)

@@ -0,0 +1,20 @@
name: facebookresearch/pytorch3d/build_and_test
on:
  pull_request:
    branches:
      - main
jobs:
  binary_linux_conda_cuda:
    runs-on: 4-core-ubuntu-gpu-t4
    env:
      PYTHON_VERSION: "3.12"
      BUILD_VERSION: "${{ github.run_number }}"
      PYTORCH_VERSION: "2.4.1"
      CU_VERSION: "cu121"
      JUST_TESTRUN: 1
    steps:
      - uses: actions/checkout@v4
      - name: Build and run tests
        run: |-
          conda create --name env --yes --quiet conda-build
          conda run --no-capture-output --name env python3 ./packaging/build_conda.py --use-conda-cuda
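The workflow drives packaging/build_conda.py entirely through environment variables. As a hedged sketch (not part of the repository), the same test build could be reproduced locally along these lines, assuming conda is installed and an environment named env with conda-build already exists, as the step above creates:

```python
# Hypothetical local reproduction of the CI step above. The command and the
# environment variable values are copied from the workflow; this wrapper script
# itself is an assumption, not repository code.
import os
import subprocess

env = dict(
    os.environ,
    PYTHON_VERSION="3.12",
    BUILD_VERSION="1",        # CI uses the workflow run number here
    PYTORCH_VERSION="2.4.1",
    CU_VERSION="cu121",
    JUST_TESTRUN="1",
)
subprocess.run(
    ["conda", "run", "--no-capture-output", "--name", "env",
     "python3", "./packaging/build_conda.py", "--use-conda-cuda"],
    env=env,
    check=True,
)
```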
INSTALL.md (11 lines changed)

@@ -8,11 +8,10 @@
The core library is written in PyTorch. Several components have underlying implementation in CUDA for improved performance. A subset of these components have CPU implementations in C++/PyTorch. It is advised to use PyTorch3D with GPU support in order to use all the features.

- Linux or macOS or Windows
- Python 3.8, 3.9 or 3.10
- PyTorch 1.12.0, 1.12.1, 1.13.0, 2.0.0, 2.0.1 or 2.1.0.
- Python
- PyTorch 2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1, 2.2.2, 2.3.0, 2.3.1, 2.4.0 or 2.4.1.
- torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
- gcc & g++ ≥ 4.9
- [fvcore](https://github.com/facebookresearch/fvcore)
- [ioPath](https://github.com/facebookresearch/iopath)
- If CUDA is to be used, use a version which is supported by the corresponding pytorch version and at least version 9.2.
- If CUDA older than 11.7 is to be used and you are building from source, the CUB library must be available. We recommend version 1.10.0.

@@ -22,7 +21,7 @@ The runtime dependencies can be installed by running:
conda create -n pytorch3d python=3.9
conda activate pytorch3d
conda install pytorch=1.13.0 torchvision pytorch-cuda=11.6 -c pytorch -c nvidia
conda install -c fvcore -c iopath -c conda-forge fvcore iopath
conda install -c iopath iopath
```

For the CUB build time dependency, which you only need if you have CUDA older than 11.7, if you are using conda, you can continue with

@@ -49,6 +48,7 @@ For developing on top of PyTorch3D or contributing, you will need to run the lin
- tdqm
- jupyter
- imageio
- fvcore
- plotly
- opencv-python

@@ -59,6 +59,7 @@ conda install jupyter
pip install scikit-image matplotlib imageio plotly opencv-python

# Tests/Linting
conda install -c fvcore -c conda-forge fvcore
pip install black usort flake8 flake8-bugbear flake8-comprehensions
```

@@ -97,7 +98,7 @@ version_str="".join([
torch.version.cuda.replace(".",""),
f"_pyt{pyt_version_str}"
])
!pip install fvcore iopath
!pip install iopath
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
```
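The hunk above only shows the tail of the wheel-selection snippet. For readability, here is the complete form assembled from the tutorial install cells further down; it is a sketch of that snippet rather than a copy of INSTALL.md, and it assumes a CUDA build of torch is installed (torch.version.cuda is not None).

```python
import sys
import torch

# Build the wheel tag, e.g. "py312_cu121_pyt241", from the running interpreter
# and the installed torch build.
pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
version_str = "".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".", ""),
    f"_pyt{pyt_version_str}",
])
print(version_str)
# Then: pip install --no-index --no-cache-dir pytorch3d \
#   -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
```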
@@ -146,6 +146,12 @@ If you are using the pulsar backend for sphere-rendering (the `PulsarPointRender

Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).

**[Oct 31st 2023]:** PyTorch3D [v0.7.5](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.5) released.

**[May 10th 2023]:** PyTorch3D [v0.7.4](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.4) released.

**[Apr 5th 2023]:** PyTorch3D [v0.7.3](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.3) released.

**[Dec 19th 2022]:** PyTorch3D [v0.7.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.2) released.

**[Oct 23rd 2022]:** PyTorch3D [v0.7.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.1) released.

@@ -23,7 +23,7 @@ conda init bash
source ~/.bashrc
conda create -y -n myenv python=3.8 matplotlib ipython ipywidgets nbconvert
conda activate myenv
conda install -y -c fvcore -c iopath -c conda-forge fvcore iopath
conda install -y -c iopath iopath
conda install -y -c pytorch pytorch=1.6.0 cudatoolkit=10.1 torchvision
conda install -y -c pytorch3d-nightly pytorch3d
pip install plotly scikit-image

@@ -5,7 +5,6 @@ sphinx_rtd_theme
sphinx_markdown_tables
numpy
iopath
fvcore
https://download.pytorch.org/whl/cpu/torchvision-0.15.2%2Bcpu-cp311-cp311-linux_x86_64.whl
https://download.pytorch.org/whl/cpu/torch-2.0.1%2Bcpu-cp311-cp311-linux_x86_64.whl
omegaconf
@@ -83,25 +83,31 @@
"import os\n",
"import sys\n",
"import torch\n",
"import subprocess\n",
"need_pytorch3d=False\n",
"try:\n",
" import pytorch3d\n",
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith(\"2.1.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{pyt_version_str}\"\n",
" ])\n",
" !pip install fvcore iopath\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{pyt_version_str}\"\n",
" ])\n",
" !pip install iopath\n",
" if sys.platform.startswith(\"linux\"):\n",
" print(\"Trying to install wheel for PyTorch3D\")\n",
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
" pip_list = !pip freeze\n",
" need_pytorch3d = not any(i.startswith(\"pytorch3d==\") for i in pip_list)\n",
" if need_pytorch3d:\n",
" print(f\"failed to find/install wheel for {version_str}\")\n",
"if need_pytorch3d:\n",
" print(\"Installing PyTorch3D from source\")\n",
" !pip install ninja\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
{
@@ -405,7 +411,7 @@
"outputs": [],
"source": [
"random_model_images = shapenet_dataset.render(\n",
" sample_nums=[3],\n",
" sample_nums=[5],\n",
" device=device,\n",
" cameras=cameras,\n",
" raster_settings=raster_settings,\n",
@@ -67,25 +67,31 @@
"import os\n",
"import sys\n",
"import torch\n",
"import subprocess\n",
"need_pytorch3d=False\n",
"try:\n",
" import pytorch3d\n",
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith(\"2.1.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{pyt_version_str}\"\n",
" ])\n",
" !pip install fvcore iopath\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{pyt_version_str}\"\n",
" ])\n",
" !pip install iopath\n",
" if sys.platform.startswith(\"linux\"):\n",
" print(\"Trying to install wheel for PyTorch3D\")\n",
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
" pip_list = !pip freeze\n",
" need_pytorch3d = not any(i.startswith(\"pytorch3d==\") for i in pip_list)\n",
" if need_pytorch3d:\n",
" print(f\"failed to find/install wheel for {version_str}\")\n",
"if need_pytorch3d:\n",
" print(\"Installing PyTorch3D from source\")\n",
" !pip install ninja\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
{

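The tutorial install cells above (the same change repeated across several notebooks) all assemble one wheel tag from the running interpreter. A minimal standalone sketch of that logic, assuming a Linux environment with a CUDA build of torch, is:

```python
import sys
import torch

# Build a tag like "py310_cu121_pyt210" (Python 3.10, CUDA 12.1, PyTorch 2.1.0),
# matching the string the notebooks use to pick a prebuilt PyTorch3D wheel.
pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
version_str = "".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".", ""),
    f"_pyt{pyt_version_str}",
])
print(version_str)
```

If no wheel matching the tag is found, the cells fall back to installing PyTorch3D from source from the stable branch.
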
@@ -4,10 +4,11 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os.path
import runpy
import subprocess
from typing import List
from typing import List, Tuple

# required env vars:
# CU_VERSION: E.g. cu112
@@ -23,7 +24,7 @@ pytorch_major_minor = tuple(int(i) for i in PYTORCH_VERSION.split(".")[:2])
source_root_dir = os.environ["PWD"]


def version_constraint(version):
def version_constraint(version) -> str:
"""
Given version "11.3" returns " >=11.3,<11.4"
"""
@@ -32,7 +33,7 @@ def version_constraint(version):
return f" >={version},<{upper}"


def get_cuda_major_minor():
def get_cuda_major_minor() -> Tuple[str, str]:
if CU_VERSION == "cpu":
raise ValueError("fn only for cuda builds")
if len(CU_VERSION) != 5 or CU_VERSION[:2] != "cu":
@@ -42,11 +43,10 @@ def get_cuda_major_minor():
return major, minor


def setup_cuda():
def setup_cuda(use_conda_cuda: bool) -> List[str]:
if CU_VERSION == "cpu":
return
return []
major, minor = get_cuda_major_minor()
os.environ["CUDA_HOME"] = f"/usr/local/cuda-{major}.{minor}/"
os.environ["FORCE_CUDA"] = "1"

basic_nvcc_flags = (
@@ -75,11 +75,26 @@ def setup_cuda():

if os.environ.get("JUST_TESTRUN", "0") != "1":
os.environ["NVCC_FLAGS"] = nvcc_flags
if use_conda_cuda:
os.environ["CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT1"] = "- cuda-toolkit"
os.environ["CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT2"] = (
f"- cuda-version={major}.{minor}"
)
return ["-c", f"nvidia/label/cuda-{major}.{minor}.0"]
else:
os.environ["CUDA_HOME"] = f"/usr/local/cuda-{major}.{minor}/"
return []


def setup_conda_pytorch_constraint() -> List[str]:
pytorch_constraint = f"- pytorch=={PYTORCH_VERSION}"
os.environ["CONDA_PYTORCH_CONSTRAINT"] = pytorch_constraint
if pytorch_major_minor < (2, 2):
os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = "- mkl!=2024.1.0"
os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools<70"
else:
os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = ""
os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools"
os.environ["CONDA_PYTORCH_BUILD_CONSTRAINT"] = pytorch_constraint
os.environ["PYTORCH_VERSION_NODOT"] = PYTORCH_VERSION.replace(".", "")

@@ -89,7 +104,7 @@ def setup_conda_pytorch_constraint() -> List[str]:
return ["-c", "pytorch", "-c", "nvidia"]


def setup_conda_cudatoolkit_constraint():
def setup_conda_cudatoolkit_constraint() -> None:
if CU_VERSION == "cpu":
os.environ["CONDA_CPUONLY_FEATURE"] = "- cpuonly"
os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = ""
@@ -110,14 +125,14 @@ def setup_conda_cudatoolkit_constraint():
os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = toolkit


def do_build(start_args: List[str]):
def do_build(start_args: List[str]) -> None:
args = start_args.copy()

test_flag = os.environ.get("TEST_FLAG")
if test_flag is not None:
args.append(test_flag)

args.extend(["-c", "bottler", "-c", "fvcore", "-c", "iopath", "-c", "conda-forge"])
args.extend(["-c", "bottler", "-c", "iopath", "-c", "conda-forge"])
args.append("--no-anaconda-upload")
args.extend(["--python", os.environ["PYTHON_VERSION"]])
args.append("packaging/pytorch3d")
@@ -126,8 +141,16 @@ def do_build(start_args: List[str]):


if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Build the conda package.")
parser.add_argument(
"--use-conda-cuda",
action="store_true",
help="get cuda from conda ignoring local cuda",
)
our_args = parser.parse_args()

args = ["conda", "build"]
setup_cuda()
args += setup_cuda(use_conda_cuda=our_args.use_conda_cuda)

init_path = source_root_dir + "/pytorch3d/__init__.py"
build_version = runpy.run_path(init_path)["__version__"]

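The hunks above add an opt-in --use-conda-cuda flag to the conda build script (the path packaging/build_conda.py is an assumption here; the diff does not show the filename). A hedged sketch of the channel selection that the new setup_cuda return value drives:

```python
from typing import List


def conda_channel_args(use_conda_cuda: bool, major: str, minor: str) -> List[str]:
    # Mirrors the branch added above: with --use-conda-cuda the toolkit comes
    # from the nvidia conda channel pinned to the exact CUDA version; otherwise
    # the build keeps relying on a locally installed CUDA via CUDA_HOME.
    if use_conda_cuda:
        return ["-c", f"nvidia/label/cuda-{major}.{minor}.0"]
    return []


print(conda_channel_args(True, "12", "1"))   # ['-c', 'nvidia/label/cuda-12.1.0']
print(conda_channel_args(False, "12", "1"))  # []
```

These extra channel arguments are appended to the `conda build` command assembled in the __main__ block.
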
@@ -26,6 +26,6 @@ version_str="".join([
torch.version.cuda.replace(".",""),
f"_pyt{pyt_version_str}"
])
!pip install fvcore iopath
!pip install iopath
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
```

@@ -144,7 +144,7 @@ do
conda activate "$tag"
# shellcheck disable=SC2086
conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "$cudatools=$CUDA_TAG"
pip install fvcore iopath
pip install iopath
echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"

rm -rf dist

@@ -8,12 +8,16 @@ source:
|
||||
requirements:
|
||||
build:
|
||||
- {{ compiler('c') }} # [win]
|
||||
{{ environ.get('CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT1', '') }}
|
||||
{{ environ.get('CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT2', '') }}
|
||||
{{ environ.get('CONDA_CUB_CONSTRAINT') }}
|
||||
|
||||
host:
|
||||
- python
|
||||
- setuptools
|
||||
- mkl =2023 # [x86_64]
|
||||
{{ environ.get('SETUPTOOLS_CONSTRAINT') }}
|
||||
{{ environ.get('CONDA_PYTORCH_BUILD_CONSTRAINT') }}
|
||||
{{ environ.get('CONDA_PYTORCH_MKL_CONSTRAINT') }}
|
||||
{{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
|
||||
{{ environ.get('CONDA_CPUONLY_FEATURE') }}
|
||||
|
||||
@@ -21,13 +25,14 @@ requirements:
|
||||
- python
|
||||
- numpy >=1.11
|
||||
- torchvision >=0.5
|
||||
- fvcore
|
||||
- mkl =2023 # [x86_64]
|
||||
- iopath
|
||||
{{ environ.get('CONDA_PYTORCH_CONSTRAINT') }}
|
||||
{{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
|
||||
|
||||
build:
|
||||
string: py{{py}}_{{ environ['CU_VERSION'] }}_pyt{{ environ['PYTORCH_VERSION_NODOT']}}
|
||||
# script: LD_LIBRARY_PATH=$PREFIX/lib:$BUILD_PREFIX/lib:$LD_LIBRARY_PATH python setup.py install --single-version-externally-managed --record=record.txt # [not win]
|
||||
script: python setup.py install --single-version-externally-managed --record=record.txt # [not win]
|
||||
script_env:
|
||||
- CUDA_HOME
|
||||
@@ -47,6 +52,10 @@ test:
|
||||
- imageio
|
||||
- hydra-core
|
||||
- accelerate
|
||||
- matplotlib
|
||||
- tabulate
|
||||
- pandas
|
||||
- sqlalchemy
|
||||
commands:
|
||||
#pytest .
|
||||
python -m unittest discover -v -s tests -t .
|
||||
|
||||
@@ -3,3 +3,5 @@
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
""""
|
||||
This file is the entry point for launching experiments with Implicitron.
|
||||
|
||||
@@ -97,7 +99,7 @@ except ModuleNotFoundError:
|
||||
no_accelerate = os.environ.get("PYTORCH3D_NO_ACCELERATE") is not None
|
||||
|
||||
|
||||
class Experiment(Configurable): # pyre-ignore: 13
|
||||
class Experiment(Configurable):
|
||||
"""
|
||||
This class is at the top level of Implicitron's config hierarchy. Its
|
||||
members are high-level components necessary for training an implicit rende-
|
||||
@@ -118,12 +120,16 @@ class Experiment(Configurable): # pyre-ignore: 13
|
||||
will be saved here.
|
||||
"""
|
||||
|
||||
# pyre-fixme[13]: Attribute `data_source` is never initialized.
|
||||
data_source: DataSourceBase
|
||||
data_source_class_type: str = "ImplicitronDataSource"
|
||||
# pyre-fixme[13]: Attribute `model_factory` is never initialized.
|
||||
model_factory: ModelFactoryBase
|
||||
model_factory_class_type: str = "ImplicitronModelFactory"
|
||||
# pyre-fixme[13]: Attribute `optimizer_factory` is never initialized.
|
||||
optimizer_factory: OptimizerFactoryBase
|
||||
optimizer_factory_class_type: str = "ImplicitronOptimizerFactory"
|
||||
# pyre-fixme[13]: Attribute `training_loop` is never initialized.
|
||||
training_loop: TrainingLoopBase
|
||||
training_loop_class_type: str = "ImplicitronTrainingLoop"
|
||||
|
||||
|
||||
@@ -3,3 +3,5 @@
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import Optional
|
||||
@@ -43,7 +45,7 @@ class ModelFactoryBase(ReplaceableBase):
|
||||
|
||||
|
||||
@registry.register
|
||||
class ImplicitronModelFactory(ModelFactoryBase): # pyre-ignore [13]
|
||||
class ImplicitronModelFactory(ModelFactoryBase):
|
||||
"""
|
||||
A factory class that initializes an implicit rendering model.
|
||||
|
||||
@@ -59,6 +61,7 @@ class ImplicitronModelFactory(ModelFactoryBase): # pyre-ignore [13]
|
||||
|
||||
"""
|
||||
|
||||
# pyre-fixme[13]: Attribute `model` is never initialized.
|
||||
model: ImplicitronModelBase
|
||||
model_class_type: str = "GenericModel"
|
||||
resume: bool = True
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
import os
|
||||
@@ -121,7 +123,6 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
|
||||
"""
|
||||
# Get the parameters to optimize
|
||||
if hasattr(model, "_get_param_groups"): # use the model function
|
||||
# pyre-ignore[29]
|
||||
p_groups = model._get_param_groups(self.lr, wd=self.weight_decay)
|
||||
else:
|
||||
p_groups = [
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
@@ -28,13 +30,13 @@ from .utils import seed_all_random_engines
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# pyre-fixme[13]: Attribute `evaluator` is never initialized.
|
||||
class TrainingLoopBase(ReplaceableBase):
|
||||
"""
|
||||
Members:
|
||||
evaluator: An EvaluatorBase instance, used to evaluate training results.
|
||||
"""
|
||||
|
||||
# pyre-fixme[13]: Attribute `evaluator` is never initialized.
|
||||
evaluator: Optional[EvaluatorBase]
|
||||
evaluator_class_type: Optional[str] = "ImplicitronEvaluator"
|
||||
|
||||
@@ -110,6 +112,8 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
|
||||
def __post_init__(self):
|
||||
run_auto_creation(self)
|
||||
|
||||
# pyre-fixme[14]: `run` overrides method defined in `TrainingLoopBase`
|
||||
# inconsistently.
|
||||
def run(
|
||||
self,
|
||||
*,
|
||||
@@ -391,7 +395,6 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
|
||||
):
|
||||
prefix = f"e{stats.epoch}_it{stats.it[trainmode]}"
|
||||
if hasattr(model, "visualize"):
|
||||
# pyre-ignore [29]
|
||||
model.visualize(
|
||||
viz,
|
||||
visdom_env_imgs,
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
|
||||
import random
|
||||
|
||||
|
||||
@@ -3,3 +3,5 @@
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import logging
|
||||
import os
|
||||
import unittest
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
import os
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
"""
|
||||
Script to visualize a previously trained model. Example call:
|
||||
|
||||
|
||||
@@ -343,12 +343,14 @@ class RadianceFieldRenderer(torch.nn.Module):
|
||||
# For a full render pass concatenate the output chunks,
|
||||
# and reshape to image size.
|
||||
out = {
|
||||
k: torch.cat(
|
||||
[ch_o[k] for ch_o in chunk_outputs],
|
||||
dim=1,
|
||||
).view(-1, *self._image_size, 3)
|
||||
if chunk_outputs[0][k] is not None
|
||||
else None
|
||||
k: (
|
||||
torch.cat(
|
||||
[ch_o[k] for ch_o in chunk_outputs],
|
||||
dim=1,
|
||||
).view(-1, *self._image_size, 3)
|
||||
if chunk_outputs[0][k] is not None
|
||||
else None
|
||||
)
|
||||
for k in ("rgb_fine", "rgb_coarse", "rgb_gt")
|
||||
}
|
||||
else:
|
||||
|
||||
@@ -4,4 +4,6 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
__version__ = "0.7.5"
|
||||
# pyre-unsafe
|
||||
|
||||
__version__ = "0.7.8"
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from .datatypes import Device, get_device, make_device
|
||||
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from typing import Sequence, Tuple, Union
|
||||
|
||||
import torch
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from typing import Optional, Union
|
||||
|
||||
import torch
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import math
|
||||
from typing import Tuple
|
||||
|
||||
|
||||
@@ -4,5 +4,7 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from .symeig3x3 import symeig3x3
|
||||
from .utils import _safe_det_3x3
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import math
|
||||
from typing import Optional, Tuple
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
|
||||
import torch
|
||||
|
||||
|
||||
@@ -99,6 +99,7 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
||||
m.def("marching_cubes", &MarchingCubes);
|
||||
|
||||
// Pulsar.
|
||||
// Pulsar not enabled on AMD.
|
||||
#ifdef PULSAR_LOGGING_ENABLED
|
||||
c10::ShowLogInfoToStderr();
|
||||
#endif
|
||||
|
||||
@@ -338,7 +338,7 @@ std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdxCuda(
|
||||
|
||||
TORCH_CHECK((norm == 1) || (norm == 2), "Norm must be 1 or 2.");
|
||||
|
||||
TORCH_CHECK(p2.size(2) == D, "Point sets must have the same last dimension");
|
||||
TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension");
|
||||
auto long_dtype = lengths1.options().dtype(at::kLong);
|
||||
auto idxs = at::zeros({N, P1, K}, long_dtype);
|
||||
auto dists = at::zeros({N, P1, K}, p1.options());
|
||||
@@ -495,7 +495,7 @@ __global__ void KNearestNeighborBackwardKernel(
|
||||
if ((p1_idx < num1) && (k < num2)) {
|
||||
const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k];
|
||||
// index of point in p2 corresponding to the k-th nearest neighbor
|
||||
const size_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
|
||||
const int64_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
|
||||
// If the index is the pad value of -1 then ignore it
|
||||
if (p2_idx == -1) {
|
||||
continue;
|
||||
|
||||
@@ -223,7 +223,7 @@ __global__ void CompactVoxelsKernel(
|
||||
compactedVoxelArray,
|
||||
const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
|
||||
voxelOccupied,
|
||||
const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
|
||||
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
|
||||
voxelOccupiedScan,
|
||||
uint numVoxels) {
|
||||
uint id = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
@@ -255,7 +255,8 @@ __global__ void GenerateFacesKernel(
|
||||
at::PackedTensorAccessor<int64_t, 1, at::RestrictPtrTraits> ids,
|
||||
at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
|
||||
compactedVoxelArray,
|
||||
at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits> numVertsScanned,
|
||||
at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
|
||||
numVertsScanned,
|
||||
const uint activeVoxels,
|
||||
const at::PackedTensorAccessor32<float, 3, at::RestrictPtrTraits> vol,
|
||||
const at::PackedTensorAccessor32<int, 2, at::RestrictPtrTraits> faceTable,
|
||||
@@ -381,6 +382,44 @@ __global__ void GenerateFacesKernel(
|
||||
} // end for grid-strided kernel
|
||||
}
|
||||
|
||||
// ATen/Torch does not have an exclusive-scan operator. Additionally, in the
|
||||
// code below we need to get the "total number of items to work on" after
|
||||
// a scan, which with an inclusive-scan would simply be the value of the last
|
||||
// element in the tensor.
|
||||
//
|
||||
// This utility function hits two birds with one stone, by running
|
||||
// an inclusive-scan into a right-shifted view of a tensor that's
|
||||
// allocated to be one element bigger than the input tensor.
|
||||
//
|
||||
// Note; return tensor is `int64_t` per element, even if the input
|
||||
// tensor is only 32-bit. Also, the return tensor is one element bigger
|
||||
// than the input one.
|
||||
//
|
||||
// Secondary optional argument is an output argument that gets the
|
||||
// value of the last element of the return tensor (because you almost
|
||||
// always need this CPU-side right after this function anyway).
|
||||
static at::Tensor ExclusiveScanAndTotal(
|
||||
const at::Tensor& inTensor,
|
||||
int64_t* optTotal = nullptr) {
|
||||
const auto inSize = inTensor.sizes()[0];
|
||||
auto retTensor = at::zeros({inSize + 1}, at::kLong).to(inTensor.device());
|
||||
|
||||
using at::indexing::None;
|
||||
using at::indexing::Slice;
|
||||
auto rightShiftedView = retTensor.index({Slice(1, None)});
|
||||
|
||||
// Do an (inclusive-scan) cumulative sum in to the view that's
|
||||
// shifted one element to the right...
|
||||
at::cumsum_out(rightShiftedView, inTensor, 0, at::kLong);
|
||||
|
||||
if (optTotal) {
|
||||
*optTotal = retTensor[inSize].cpu().item<int64_t>();
|
||||
}
|
||||
|
||||
// ...so that the not-shifted tensor holds the exclusive-scan
|
||||
return retTensor;
|
||||
}
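The ExclusiveScanAndTotal helper above works around the lack of an exclusive-scan primitive in ATen by writing an inclusive cumsum into a right-shifted view of a one-element-larger buffer. A minimal PyTorch sketch of the same idea (illustration only, not the CUDA code):

```python
import torch


def exclusive_scan_and_total(t: torch.Tensor):
    # out has one extra slot; writing the inclusive cumsum into out[1:] leaves
    # out[:-1] holding the exclusive scan and out[-1] holding the grand total.
    out = torch.zeros(t.numel() + 1, dtype=torch.int64, device=t.device)
    out[1:] = torch.cumsum(t, 0)
    return out, int(out[-1])


occupied = torch.tensor([1, 0, 1, 1, 0], dtype=torch.int32)
scan, total = exclusive_scan_and_total(occupied)
print(scan.tolist(), total)  # [0, 1, 1, 2, 3, 3] 3
```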
|
||||
|
||||
// Entrance for marching cubes cuda extension. Marching Cubes is an algorithm to
|
||||
// create triangle meshes from an implicit function (one of the form f(x, y, z)
|
||||
// = 0). It works by iteratively checking a grid of cubes superimposed over a
|
||||
@@ -443,20 +482,18 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
||||
using at::indexing::Slice;
|
||||
|
||||
auto d_voxelVerts =
|
||||
at::zeros({numVoxels + 1}, at::TensorOptions().dtype(at::kInt))
|
||||
at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
|
||||
.to(vol.device());
|
||||
auto d_voxelVerts_ = d_voxelVerts.index({Slice(1, None)});
|
||||
auto d_voxelOccupied =
|
||||
at::zeros({numVoxels + 1}, at::TensorOptions().dtype(at::kInt))
|
||||
at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
|
||||
.to(vol.device());
|
||||
auto d_voxelOccupied_ = d_voxelOccupied.index({Slice(1, None)});
|
||||
|
||||
// Execute "ClassifyVoxelKernel" kernel to precompute
|
||||
// two arrays - d_voxelOccupied and d_voxelVertices to global memory,
|
||||
// which stores the occupancy state and number of voxel vertices per voxel.
|
||||
ClassifyVoxelKernel<<<grid, threads, 0, stream>>>(
|
||||
d_voxelVerts_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupied_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelVerts.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
|
||||
isolevel);
|
||||
AT_CUDA_CHECK(cudaGetLastError());
|
||||
@@ -466,12 +503,9 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
||||
// count for voxels in the grid and compute the number of active voxels.
|
||||
// If the number of active voxels is 0, return zero tensor for verts and
|
||||
// faces.
|
||||
|
||||
auto d_voxelOccupiedScan = at::cumsum(d_voxelOccupied, 0);
|
||||
auto d_voxelOccupiedScan_ = d_voxelOccupiedScan.index({Slice(1, None)});
|
||||
|
||||
// number of active voxels
|
||||
int activeVoxels = d_voxelOccupiedScan[numVoxels].cpu().item<int>();
|
||||
int64_t activeVoxels = 0;
|
||||
auto d_voxelOccupiedScan =
|
||||
ExclusiveScanAndTotal(d_voxelOccupied, &activeVoxels);
|
||||
|
||||
const int device_id = vol.device().index();
|
||||
auto opt = at::TensorOptions().dtype(at::kInt).device(at::kCUDA, device_id);
|
||||
@@ -486,23 +520,21 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
||||
return std::make_tuple(verts, faces, ids);
|
||||
}
|
||||
|
||||
// Execute "CompactVoxelsKernel" kernel to compress voxels for accleration.
|
||||
// Execute "CompactVoxelsKernel" kernel to compress voxels for acceleration.
|
||||
// This allows us to run triangle generation on only the occupied voxels.
|
||||
auto d_compVoxelArray = at::zeros({activeVoxels}, opt);
|
||||
CompactVoxelsKernel<<<grid, threads, 0, stream>>>(
|
||||
d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupiedScan_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupiedScan
|
||||
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
|
||||
numVoxels);
|
||||
AT_CUDA_CHECK(cudaGetLastError());
|
||||
cudaDeviceSynchronize();
|
||||
|
||||
// Scan d_voxelVerts array to generate offsets of vertices for each voxel
|
||||
auto d_voxelVertsScan = at::cumsum(d_voxelVerts, 0);
|
||||
auto d_voxelVertsScan_ = d_voxelVertsScan.index({Slice(1, None)});
|
||||
|
||||
// total number of vertices
|
||||
int totalVerts = d_voxelVertsScan[numVoxels].cpu().item<int>();
|
||||
int64_t totalVerts = 0;
|
||||
auto d_voxelVertsScan = ExclusiveScanAndTotal(d_voxelVerts, &totalVerts);
|
||||
|
||||
// Execute "GenerateFacesKernel" kernel
|
||||
// This runs only on the occupied voxels.
|
||||
@@ -522,7 +554,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
||||
faces.packed_accessor<int64_t, 2, at::RestrictPtrTraits>(),
|
||||
ids.packed_accessor<int64_t, 1, at::RestrictPtrTraits>(),
|
||||
d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelVertsScan_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelVertsScan.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
|
||||
activeVoxels,
|
||||
vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
|
||||
faceTable.packed_accessor32<int, 2, at::RestrictPtrTraits>(),
|
||||
|
||||
@@ -71,8 +71,8 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCpu(
|
||||
if ((j + 1) % 3 == 0 && ps[0] != ps[1] && ps[1] != ps[2] &&
|
||||
ps[2] != ps[0]) {
|
||||
for (int k = 0; k < 3; k++) {
|
||||
int v = tri[k];
|
||||
edge_id_to_v[tri.at(k)] = ps.at(k);
|
||||
int64_t v = tri.at(k);
|
||||
edge_id_to_v[v] = ps.at(k);
|
||||
if (!uniq_edge_id.count(v)) {
|
||||
uniq_edge_id[v] = verts.size();
|
||||
verts.push_back(edge_id_to_v[v]);
|
||||
|
||||
@@ -30,11 +30,20 @@
|
||||
#define GLOBAL __global__
|
||||
#define RESTRICT __restrict__
|
||||
#define DEBUGBREAK()
|
||||
#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
|
||||
#pragma nv_diag_suppress 1866
|
||||
#pragma nv_diag_suppress 2941
|
||||
#pragma nv_diag_suppress 2951
|
||||
#pragma nv_diag_suppress 2967
|
||||
#else
|
||||
#if !defined(USE_ROCM)
|
||||
#pragma diag_suppress = attribute_not_allowed
|
||||
#pragma diag_suppress = 1866
|
||||
#pragma diag_suppress = 2941
|
||||
#pragma diag_suppress = 2951
|
||||
#pragma diag_suppress = 2967
|
||||
#endif //! USE_ROCM
|
||||
#endif
|
||||
#else // __CUDACC__
|
||||
#define INLINE inline
|
||||
#define HOST
|
||||
@@ -49,6 +58,9 @@
|
||||
#pragma clang diagnostic pop
|
||||
#ifdef WITH_CUDA
|
||||
#include <ATen/cuda/CUDAContext.h>
|
||||
#if !defined(USE_ROCM)
|
||||
#include <vector_functions.h>
|
||||
#endif //! USE_ROCM
|
||||
#else
|
||||
#ifndef cudaStream_t
|
||||
typedef void* cudaStream_t;
|
||||
@@ -65,8 +77,6 @@ struct float2 {
|
||||
struct float3 {
|
||||
float x, y, z;
|
||||
};
|
||||
#endif
|
||||
namespace py = pybind11;
|
||||
inline float3 make_float3(const float& x, const float& y, const float& z) {
|
||||
float3 res;
|
||||
res.x = x;
|
||||
@@ -74,6 +84,8 @@ inline float3 make_float3(const float& x, const float& y, const float& z) {
|
||||
res.z = z;
|
||||
return res;
|
||||
}
|
||||
#endif
|
||||
namespace py = pybind11;
|
||||
|
||||
inline bool operator==(const float3& a, const float3& b) {
|
||||
return a.x == b.x && a.y == b.y && a.z == b.z;
|
||||
|
||||
@@ -59,6 +59,11 @@ getLastCudaError(const char* errorMessage, const char* file, const int line) {
|
||||
#define SHARED __shared__
|
||||
#define ACTIVEMASK() __activemask()
|
||||
#define BALLOT(mask, val) __ballot_sync((mask), val)
|
||||
|
||||
/* TODO (ROCM-6.2): None of the WARP_* are used anywhere and ROCM-6.2 natively
|
||||
* supports __shfl_*. Disabling until the move to ROCM-6.2.
|
||||
*/
|
||||
#if !defined(USE_ROCM)
|
||||
/**
|
||||
* Find the cumulative sum within a warp up to the current
|
||||
* thread lane, with each mask thread contributing base.
|
||||
@@ -115,6 +120,7 @@ INLINE DEVICE float3 WARP_SUM_FLOAT3(
|
||||
ret.z = WARP_SUM(group, mask, base.z);
|
||||
return ret;
|
||||
}
|
||||
#endif //! USE_ROCM
|
||||
|
||||
// Floating point.
|
||||
// #define FMUL(a, b) __fmul_rn((a), (b))
|
||||
@@ -142,6 +148,7 @@ INLINE DEVICE float3 WARP_SUM_FLOAT3(
|
||||
#define FMA(x, y, z) __fmaf_rn((x), (y), (z))
|
||||
#define I2F(a) __int2float_rn(a)
|
||||
#define FRCP(x) __frcp_rn(x)
|
||||
#if !defined(USE_ROCM)
|
||||
__device__ static float atomicMax(float* address, float val) {
|
||||
int* address_as_i = (int*)address;
|
||||
int old = *address_as_i, assumed;
|
||||
@@ -166,6 +173,7 @@ __device__ static float atomicMin(float* address, float val) {
|
||||
} while (assumed != old);
|
||||
return __int_as_float(old);
|
||||
}
|
||||
#endif //! USE_ROCM
|
||||
#define DMAX(a, b) FMAX(a, b)
|
||||
#define DMIN(a, b) FMIN(a, b)
|
||||
#define DSQRT(a) sqrt(a)
|
||||
@@ -357,11 +357,11 @@ void MAX_WS(
|
||||
//
|
||||
//
|
||||
#define END_PARALLEL() \
|
||||
end_parallel:; \
|
||||
end_parallel :; \
|
||||
}
|
||||
#define END_PARALLEL_NORET() }
|
||||
#define END_PARALLEL_2D() \
|
||||
end_parallel:; \
|
||||
end_parallel :; \
|
||||
} \
|
||||
}
|
||||
#define END_PARALLEL_2D_NORET() \
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
#include "./commands.h"
|
||||
|
||||
namespace pulsar {
|
||||
IHD CamGradInfo::CamGradInfo() {
|
||||
IHD CamGradInfo::CamGradInfo(int x) {
|
||||
cam_pos = make_float3(0.f, 0.f, 0.f);
|
||||
pixel_0_0_center = make_float3(0.f, 0.f, 0.f);
|
||||
pixel_dir_x = make_float3(0.f, 0.f, 0.f);
|
||||
|
||||
@@ -63,7 +63,7 @@ inline bool operator==(const CamInfo& a, const CamInfo& b) {
|
||||
};
|
||||
|
||||
struct CamGradInfo {
|
||||
HOST DEVICE CamGradInfo();
|
||||
HOST DEVICE CamGradInfo(int = 0);
|
||||
float3 cam_pos;
|
||||
float3 pixel_0_0_center;
|
||||
float3 pixel_dir_x;
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
// #pragma diag_suppress = 68
|
||||
#include <ATen/cuda/CUDAContext.h>
|
||||
// #pragma pop
|
||||
#include "../cuda/commands.h"
|
||||
#include "../gpu/commands.h"
|
||||
#else
|
||||
#pragma clang diagnostic push
|
||||
#pragma clang diagnostic ignored "-Weverything"
|
||||
|
||||
@@ -46,6 +46,7 @@ IHD float3 outer_product_sum(const float3& a) {
|
||||
}
|
||||
|
||||
// TODO: put intrinsics here.
|
||||
#if !defined(USE_ROCM)
|
||||
IHD float3 operator+(const float3& a, const float3& b) {
|
||||
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
|
||||
}
|
||||
@@ -93,6 +94,7 @@ IHD float3 operator*(const float3& a, const float3& b) {
|
||||
IHD float3 operator*(const float& a, const float3& b) {
|
||||
return b * a;
|
||||
}
|
||||
#endif //! USE_ROCM
|
||||
|
||||
INLINE DEVICE float length(const float3& v) {
|
||||
// TODO: benchmark what's faster.
|
||||
|
||||
@@ -93,7 +93,7 @@ HOST void construct(
|
||||
MALLOC(self->di_sorted_d, DrawInfo, max_num_balls);
|
||||
MALLOC(self->region_flags_d, char, max_num_balls);
|
||||
MALLOC(self->num_selected_d, size_t, 1);
|
||||
MALLOC(self->forw_info_d, float, width* height*(3 + 2 * n_track));
|
||||
MALLOC(self->forw_info_d, float, width* height * (3 + 2 * n_track));
|
||||
MALLOC(self->min_max_pixels_d, IntersectInfo, 1);
|
||||
MALLOC(self->grad_pos_d, float3, max_num_balls);
|
||||
MALLOC(self->grad_col_d, float, max_num_balls* n_channels);
|
||||
|
||||
@@ -99,7 +99,7 @@ GLOBAL void render(
|
||||
/** Whether loading of balls is completed. */
|
||||
SHARED bool loading_done;
|
||||
/** The number of balls loaded overall (just for statistics). */
|
||||
SHARED int n_balls_loaded;
|
||||
[[maybe_unused]] SHARED int n_balls_loaded;
|
||||
/** The area this thread block covers. */
|
||||
SHARED IntersectInfo block_area;
|
||||
if (thread_block.thread_rank() == 0) {
|
||||
@@ -283,9 +283,15 @@ GLOBAL void render(
|
||||
(percent_allowed_difference > 0.f &&
|
||||
max_closest_possible_intersection > depth_threshold) ||
|
||||
tracker.get_n_hits() >= max_n_hits;
|
||||
#if defined(__CUDACC__) && defined(__HIP_PLATFORM_AMD__)
|
||||
unsigned long long warp_done = __ballot(done);
|
||||
int warp_done_bit_cnt = __popcll(warp_done);
|
||||
#else
|
||||
uint warp_done = thread_warp.ballot(done);
|
||||
int warp_done_bit_cnt = POPC(warp_done);
|
||||
#endif //__CUDACC__ && __HIP_PLATFORM_AMD__
|
||||
if (thread_warp.thread_rank() == 0)
|
||||
ATOMICADD_B(&n_pixels_done, POPC(warp_done));
|
||||
ATOMICADD_B(&n_pixels_done, warp_done_bit_cnt);
|
||||
// This sync is necessary to keep n_loaded until all threads are done with
|
||||
// painting.
|
||||
thread_block.sync();
|
||||
|
||||
@@ -213,8 +213,8 @@ std::tuple<size_t, size_t, bool, torch::Tensor> Renderer::arg_check(
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float& min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode) {
|
||||
@@ -668,8 +668,8 @@ std::tuple<torch::Tensor, torch::Tensor> Renderer::forward(
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode) {
|
||||
@@ -888,14 +888,14 @@ std::tuple<torch::Tensor, torch::Tensor> Renderer::forward(
|
||||
};
|
||||
|
||||
std::tuple<
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>>
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>>
|
||||
Renderer::backward(
|
||||
const torch::Tensor& grad_im,
|
||||
const torch::Tensor& image,
|
||||
@@ -912,8 +912,8 @@ Renderer::backward(
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode,
|
||||
@@ -922,7 +922,7 @@ Renderer::backward(
|
||||
const bool& dif_rad,
|
||||
const bool& dif_cam,
|
||||
const bool& dif_opy,
|
||||
const at::optional<std::pair<uint, uint>>& dbg_pos) {
|
||||
const std::optional<std::pair<uint, uint>>& dbg_pos) {
|
||||
this->ensure_on_device(this->device_tracker.device());
|
||||
size_t batch_size;
|
||||
size_t n_points;
|
||||
@@ -1045,14 +1045,14 @@ Renderer::backward(
|
||||
}
|
||||
// Prepare the return value.
|
||||
std::tuple<
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>>
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>>
|
||||
ret;
|
||||
if (mode == 1 || (!dif_pos && !dif_col && !dif_rad && !dif_cam && !dif_opy)) {
|
||||
return ret;
|
||||
|
||||
@@ -44,21 +44,21 @@ struct Renderer {
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode);
|
||||
|
||||
std::tuple<
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>>
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>>
|
||||
backward(
|
||||
const torch::Tensor& grad_im,
|
||||
const torch::Tensor& image,
|
||||
@@ -75,8 +75,8 @@ struct Renderer {
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode,
|
||||
@@ -85,7 +85,7 @@ struct Renderer {
|
||||
const bool& dif_rad,
|
||||
const bool& dif_cam,
|
||||
const bool& dif_opy,
|
||||
const at::optional<std::pair<uint, uint>>& dbg_pos);
|
||||
const std::optional<std::pair<uint, uint>>& dbg_pos);
|
||||
|
||||
// Infrastructure.
|
||||
/**
|
||||
@@ -115,8 +115,8 @@ struct Renderer {
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float& min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode);
|
||||
|
||||
@@ -244,8 +244,7 @@ at::Tensor RasterizeCoarseCuda(
|
||||
if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) {
|
||||
std::stringstream ss;
|
||||
ss << "In RasterizeCoarseCuda got num_bins_y: " << num_bins_y
|
||||
<< ", num_bins_x: " << num_bins_x << ", "
|
||||
<< "; that's too many!";
|
||||
<< ", num_bins_x: " << num_bins_x << ", " << "; that's too many!";
|
||||
AT_ERROR(ss.str());
|
||||
}
|
||||
auto opts = elems_per_batch.options().dtype(at::kInt);
|
||||
|
||||
@@ -144,7 +144,7 @@ __device__ void CheckPixelInsideFace(
|
||||
const bool zero_face_area =
|
||||
(face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);
|
||||
|
||||
if (zmax < 0 || cull_backfaces && back_face || outside_bbox ||
|
||||
if (zmax < 0 || (cull_backfaces && back_face) || outside_bbox ||
|
||||
zero_face_area) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ const auto vEpsilon = 1e-8;
|
||||
|
||||
// Common functions and operators for float2.
|
||||
|
||||
// Complex arithmetic is already defined for AMD.
|
||||
#if !defined(USE_ROCM)
|
||||
__device__ inline float2 operator-(const float2& a, const float2& b) {
|
||||
return make_float2(a.x - b.x, a.y - b.y);
|
||||
}
|
||||
@@ -41,6 +43,7 @@ __device__ inline float2 operator*(const float2& a, const float2& b) {
|
||||
__device__ inline float2 operator*(const float a, const float2& b) {
|
||||
return make_float2(a * b.x, a * b.y);
|
||||
}
|
||||
#endif
|
||||
|
||||
__device__ inline float FloatMin3(const float a, const float b, const float c) {
|
||||
return fminf(a, fminf(b, c));
|
||||
|
||||
@@ -23,37 +23,51 @@ WarpReduceMin(scalar_t* min_dists, int64_t* min_idxs, const size_t tid) {
|
||||
min_idxs[tid] = min_idxs[tid + 32];
|
||||
min_dists[tid] = min_dists[tid + 32];
|
||||
}
|
||||
// AMD does not use explicit syncwarp and instead automatically inserts memory
|
||||
// fences during compilation.
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
// s = 16
|
||||
if (min_dists[tid] > min_dists[tid + 16]) {
|
||||
min_idxs[tid] = min_idxs[tid + 16];
|
||||
min_dists[tid] = min_dists[tid + 16];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
// s = 8
|
||||
if (min_dists[tid] > min_dists[tid + 8]) {
|
||||
min_idxs[tid] = min_idxs[tid + 8];
|
||||
min_dists[tid] = min_dists[tid + 8];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
// s = 4
|
||||
if (min_dists[tid] > min_dists[tid + 4]) {
|
||||
min_idxs[tid] = min_idxs[tid + 4];
|
||||
min_dists[tid] = min_dists[tid + 4];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
// s = 2
|
||||
if (min_dists[tid] > min_dists[tid + 2]) {
|
||||
min_idxs[tid] = min_idxs[tid + 2];
|
||||
min_dists[tid] = min_dists[tid + 2];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
// s = 1
|
||||
if (min_dists[tid] > min_dists[tid + 1]) {
|
||||
min_idxs[tid] = min_idxs[tid + 1];
|
||||
min_dists[tid] = min_dists[tid + 1];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
}
|
||||
|
||||
template <typename scalar_t>
|
||||
@@ -65,30 +79,42 @@ __device__ void WarpReduceMax(
|
||||
dists[tid] = dists[tid + 32];
|
||||
dists_idx[tid] = dists_idx[tid + 32];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
if (dists[tid] < dists[tid + 16]) {
|
||||
dists[tid] = dists[tid + 16];
|
||||
dists_idx[tid] = dists_idx[tid + 16];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
if (dists[tid] < dists[tid + 8]) {
|
||||
dists[tid] = dists[tid + 8];
|
||||
dists_idx[tid] = dists_idx[tid + 8];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
if (dists[tid] < dists[tid + 4]) {
|
||||
dists[tid] = dists[tid + 4];
|
||||
dists_idx[tid] = dists_idx[tid + 4];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
if (dists[tid] < dists[tid + 2]) {
|
||||
dists[tid] = dists[tid + 2];
|
||||
dists_idx[tid] = dists_idx[tid + 2];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
if (dists[tid] < dists[tid + 1]) {
|
||||
dists[tid] = dists[tid + 1];
|
||||
dists_idx[tid] = dists_idx[tid + 1];
|
||||
}
|
||||
#if !defined(USE_ROCM)
|
||||
__syncwarp();
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from .r2n2 import BlenderCamera, collate_batched_R2N2, R2N2, render_cubified_voxels
|
||||
from .shapenet import ShapeNetCore
|
||||
from .utils import collate_batched_meshes
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from .r2n2 import R2N2
|
||||
from .utils import BlenderCamera, collate_batched_R2N2, render_cubified_voxels
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import json
|
||||
import warnings
|
||||
from os import path
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import math
|
||||
from typing import Dict, List
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from .shapenet_core import ShapeNetCore
|
||||
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import json
|
||||
import os
|
||||
import warnings
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import warnings
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from typing import Dict, List
|
||||
|
||||
from pytorch3d.renderer.mesh import TexturesAtlas
|
||||
|
||||
@@ -3,3 +3,5 @@
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
@@ -3,3 +3,5 @@
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
|
||||
import torch
|
||||
from pytorch3d.implicitron.tools.config import registry
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import Iterator, List, Optional, Tuple
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from pytorch3d.implicitron.tools.config import (
|
||||
@@ -39,7 +41,7 @@ class DataSourceBase(ReplaceableBase):
|
||||
|
||||
|
||||
@registry.register
|
||||
class ImplicitronDataSource(DataSourceBase): # pyre-ignore[13]
|
||||
class ImplicitronDataSource(DataSourceBase):
|
||||
"""
|
||||
Represents the data used in Implicitron. This is the only implementation
|
||||
of DataSourceBase provided.
|
||||
@@ -50,8 +52,11 @@ class ImplicitronDataSource(DataSourceBase): # pyre-ignore[13]
|
||||
data_loader_map_provider_class_type: identifies type for data_loader_map_provider.
|
||||
"""
|
||||
|
||||
# pyre-fixme[13]: Attribute `dataset_map_provider` is never initialized.
|
||||
dataset_map_provider: DatasetMapProviderBase
|
||||
# pyre-fixme[13]: Attribute `dataset_map_provider_class_type` is never initialized.
|
||||
dataset_map_provider_class_type: str
|
||||
# pyre-fixme[13]: Attribute `data_loader_map_provider` is never initialized.
|
||||
data_loader_map_provider: DataLoaderMapProviderBase
|
||||
data_loader_map_provider_class_type: str = "SequenceDataLoaderMapProvider"
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass
|
||||
from typing import (
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import logging
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import os
|
||||
from abc import ABC, abstractmethod
|
||||
from collections import defaultdict
|
||||
@@ -274,6 +276,7 @@ class FrameData(Mapping[str, Any]):
|
||||
image_size_hw=tuple(self.effective_image_size_hw), # pyre-ignore
|
||||
)
|
||||
crop_bbox_xywh = bbox_xyxy_to_xywh(clamp_bbox_xyxy)
|
||||
self.crop_bbox_xywh = crop_bbox_xywh
|
||||
|
||||
if self.fg_probability is not None:
|
||||
self.fg_probability = crop_around_box(
|
||||
@@ -432,7 +435,7 @@ class FrameData(Mapping[str, Any]):
|
||||
# TODO: don't store K; enforce working in NDC space
|
||||
return join_cameras_as_batch(batch)
|
||||
else:
|
||||
return torch.utils.data._utils.collate.default_collate(batch)
|
||||
return torch.utils.data.dataloader.default_collate(batch)
|
||||
|
||||
|
||||
FrameDataSubtype = TypeVar("FrameDataSubtype", bound=FrameData)
|
||||
@@ -576,11 +579,11 @@ class GenericFrameDataBuilder(FrameDataBuilderBase[FrameDataSubtype], ABC):
|
||||
camera_quality_score=safe_as_tensor(
|
||||
sequence_annotation.viewpoint_quality_score, torch.float
|
||||
),
|
||||
point_cloud_quality_score=safe_as_tensor(
|
||||
point_cloud.quality_score, torch.float
|
||||
)
|
||||
if point_cloud is not None
|
||||
else None,
|
||||
point_cloud_quality_score=(
|
||||
safe_as_tensor(point_cloud.quality_score, torch.float)
|
||||
if point_cloud is not None
|
||||
else None
|
||||
),
|
||||
)
|
||||
|
||||
fg_mask_np: Optional[np.ndarray] = None
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import copy
|
||||
import functools
|
||||
import gzip
|
||||
@@ -124,9 +126,9 @@ class JsonIndexDataset(DatasetBase, ReplaceableBase):
|
||||
dimension of the cropping bounding box, relative to box size.
|
||||
"""
|
||||
|
||||
frame_annotations_type: ClassVar[
|
||||
Type[types.FrameAnnotation]
|
||||
] = types.FrameAnnotation
|
||||
frame_annotations_type: ClassVar[Type[types.FrameAnnotation]] = (
|
||||
types.FrameAnnotation
|
||||
)
|
||||
|
||||
path_manager: Any = None
|
||||
frame_annotations_file: str = ""
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
|
||||
import json
|
||||
import os
|
||||
@@ -64,7 +66,7 @@ _NEED_CONTROL: Tuple[str, ...] = (
|
||||
|
||||
|
||||
@registry.register
|
||||
class JsonIndexDatasetMapProvider(DatasetMapProviderBase): # pyre-ignore [13]
|
||||
class JsonIndexDatasetMapProvider(DatasetMapProviderBase):
|
||||
"""
|
||||
Generates the training / validation and testing dataset objects for
|
||||
a dataset laid out on disk like Co3D, with annotations in json files.
|
||||
@@ -93,6 +95,7 @@ class JsonIndexDatasetMapProvider(DatasetMapProviderBase): # pyre-ignore [13]
|
||||
path_manager_factory_class_type: The class type of `path_manager_factory`.
|
||||
"""
|
||||
|
||||
# pyre-fixme[13]: Attribute `category` is never initialized.
|
||||
category: str
|
||||
task_str: str = "singlesequence"
|
||||
dataset_root: str = _CO3D_DATASET_ROOT
|
||||
@@ -102,8 +105,10 @@ class JsonIndexDatasetMapProvider(DatasetMapProviderBase): # pyre-ignore [13]
|
||||
test_restrict_sequence_id: int = -1
|
||||
assert_single_seq: bool = False
|
||||
only_test_set: bool = False
|
||||
# pyre-fixme[13]: Attribute `dataset` is never initialized.
|
||||
dataset: JsonIndexDataset
|
||||
dataset_class_type: str = "JsonIndexDataset"
|
||||
# pyre-fixme[13]: Attribute `path_manager_factory` is never initialized.
|
||||
path_manager_factory: PathManagerFactory
|
||||
path_manager_factory_class_type: str = "PathManagerFactory"
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
|
||||
import copy
|
||||
import json
|
||||
@@ -54,7 +56,7 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@registry.register
|
||||
class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
|
||||
class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase):
|
||||
"""
|
||||
Generates the training, validation, and testing dataset objects for
|
||||
a dataset laid out on disk like CO3Dv2, with annotations in gzipped json files.
|
||||
@@ -169,7 +171,9 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
|
||||
path_manager_factory_class_type: The class type of `path_manager_factory`.
|
||||
"""
|
||||
|
||||
# pyre-fixme[13]: Attribute `category` is never initialized.
|
||||
category: str
|
||||
# pyre-fixme[13]: Attribute `subset_name` is never initialized.
|
||||
subset_name: str
|
||||
dataset_root: str = _CO3DV2_DATASET_ROOT
|
||||
|
||||
@@ -181,8 +185,10 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
|
||||
n_known_frames_for_test: int = 0
|
||||
|
||||
dataset_class_type: str = "JsonIndexDataset"
|
||||
# pyre-fixme[13]: Attribute `dataset` is never initialized.
|
||||
dataset: JsonIndexDataset
|
||||
|
||||
# pyre-fixme[13]: Attribute `path_manager_factory` is never initialized.
|
||||
path_manager_factory: PathManagerFactory
|
||||
path_manager_factory_class_type: str = "PathManagerFactory"
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
# @lint-ignore-every LICENSELINT
|
||||
# Adapted from https://github.com/bmild/nerf/blob/master/load_blender.py
|
||||
# Copyright (c) 2020 bmild
|
||||
|
||||
# pyre-unsafe
|
||||
import json
|
||||
import os
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
# @lint-ignore-every LICENSELINT
|
||||
# Adapted from https://github.com/bmild/nerf/blob/master/load_llff.py
|
||||
# Copyright (c) 2020 bmild
|
||||
|
||||
# pyre-unsafe
|
||||
import logging
|
||||
import os
|
||||
import warnings
|
||||
@@ -34,11 +36,7 @@ def _minify(basedir, path_manager, factors=(), resolutions=()):
|
||||
|
||||
imgdir = os.path.join(basedir, "images")
|
||||
imgs = [os.path.join(imgdir, f) for f in sorted(_ls(path_manager, imgdir))]
|
||||
imgs = [
|
||||
f
|
||||
for f in imgs
|
||||
if any([f.endswith(ex) for ex in ["JPG", "jpg", "png", "jpeg", "PNG"]])
|
||||
]
|
||||
imgs = [f for f in imgs if f.endswith(("JPG", "jpg", "png", "jpeg", "PNG"))]
|
||||
imgdir_orig = imgdir
|
||||
|
||||
wd = os.getcwd()
Some files were not shown because too many files have changed in this diff.