Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-08-20 21:02:48 +08:00)
Compare commits
114 Commits
50f8efa1cb 5043d15361 e3d3a67a89 e55ea90609 3aee2a6005 c5ea8fa49e
3ff6c5ab85 267bd8ef87 177eec6378 71db7a0ea2 6020323d94 182e845c19
f315ac131b fc08621879 3f327a516b 366eff21d9 0a59450f0e 3987612062
06a76ef8dd 21205730d9 7e09505538 20bd8b33f6 7a3c0cbc9d 215590b497
43cd681d4f 42a4a7d432 699bc671ca 49cf5a0f37 89b851e64c 5247f6ad74
e41aff47db 64a5bfadc8 055ab3a2e3 f6c2ca6bfc e20cbe9b0e c17e6f947a
91c9f34137 81d82980bc 8fe6934885 c434957b2a dd2a11b5fc 9563ef79ca
008c7ab58c 9eaed4c495 e13848265d 58566963d6 e17ed5cd50 8ed0c7a002
2da913c7e6 fca83e6369 75ebeeaea0 ab793177c6 9acdd67b83 3f428d9981
05cbea115a 38afdcfc68 1e0b1d9c72 44702fdb4b 7edaee71a9 d0d0e02007
4df110b0a9 51fd114d8b 89653419d0 7980854d44 51d7c06ddd 00c36ec01c
b0462d8079 b66d17a324 717493cb79 302da69461 4ae25bfce7 bd52f4a408
17117106e4 aec76bb4c8 47d5dc8824 fe0b1bae49 ccf22911d4 128be02fc0
31e3488a51 b215776f2d 38cf0dc1c5 7566530669 a27755db41 3da7703c5a
f34104cf6e f247c86dc0 ae9d8787ce 8772fe0de8 c292c71c1a d0d9cae9cd
1f92c4e9d2 9b981f2c7e 85eccbbf77 b80ab0caf0 1e817914b3 799c1cd21b
292acc71a3 3621a36494 3087ab7f62 e46ab49a34 8a27590c5f 06cdc313a7
94da8841af fbc6725f03 6b8766080d c373a84400 7606854ff7 83bacda8fb
f74fc450e8 3b4f8a4980 79b46734cb 55638f3bae f4f2209271 f613682551
@@ -162,90 +162,6 @@ workflows:
    jobs:
      # - main:
      #     context: DOCKERHUB_TOKEN
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py38_cu113_pyt1120
          python_version: '3.8'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py38_cu116_pyt1120
          python_version: '3.8'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py38_cu113_pyt1121
          python_version: '3.8'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py38_cu116_pyt1121
          python_version: '3.8'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py38_cu116_pyt1130
          python_version: '3.8'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py38_cu117_pyt1130
          python_version: '3.8'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py38_cu116_pyt1131
          python_version: '3.8'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py38_cu117_pyt1131
          python_version: '3.8'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py38_cu117_pyt200
          python_version: '3.8'
          pytorch_version: 2.0.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt200
          python_version: '3.8'
          pytorch_version: 2.0.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py38_cu117_pyt201
          python_version: '3.8'
          pytorch_version: 2.0.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt201
          python_version: '3.8'
          pytorch_version: 2.0.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
@@ -261,89 +177,103 @@ workflows:
          python_version: '3.8'
          pytorch_version: 2.1.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py39_cu113_pyt1120
          python_version: '3.9'
          pytorch_version: 1.12.0
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt211
          python_version: '3.8'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py39_cu116_pyt1120
          python_version: '3.9'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py39_cu113_pyt1121
          python_version: '3.9'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py39_cu116_pyt1121
          python_version: '3.9'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py39_cu116_pyt1130
          python_version: '3.9'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py39_cu117_pyt1130
          python_version: '3.9'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py39_cu116_pyt1131
          python_version: '3.9'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py39_cu117_pyt1131
          python_version: '3.9'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py39_cu117_pyt200
          python_version: '3.9'
          pytorch_version: 2.0.0
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt211
          python_version: '3.8'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt200
          python_version: '3.9'
          pytorch_version: 2.0.0
          name: linux_conda_py38_cu118_pyt212
          python_version: '3.8'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py39_cu117_pyt201
          python_version: '3.9'
          pytorch_version: 2.0.1
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt212
          python_version: '3.8'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt201
          python_version: '3.9'
          pytorch_version: 2.0.1
          name: linux_conda_py38_cu118_pyt220
          python_version: '3.8'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt220
          python_version: '3.8'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt222
          python_version: '3.8'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt222
          python_version: '3.8'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt231
          python_version: '3.8'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt231
          python_version: '3.8'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt240
          python_version: '3.8'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt240
          python_version: '3.8'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py38_cu118_pyt241
          python_version: '3.8'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py38_cu121_pyt241
          python_version: '3.8'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
@@ -359,89 +289,103 @@ workflows:
          python_version: '3.9'
          pytorch_version: 2.1.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py310_cu113_pyt1120
          python_version: '3.10'
          pytorch_version: 1.12.0
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt211
          python_version: '3.9'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py310_cu116_pyt1120
          python_version: '3.10'
          pytorch_version: 1.12.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda113
          context: DOCKERHUB_TOKEN
          cu_version: cu113
          name: linux_conda_py310_cu113_pyt1121
          python_version: '3.10'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py310_cu116_pyt1121
          python_version: '3.10'
          pytorch_version: 1.12.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py310_cu116_pyt1130
          python_version: '3.10'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py310_cu117_pyt1130
          python_version: '3.10'
          pytorch_version: 1.13.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda116
          context: DOCKERHUB_TOKEN
          cu_version: cu116
          name: linux_conda_py310_cu116_pyt1131
          python_version: '3.10'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py310_cu117_pyt1131
          python_version: '3.10'
          pytorch_version: 1.13.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py310_cu117_pyt200
          python_version: '3.10'
          pytorch_version: 2.0.0
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt211
          python_version: '3.9'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt200
          python_version: '3.10'
          pytorch_version: 2.0.0
          name: linux_conda_py39_cu118_pyt212
          python_version: '3.9'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda117
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu117
          name: linux_conda_py310_cu117_pyt201
          python_version: '3.10'
          pytorch_version: 2.0.1
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt212
          python_version: '3.9'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt201
          python_version: '3.10'
          pytorch_version: 2.0.1
          name: linux_conda_py39_cu118_pyt220
          python_version: '3.9'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt220
          python_version: '3.9'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt222
          python_version: '3.9'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt222
          python_version: '3.9'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt231
          python_version: '3.9'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt231
          python_version: '3.9'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt240
          python_version: '3.9'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt240
          python_version: '3.9'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py39_cu118_pyt241
          python_version: '3.9'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py39_cu121_pyt241
          python_version: '3.9'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
@@ -456,6 +400,104 @@ workflows:
          name: linux_conda_py310_cu121_pyt210
          python_version: '3.10'
          pytorch_version: 2.1.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt211
          python_version: '3.10'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt211
          python_version: '3.10'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt212
          python_version: '3.10'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt212
          python_version: '3.10'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt220
          python_version: '3.10'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt220
          python_version: '3.10'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt222
          python_version: '3.10'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt222
          python_version: '3.10'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt231
          python_version: '3.10'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt231
          python_version: '3.10'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt240
          python_version: '3.10'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt240
          python_version: '3.10'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py310_cu118_pyt241
          python_version: '3.10'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py310_cu121_pyt241
          python_version: '3.10'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
@@ -470,6 +512,174 @@ workflows:
          name: linux_conda_py311_cu121_pyt210
          python_version: '3.11'
          pytorch_version: 2.1.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt211
          python_version: '3.11'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt211
          python_version: '3.11'
          pytorch_version: 2.1.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt212
          python_version: '3.11'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt212
          python_version: '3.11'
          pytorch_version: 2.1.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt220
          python_version: '3.11'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt220
          python_version: '3.11'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt222
          python_version: '3.11'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt222
          python_version: '3.11'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt231
          python_version: '3.11'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt231
          python_version: '3.11'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt240
          python_version: '3.11'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt240
          python_version: '3.11'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py311_cu118_pyt241
          python_version: '3.11'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py311_cu121_pyt241
          python_version: '3.11'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py312_cu118_pyt220
          python_version: '3.12'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py312_cu121_pyt220
          python_version: '3.12'
          pytorch_version: 2.2.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py312_cu118_pyt222
          python_version: '3.12'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py312_cu121_pyt222
          python_version: '3.12'
          pytorch_version: 2.2.2
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py312_cu118_pyt231
          python_version: '3.12'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py312_cu121_pyt231
          python_version: '3.12'
          pytorch_version: 2.3.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py312_cu118_pyt240
          python_version: '3.12'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py312_cu121_pyt240
          python_version: '3.12'
          pytorch_version: 2.4.0
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda118
          context: DOCKERHUB_TOKEN
          cu_version: cu118
          name: linux_conda_py312_cu118_pyt241
          python_version: '3.12'
          pytorch_version: 2.4.1
      - binary_linux_conda:
          conda_docker_image: pytorch/conda-builder:cuda121
          context: DOCKERHUB_TOKEN
          cu_version: cu121
          name: linux_conda_py312_cu121_pyt241
          python_version: '3.12'
          pytorch_version: 2.4.1
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py310_cu117_pyt201
          context: DOCKERHUB_TOKEN
@@ -19,19 +19,18 @@ from packaging import version
# The CUDA versions which have pytorch conda packages available for linux for each
# version of pytorch.
CONDA_CUDA_VERSIONS = {
    "1.12.0": ["cu113", "cu116"],
    "1.12.1": ["cu113", "cu116"],
    "1.13.0": ["cu116", "cu117"],
    "1.13.1": ["cu116", "cu117"],
    "2.0.0": ["cu117", "cu118"],
    "2.0.1": ["cu117", "cu118"],
    "2.1.0": ["cu118", "cu121"],
    "2.1.1": ["cu118", "cu121"],
    "2.1.2": ["cu118", "cu121"],
    "2.2.0": ["cu118", "cu121"],
    "2.2.2": ["cu118", "cu121"],
    "2.3.1": ["cu118", "cu121"],
    "2.4.0": ["cu118", "cu121"],
    "2.4.1": ["cu118", "cu121"],
}


def conda_docker_image_for_cuda(cuda_version):
    if cuda_version in ("cu101", "cu102", "cu111"):
        return None
    if len(cuda_version) != 5:
        raise ValueError("Unknown cuda version")
    return "pytorch/conda-builder:cuda" + cuda_version[2:]
@@ -52,12 +51,18 @@ def pytorch_versions_for_python(python_version):
            for i in CONDA_CUDA_VERSIONS
            if version.Version(i) >= version.Version("2.1.0")
        ]
    if python_version == "3.12":
        return [
            i
            for i in CONDA_CUDA_VERSIONS
            if version.Version(i) >= version.Version("2.2.0")
        ]


def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
    w = []
    for btype in ["conda"]:
        for python_version in ["3.8", "3.9", "3.10", "3.11"]:
        for python_version in ["3.8", "3.9", "3.10", "3.11", "3.12"]:
            for pytorch_version in pytorch_versions_for_python(python_version):
                for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
                    w += workflow_pair(
@@ -83,7 +88,6 @@ def workflow_pair(
    upload=False,
    filter_branch,
):

    w = []
    py = python_version.replace(".", "")
    pyt = pytorch_version.replace(".", "")
@@ -122,7 +126,6 @@ def generate_base_workflow(
    btype,
    filter_branch=None,
):

    d = {
        "name": base_workflow_name,
        "python_version": python_version,
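The fragments above are from the CircleCI matrix generator in `packaging/`. Below is a minimal sketch of how the visible pieces fit together; it is not the project's actual `workflows()` implementation, and the behaviour of `pytorch_versions_for_python` for Python versions other than 3.12 is an assumption inferred from the visible branches. Treat it only as an illustration of how the `linux_conda_py*_cu*_pyt*` job names in `config.yml` are derived from the version tables.

```python
# Sketch only: mirrors the visible logic of the packaging matrix script.
from packaging import version

# Truncated copy of the table in the diff above.
CONDA_CUDA_VERSIONS = {
    "2.1.0": ["cu118", "cu121"],
    "2.4.1": ["cu118", "cu121"],
}


def conda_docker_image_for_cuda(cuda_version):
    # Copied from the diff above: "cu121" -> "pytorch/conda-builder:cuda121".
    if cuda_version in ("cu101", "cu102", "cu111"):
        return None
    if len(cuda_version) != 5:
        raise ValueError("Unknown cuda version")
    return "pytorch/conda-builder:cuda" + cuda_version[2:]


def pytorch_versions_for_python(python_version):
    # Assumed: 3.12 only pairs with PyTorch >= 2.2.0, older Pythons get everything.
    if python_version == "3.12":
        return [
            v
            for v in CONDA_CUDA_VERSIONS
            if version.Version(v) >= version.Version("2.2.0")
        ]
    return list(CONDA_CUDA_VERSIONS)


def job_names(python_versions=("3.8", "3.9", "3.10", "3.11", "3.12")):
    # Follows the naming scheme linux_conda_py<py>_<cu>_pyt<pyt> seen in config.yml.
    for python_version in python_versions:
        for pytorch_version in pytorch_versions_for_python(python_version):
            for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
                py = python_version.replace(".", "")
                pyt = pytorch_version.replace(".", "")
                yield (
                    f"linux_conda_py{py}_{cu_version}_pyt{pyt}",
                    conda_docker_image_for_cuda(cu_version),
                )


if __name__ == "__main__":
    for name, image in job_names(("3.12",)):
        print(name, image)
    # linux_conda_py312_cu118_pyt241 pytorch/conda-builder:cuda118
    # linux_conda_py312_cu121_pyt241 pytorch/conda-builder:cuda121
```

The names this sketch produces for Python 3.12 match the new `linux_conda_py312_*` entries added to `config.yml` above.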
.flake8
@@ -1,5 +1,8 @@
[flake8]
ignore = E203, E266, E501, W503, E221
# B028 No explicit stacklevel argument found.
# B907 'foo' is manually surrounded by quotes, consider using the `!r` conversion flag.
# B905 `zip()` without an explicit `strict=` parameter.
ignore = E203, E266, E501, W503, E221, B028, B905, B907
max-line-length = 88
max-complexity = 18
select = B,C,E,F,W,T4,B9
.github/workflows/build.yml (new file)
@@ -0,0 +1,23 @@
name: facebookresearch/pytorch3d/build_and_test
on:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main
jobs:
  binary_linux_conda_cuda:
    runs-on: 4-core-ubuntu-gpu-t4
    env:
      PYTHON_VERSION: "3.12"
      BUILD_VERSION: "${{ github.run_number }}"
      PYTORCH_VERSION: "2.4.1"
      CU_VERSION: "cu121"
      JUST_TESTRUN: 1
    steps:
      - uses: actions/checkout@v4
      - name: Build and run tests
        run: |-
          conda create --name env --yes --quiet conda-build
          conda run --no-capture-output --name env python3 ./packaging/build_conda.py --use-conda-cuda
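The new workflow configures the build entirely through environment variables and then calls `packaging/build_conda.py` inside a throwaway conda environment. A hedged sketch of an equivalent local invocation is below; the assumption that `build_conda.py` reads exactly these variables is taken from the workflow's `env` block, and `BUILD_VERSION` is set to a placeholder instead of the CI run number.

```python
# A hedged sketch of reproducing the CI step locally, run from the repo root.
import os
import subprocess

env = dict(os.environ)
env.update(
    {
        "PYTHON_VERSION": "3.12",
        "BUILD_VERSION": "1",     # CI uses the GitHub run number here
        "PYTORCH_VERSION": "2.4.1",
        "CU_VERSION": "cu121",
        "JUST_TESTRUN": "1",      # assumption: marks this as a test-only build
    }
)

# The same two commands the workflow's "Build and run tests" step runs.
subprocess.run(
    ["conda", "create", "--name", "env", "--yes", "--quiet", "conda-build"],
    check=True,
)
subprocess.run(
    [
        "conda", "run", "--no-capture-output", "--name", "env",
        "python3", "./packaging/build_conda.py", "--use-conda-cuda",
    ],
    env=env,
    check=True,
)
```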
INSTALL.md
@@ -8,11 +8,10 @@
The core library is written in PyTorch. Several components have underlying implementation in CUDA for improved performance. A subset of these components have CPU implementations in C++/PyTorch. It is advised to use PyTorch3D with GPU support in order to use all the features.

- Linux or macOS or Windows
- Python 3.8, 3.9 or 3.10
- PyTorch 1.12.0, 1.12.1, 1.13.0, 2.0.0, 2.0.1 or 2.1.0.
- Python
- PyTorch 2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1, 2.2.2, 2.3.0, 2.3.1, 2.4.0 or 2.4.1.
- torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
- gcc & g++ ≥ 4.9
- [fvcore](https://github.com/facebookresearch/fvcore)
- [ioPath](https://github.com/facebookresearch/iopath)
- If CUDA is to be used, use a version which is supported by the corresponding pytorch version and at least version 9.2.
- If CUDA older than 11.7 is to be used and you are building from source, the CUB library must be available. We recommend version 1.10.0.
@@ -22,7 +21,7 @@ The runtime dependencies can be installed by running:
conda create -n pytorch3d python=3.9
conda activate pytorch3d
conda install pytorch=1.13.0 torchvision pytorch-cuda=11.6 -c pytorch -c nvidia
conda install -c fvcore -c iopath -c conda-forge fvcore iopath
conda install -c iopath iopath
```

For the CUB build time dependency, which you only need if you have CUDA older than 11.7, if you are using conda, you can continue with
@@ -49,6 +48,7 @@ For developing on top of PyTorch3D or contributing, you will need to run the lin
- tdqm
- jupyter
- imageio
- fvcore
- plotly
- opencv-python

@@ -59,6 +59,7 @@ conda install jupyter
pip install scikit-image matplotlib imageio plotly opencv-python

# Tests/Linting
conda install -c fvcore -c conda-forge fvcore
pip install black usort flake8 flake8-bugbear flake8-comprehensions
```

@@ -97,7 +98,7 @@ version_str="".join([
torch.version.cuda.replace(".",""),
f"_pyt{pyt_version_str}"
])
!pip install fvcore iopath
!pip install iopath
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
```
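The prebuilt-wheel URL in the last snippet is keyed by a `version_str` assembled from the local Python, CUDA and PyTorch versions. A small worked example follows; the concrete version numbers are illustrative only, and it assumes a CUDA build of PyTorch so that `torch.version.cuda` is not `None`.

```python
# Worked example of the version_str that selects the wheel index above.
import sys
import torch

pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
version_str = "".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".", ""),
    f"_pyt{pyt_version_str}",
])
print(version_str)
# e.g. Python 3.10 with torch 2.4.1+cu121 gives "py310_cu121_pyt241", so the
# index URL ends in .../wheels/py310_cu121_pyt241/download.html
```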
@@ -146,6 +146,12 @@ If you are using the pulsar backend for sphere-rendering (the `PulsarPointRender

Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).

**[Oct 31st 2023]:** PyTorch3D [v0.7.5](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.5) released.

**[May 10th 2023]:** PyTorch3D [v0.7.4](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.4) released.

**[Apr 5th 2023]:** PyTorch3D [v0.7.3](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.3) released.

**[Dec 19th 2022]:** PyTorch3D [v0.7.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.2) released.

**[Oct 23rd 2022]:** PyTorch3D [v0.7.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.1) released.
@@ -36,5 +36,5 @@ then

  echo "Running pyre..."
  echo "To restart/kill pyre server, run 'pyre restart' or 'pyre kill' in fbcode/"
  ( cd ~/fbsource/fbcode; pyre -l vision/fair/pytorch3d/ )
  ( cd ~/fbsource/fbcode; arc pyre check //vision/fair/pytorch3d/... )
fi
@@ -23,7 +23,7 @@ conda init bash
source ~/.bashrc
conda create -y -n myenv python=3.8 matplotlib ipython ipywidgets nbconvert
conda activate myenv
conda install -y -c fvcore -c iopath -c conda-forge fvcore iopath
conda install -y -c iopath iopath
conda install -y -c pytorch pytorch=1.6.0 cudatoolkit=10.1 torchvision
conda install -y -c pytorch3d-nightly pytorch3d
pip install plotly scikit-image
@@ -10,6 +10,7 @@ This example demonstrates the most trivial, direct interface of the pulsar
sphere renderer. It renders and saves an image with 10 random spheres.
Output: basic.png.
"""

import logging
import math
from os import path
@@ -11,6 +11,7 @@ interface for sphere renderering. It renders and saves an image with
10 random spheres.
Output: basic-pt3d.png.
"""

import logging
from os import path

@@ -14,6 +14,7 @@ distorted. Gradient-based optimization is used to converge towards the
original camera parameters.
Output: cam.gif.
"""

import logging
import math
from os import path
@@ -14,6 +14,7 @@ distorted. Gradient-based optimization is used to converge towards the
original camera parameters.
Output: cam-pt3d.gif
"""

import logging
from os import path

@@ -18,6 +18,7 @@ This example is not available yet through the 'unified' interface,
because opacity support has not landed in PyTorch3D for general data
structures yet.
"""

import logging
import math
from os import path
@@ -13,6 +13,7 @@ The scene is initialized with random spheres. Gradient-based
optimization is used to converge towards a faithful
scene representation.
"""

import logging
import math

@@ -13,6 +13,7 @@ The scene is initialized with random spheres. Gradient-based
optimization is used to converge towards a faithful
scene representation.
"""

import logging
import math

@@ -5,7 +5,6 @@ sphinx_rtd_theme
sphinx_markdown_tables
numpy
iopath
fvcore
https://download.pytorch.org/whl/cpu/torchvision-0.15.2%2Bcpu-cp311-cp311-linux_x86_64.whl
https://download.pytorch.org/whl/cpu/torch-2.0.1%2Bcpu-cp311-cp311-linux_x86_64.whl
omegaconf
@@ -83,25 +83,31 @@
"import os\n",
"import sys\n",
"import torch\n",
"import subprocess\n",
"need_pytorch3d=False\n",
"try:\n",
" import pytorch3d\n",
"except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
"if need_pytorch3d:\n",
" if torch.__version__.startswith(\"2.1.\") and sys.platform.startswith(\"linux\"):\n",
" # We try to install PyTorch3D via a released wheel.\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{pyt_version_str}\"\n",
" ])\n",
" !pip install fvcore iopath\n",
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
" version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{pyt_version_str}\"\n",
" ])\n",
" !pip install iopath\n",
" if sys.platform.startswith(\"linux\"):\n",
" print(\"Trying to install wheel for PyTorch3D\")\n",
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
" else:\n",
" # We try to install PyTorch3D from source.\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
" pip_list = !pip freeze\n",
" need_pytorch3d = not any(i.startswith(\"pytorch3d==\") for i in pip_list)\n",
" if need_pytorch3d:\n",
" print(f\"failed to find/install wheel for {version_str}\")\n",
"if need_pytorch3d:\n",
" print(\"Installing PyTorch3D from source\")\n",
" !pip install ninja\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
{
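The updated cell is easier to follow when the escaped JSON source lines above are written out as ordinary notebook code. The rendering below assumes standard 4-space indentation (the leading whitespace inside the strings was collapsed in this view); lines starting with `!` are IPython shell escapes, and `pip_list = !pip freeze` captures the command output as a list of strings.

```python
# The updated installation cell, de-escaped from the JSON source above and
# re-indented (assumed 4-space blocks). This is IPython/Jupyter code.
import os
import sys
import torch
import subprocess
need_pytorch3d = False
try:
    import pytorch3d
except ModuleNotFoundError:
    need_pytorch3d = True
if need_pytorch3d:
    pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
    version_str = "".join([
        f"py3{sys.version_info.minor}_cu",
        torch.version.cuda.replace(".", ""),
        f"_pyt{pyt_version_str}",
    ])
    !pip install iopath
    if sys.platform.startswith("linux"):
        print("Trying to install wheel for PyTorch3D")
        !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
        pip_list = !pip freeze
        need_pytorch3d = not any(i.startswith("pytorch3d==") for i in pip_list)
    if need_pytorch3d:
        print(f"failed to find/install wheel for {version_str}")
if need_pytorch3d:
    print("Installing PyTorch3D from source")
    !pip install ninja
    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
```

In short, the cell now always tries the prebuilt Linux wheel first and falls back to a source build with `ninja` only if no matching wheel was installed.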
@@ -70,25 +70,31 @@
[Identical update to the PyTorch3D installation cell as shown in the notebook diff above.]
@@ -45,25 +45,31 @@
[Identical update to the PyTorch3D installation cell as shown in the notebook diff above.]
@@ -405,7 +411,7 @@
"outputs": [],
"source": [
"random_model_images = shapenet_dataset.render(\n",
" sample_nums=[3],\n",
" sample_nums=[5],\n",
" device=device,\n",
" cameras=cameras,\n",
" raster_settings=raster_settings,\n",
@@ -84,25 +84,31 @@
@@ -50,25 +50,31 @@
@@ -62,25 +62,31 @@
@@ -41,25 +41,31 @@
@@ -72,25 +72,31 @@
@@ -66,25 +66,31 @@
@@ -44,25 +44,31 @@
@@ -51,25 +51,31 @@
@@ -67,25 +67,31 @@
[Each of these hunks applies the same update to the PyTorch3D installation cell in another tutorial notebook; the changed lines are identical to the cell shown above.]
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install iopath\n",
|
||||
" if sys.platform.startswith(\"linux\"):\n",
|
||||
" print(\"Trying to install wheel for PyTorch3D\")\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
" pip_list = !pip freeze\n",
|
||||
" need_pytorch3d = not any(i.startswith(\"pytorch3d==\") for i in pip_list)\n",
|
||||
" if need_pytorch3d:\n",
|
||||
" print(f\"failed to find/install wheel for {version_str}\")\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" print(\"Installing PyTorch3D from source\")\n",
|
||||
" !pip install ninja\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@@ -4,10 +4,11 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os.path
import runpy
import subprocess
from typing import List
from typing import List, Tuple

# required env vars:
# CU_VERSION: E.g. cu112
@@ -23,7 +24,7 @@ pytorch_major_minor = tuple(int(i) for i in PYTORCH_VERSION.split(".")[:2])
source_root_dir = os.environ["PWD"]


def version_constraint(version):
def version_constraint(version) -> str:
    """
    Given version "11.3" returns " >=11.3,<11.4"
    """
@@ -32,7 +33,7 @@ def version_constraint(version):
    return f" >={version},<{upper}"


def get_cuda_major_minor():
def get_cuda_major_minor() -> Tuple[str, str]:
    if CU_VERSION == "cpu":
        raise ValueError("fn only for cuda builds")
    if len(CU_VERSION) != 5 or CU_VERSION[:2] != "cu":
@@ -42,11 +43,10 @@ def get_cuda_major_minor():
    return major, minor


def setup_cuda():
def setup_cuda(use_conda_cuda: bool) -> List[str]:
    if CU_VERSION == "cpu":
        return
        return []
    major, minor = get_cuda_major_minor()
    os.environ["CUDA_HOME"] = f"/usr/local/cuda-{major}.{minor}/"
    os.environ["FORCE_CUDA"] = "1"

    basic_nvcc_flags = (
@@ -75,11 +75,26 @@ def setup_cuda():

    if os.environ.get("JUST_TESTRUN", "0") != "1":
        os.environ["NVCC_FLAGS"] = nvcc_flags
    if use_conda_cuda:
        os.environ["CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT1"] = "- cuda-toolkit"
        os.environ["CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT2"] = (
            f"- cuda-version={major}.{minor}"
        )
        return ["-c", f"nvidia/label/cuda-{major}.{minor}.0"]
    else:
        os.environ["CUDA_HOME"] = f"/usr/local/cuda-{major}.{minor}/"
        return []


def setup_conda_pytorch_constraint() -> List[str]:
    pytorch_constraint = f"- pytorch=={PYTORCH_VERSION}"
    os.environ["CONDA_PYTORCH_CONSTRAINT"] = pytorch_constraint
    if pytorch_major_minor < (2, 2):
        os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = "- mkl!=2024.1.0"
        os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools<70"
    else:
        os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = ""
        os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools"
    os.environ["CONDA_PYTORCH_BUILD_CONSTRAINT"] = pytorch_constraint
    os.environ["PYTORCH_VERSION_NODOT"] = PYTORCH_VERSION.replace(".", "")

@@ -89,7 +104,7 @@ def setup_conda_pytorch_constraint() -> List[str]:
    return ["-c", "pytorch", "-c", "nvidia"]


def setup_conda_cudatoolkit_constraint():
def setup_conda_cudatoolkit_constraint() -> None:
    if CU_VERSION == "cpu":
        os.environ["CONDA_CPUONLY_FEATURE"] = "- cpuonly"
        os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = ""
@@ -110,14 +125,14 @@ def setup_conda_cudatoolkit_constraint():
    os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = toolkit


def do_build(start_args: List[str]):
def do_build(start_args: List[str]) -> None:
    args = start_args.copy()

    test_flag = os.environ.get("TEST_FLAG")
    if test_flag is not None:
        args.append(test_flag)

    args.extend(["-c", "bottler", "-c", "fvcore", "-c", "iopath", "-c", "conda-forge"])
    args.extend(["-c", "bottler", "-c", "iopath", "-c", "conda-forge"])
    args.append("--no-anaconda-upload")
    args.extend(["--python", os.environ["PYTHON_VERSION"]])
    args.append("packaging/pytorch3d")
@@ -126,8 +141,16 @@ def do_build(start_args: List[str]):


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Build the conda package.")
    parser.add_argument(
        "--use-conda-cuda",
        action="store_true",
        help="get cuda from conda ignoring local cuda",
    )
    our_args = parser.parse_args()

    args = ["conda", "build"]
    setup_cuda()
    args += setup_cuda(use_conda_cuda=our_args.use_conda_cuda)

    init_path = source_root_dir + "/pytorch3d/__init__.py"
    build_version = runpy.run_path(init_path)["__version__"]
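For orientation, a hedged sketch (not the script itself) of how the new `--use-conda-cuda` path above selects extra conda channels and exports the recipe constraints; `cu121` is an illustrative `CU_VERSION` value.

```python
import os
from typing import List

def conda_cuda_channels(cu_version: str, use_conda_cuda: bool) -> List[str]:
    # Mirrors the setup_cuda() change above: with --use-conda-cuda the CUDA
    # toolkit comes from the nvidia channel instead of a local /usr/local/cuda.
    if cu_version == "cpu" or not use_conda_cuda:
        return []
    major, minor = cu_version[2:4], cu_version[4:]  # "cu121" -> "12", "1"
    os.environ["CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT1"] = "- cuda-toolkit"
    os.environ["CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT2"] = f"- cuda-version={major}.{minor}"
    return ["-c", f"nvidia/label/cuda-{major}.{minor}.0"]

args = ["conda", "build", *conda_cuda_channels("cu121", use_conda_cuda=True)]
args += ["--no-anaconda-upload", "packaging/pytorch3d"]
print(" ".join(args))
```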
@@ -26,6 +26,6 @@ version_str="".join([
    torch.version.cuda.replace(".",""),
    f"_pyt{pyt_version_str}"
])
!pip install fvcore iopath
!pip install iopath
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
```
@@ -144,7 +144,7 @@ do
  conda activate "$tag"
  # shellcheck disable=SC2086
  conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "$cudatools=$CUDA_TAG"
  pip install fvcore iopath
  pip install iopath
  echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"

  rm -rf dist
@@ -8,12 +8,16 @@ source:
requirements:
  build:
    - {{ compiler('c') }} # [win]
    {{ environ.get('CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT1', '') }}
    {{ environ.get('CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT2', '') }}
    {{ environ.get('CONDA_CUB_CONSTRAINT') }}

  host:
    - python
    - setuptools
    - mkl =2023 # [x86_64]
    {{ environ.get('SETUPTOOLS_CONSTRAINT') }}
    {{ environ.get('CONDA_PYTORCH_BUILD_CONSTRAINT') }}
    {{ environ.get('CONDA_PYTORCH_MKL_CONSTRAINT') }}
    {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
    {{ environ.get('CONDA_CPUONLY_FEATURE') }}

@@ -21,7 +25,7 @@ requirements:
    - python
    - numpy >=1.11
    - torchvision >=0.5
    - fvcore
    - mkl =2023 # [x86_64]
    - iopath
    {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }}
    {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
@@ -47,8 +51,11 @@ test:
    - imageio
    - hydra-core
    - accelerate
    - matplotlib
    - tabulate
    - pandas
    - sqlalchemy
  commands:
    #pytest .
    python -m unittest discover -v -s tests -t .

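The `{{ environ.get(...) }}` lines in the recipe above are filled in by the build script through environment variables. Purely illustrative values for a hypothetical PyTorch 2.1.2 (< 2.2) build would look like this:

```python
import os

# Hypothetical values mirroring what packaging/build_conda.py exports; the
# Jinja placeholders in meta.yaml expand to exactly these strings.
os.environ["CONDA_PYTORCH_CONSTRAINT"] = "- pytorch==2.1.2"
os.environ["CONDA_PYTORCH_BUILD_CONSTRAINT"] = "- pytorch==2.1.2"
os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = "- mkl!=2024.1.0"
os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools<70"
```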
@@ -3,3 +3,5 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe
@@ -5,7 +5,9 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
# pyre-unsafe

"""
This file is the entry point for launching experiments with Implicitron.

Launch Training
@@ -42,6 +44,7 @@ The outputs of the experiment are saved and logged in multiple ways:
    config file.

"""

import logging
import os
import warnings
@@ -97,7 +100,7 @@ except ModuleNotFoundError:
no_accelerate = os.environ.get("PYTORCH3D_NO_ACCELERATE") is not None


class Experiment(Configurable):  # pyre-ignore: 13
class Experiment(Configurable):
    """
    This class is at the top level of Implicitron's config hierarchy. Its
    members are high-level components necessary for training an implicit rende-
@@ -118,12 +121,16 @@ class Experiment(Configurable):  # pyre-ignore: 13
        will be saved here.
    """

    # pyre-fixme[13]: Attribute `data_source` is never initialized.
    data_source: DataSourceBase
    data_source_class_type: str = "ImplicitronDataSource"
    # pyre-fixme[13]: Attribute `model_factory` is never initialized.
    model_factory: ModelFactoryBase
    model_factory_class_type: str = "ImplicitronModelFactory"
    # pyre-fixme[13]: Attribute `optimizer_factory` is never initialized.
    optimizer_factory: OptimizerFactoryBase
    optimizer_factory_class_type: str = "ImplicitronOptimizerFactory"
    # pyre-fixme[13]: Attribute `training_loop` is never initialized.
    training_loop: TrainingLoopBase
    training_loop_class_type: str = "ImplicitronTrainingLoop"
@@ -3,3 +3,5 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe
@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import logging
import os
from typing import Optional
@@ -24,7 +26,6 @@ logger = logging.getLogger(__name__)


class ModelFactoryBase(ReplaceableBase):

    resume: bool = True  # resume from the last checkpoint

    def __call__(self, **kwargs) -> ImplicitronModelBase:
@@ -43,7 +44,7 @@ class ModelFactoryBase(ReplaceableBase):


@registry.register
class ImplicitronModelFactory(ModelFactoryBase):  # pyre-ignore [13]
class ImplicitronModelFactory(ModelFactoryBase):
    """
    A factory class that initializes an implicit rendering model.

@@ -59,6 +60,7 @@ class ImplicitronModelFactory(ModelFactoryBase):  # pyre-ignore [13]

    """

    # pyre-fixme[13]: Attribute `model` is never initialized.
    model: ImplicitronModelBase
    model_class_type: str = "GenericModel"
    resume: bool = True
@@ -113,7 +115,9 @@ class ImplicitronModelFactory(ModelFactoryBase):  # pyre-ignore [13]
                "cuda:%d" % 0: "cuda:%d" % accelerator.local_process_index
            }
        model_state_dict = torch.load(
            model_io.get_model_path(model_path), map_location=map_location
            model_io.get_model_path(model_path),
            map_location=map_location,
            weights_only=True,
        )

        try:
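Several hunks in this range switch checkpoint loading to `weights_only=True`. A minimal, self-contained sketch of that pattern (the checkpoint path here is made up, not one from the repo):

```python
import torch

# weights_only=True restricts unpickling to tensors and plain containers,
# which is safer for untrusted checkpoints; map_location remaps the device
# the checkpoint was saved on (e.g. GPU rank 0 -> CPU).
state = {"weights": torch.zeros(2, 3)}
torch.save(state, "checkpoint_example.pth")

map_location = {"cuda:0": "cpu"}
loaded = torch.load("checkpoint_example.pth", map_location=map_location, weights_only=True)
print(loaded["weights"].shape)
```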
@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import inspect
import logging
import os
@@ -121,7 +123,7 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
        """
        # Get the parameters to optimize
        if hasattr(model, "_get_param_groups"):  # use the model function
            # pyre-ignore[29]
            # pyre-fixme[29]: `Union[Tensor, Module]` is not a function.
            p_groups = model._get_param_groups(self.lr, wd=self.weight_decay)
        else:
            p_groups = [
@@ -240,7 +242,7 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
            map_location = {
                "cuda:%d" % 0: "cuda:%d" % accelerator.local_process_index
            }
            optimizer_state = torch.load(opt_path, map_location)
            optimizer_state = torch.load(opt_path, map_location, weights_only=True)
        else:
            raise FileNotFoundError(f"Optimizer state {opt_path} does not exist.")
        return optimizer_state
@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import logging
import os
import time
@@ -28,13 +30,13 @@ from .utils import seed_all_random_engines
logger = logging.getLogger(__name__)


# pyre-fixme[13]: Attribute `evaluator` is never initialized.
class TrainingLoopBase(ReplaceableBase):
    """
    Members:
        evaluator: An EvaluatorBase instance, used to evaluate training results.
    """

    # pyre-fixme[13]: Attribute `evaluator` is never initialized.
    evaluator: Optional[EvaluatorBase]
    evaluator_class_type: Optional[str] = "ImplicitronEvaluator"

@@ -110,6 +112,8 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
    def __post_init__(self):
        run_auto_creation(self)

    # pyre-fixme[14]: `run` overrides method defined in `TrainingLoopBase`
    #  inconsistently.
    def run(
        self,
        *,
@@ -157,7 +161,6 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
        for epoch in range(start_epoch, self.max_epochs):
            # automatic new_epoch and plotting of stats at every epoch start
            with stats:

                # Make sure to re-seed random generators to ensure reproducibility
                # even after restart.
                seed_all_random_engines(seed + epoch)
@@ -391,7 +394,7 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
        ):
            prefix = f"e{stats.epoch}_it{stats.it[trainmode]}"
            if hasattr(model, "visualize"):
                # pyre-ignore [29]
                # pyre-fixme[29]: `Union[Tensor, Module]` is not a function.
                model.visualize(
                    viz,
                    visdom_env_imgs,
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
|
||||
import random
|
||||
|
||||
|
@ -3,3 +3,5 @@
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
import unittest
|
||||
@ -51,12 +53,8 @@ class TestExperiment(unittest.TestCase):
|
||||
cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_class_type = (
|
||||
"JsonIndexDatasetMapProvider"
|
||||
)
|
||||
dataset_args = (
|
||||
cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||
)
|
||||
dataloader_args = (
|
||||
cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
|
||||
)
|
||||
dataset_args = cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||
dataloader_args = cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
|
||||
dataset_args.category = "skateboard"
|
||||
dataset_args.test_restrict_sequence_id = 0
|
||||
dataset_args.dataset_root = "manifold://co3d/tree/extracted"
|
||||
@ -92,12 +90,8 @@ class TestExperiment(unittest.TestCase):
|
||||
cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_class_type = (
|
||||
"JsonIndexDatasetMapProvider"
|
||||
)
|
||||
dataset_args = (
|
||||
cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||
)
|
||||
dataloader_args = (
|
||||
cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
|
||||
)
|
||||
dataset_args = cfg.data_source_ImplicitronDataSource_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||
dataloader_args = cfg.data_source_ImplicitronDataSource_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
|
||||
dataset_args.category = "skateboard"
|
||||
dataset_args.test_restrict_sequence_id = 0
|
||||
dataset_args.dataset_root = "manifold://co3d/tree/extracted"
|
||||
@ -109,9 +103,7 @@ class TestExperiment(unittest.TestCase):
|
||||
cfg.training_loop_ImplicitronTrainingLoop_args.max_epochs = 2
|
||||
cfg.training_loop_ImplicitronTrainingLoop_args.store_checkpoints = False
|
||||
cfg.optimizer_factory_ImplicitronOptimizerFactory_args.lr_policy = "Exponential"
|
||||
cfg.optimizer_factory_ImplicitronOptimizerFactory_args.exponential_lr_step_size = (
|
||||
2
|
||||
)
|
||||
cfg.optimizer_factory_ImplicitronOptimizerFactory_args.exponential_lr_step_size = 2
|
||||
|
||||
if DEBUG:
|
||||
experiment.dump_cfg(cfg)
|
||||
|
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import logging
|
||||
import os
|
||||
import unittest
|
||||
@ -79,8 +81,9 @@ class TestOptimizerFactory(unittest.TestCase):
|
||||
|
||||
def test_param_overrides_self_param_group_assignment(self):
|
||||
pa, pb, pc = [torch.nn.Parameter(data=torch.tensor(i * 1.0)) for i in range(3)]
|
||||
na, nb = Node(params=[pa]), Node(
|
||||
params=[pb], param_groups={"self": "pb_self", "p1": "pb_param"}
|
||||
na, nb = (
|
||||
Node(params=[pa]),
|
||||
Node(params=[pb], param_groups={"self": "pb_self", "p1": "pb_param"}),
|
||||
)
|
||||
root = Node(children=[na, nb], params=[pc], param_groups={"m1": "pb_member"})
|
||||
param_groups = self._get_param_groups(root)
|
||||
|
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
|
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import contextlib
|
||||
import logging
|
||||
import os
|
||||
|
@ -5,6 +5,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
"""
|
||||
Script to visualize a previously trained model. Example call:
|
||||
|
||||
|
@@ -84,9 +84,9 @@ def get_nerf_datasets(

    if autodownload and any(not os.path.isfile(p) for p in (cameras_path, image_path)):
        # Automatically download the data files if missing.
        download_data((dataset_name,), data_root=data_root)
        download_data([dataset_name], data_root=data_root)

    train_data = torch.load(cameras_path)
    train_data = torch.load(cameras_path, weights_only=True)
    n_cameras = train_data["cameras"]["R"].shape[0]

    _image_max_image_pixels = Image.MAX_IMAGE_PIXELS
@ -343,12 +343,14 @@ class RadianceFieldRenderer(torch.nn.Module):
|
||||
# For a full render pass concatenate the output chunks,
|
||||
# and reshape to image size.
|
||||
out = {
|
||||
k: torch.cat(
|
||||
[ch_o[k] for ch_o in chunk_outputs],
|
||||
dim=1,
|
||||
).view(-1, *self._image_size, 3)
|
||||
if chunk_outputs[0][k] is not None
|
||||
else None
|
||||
k: (
|
||||
torch.cat(
|
||||
[ch_o[k] for ch_o in chunk_outputs],
|
||||
dim=1,
|
||||
).view(-1, *self._image_size, 3)
|
||||
if chunk_outputs[0][k] is not None
|
||||
else None
|
||||
)
|
||||
for k in ("rgb_fine", "rgb_coarse", "rgb_gt")
|
||||
}
|
||||
else:
|
||||
|
@ -194,7 +194,6 @@ class Stats:
|
||||
it = self.it[stat_set]
|
||||
|
||||
for stat in self.log_vars:
|
||||
|
||||
if stat not in self.stats[stat_set]:
|
||||
self.stats[stat_set][stat] = AverageMeter()
|
||||
|
||||
|
@ -24,7 +24,6 @@ CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs"
|
||||
|
||||
@hydra.main(config_path=CONFIG_DIR, config_name="lego")
|
||||
def main(cfg: DictConfig):
|
||||
|
||||
# Device on which to run.
|
||||
if torch.cuda.is_available():
|
||||
device = "cuda"
|
||||
@ -63,7 +62,7 @@ def main(cfg: DictConfig):
|
||||
raise ValueError(f"Model checkpoint {checkpoint_path} does not exist!")
|
||||
|
||||
print(f"Loading checkpoint {checkpoint_path}.")
|
||||
loaded_data = torch.load(checkpoint_path)
|
||||
loaded_data = torch.load(checkpoint_path, weights_only=True)
|
||||
# Do not load the cached xy grid.
|
||||
# - this allows setting an arbitrary evaluation image size.
|
||||
state_dict = {
|
||||
|
@ -42,7 +42,6 @@ class TestRaysampler(unittest.TestCase):
|
||||
cameras, rays = [], []
|
||||
|
||||
for _ in range(batch_size):
|
||||
|
||||
R = random_rotations(1)
|
||||
T = torch.randn(1, 3)
|
||||
focal_length = torch.rand(1, 2) + 0.5
|
||||
|
@ -25,7 +25,6 @@ CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs"
|
||||
|
||||
@hydra.main(config_path=CONFIG_DIR, config_name="lego")
|
||||
def main(cfg: DictConfig):
|
||||
|
||||
# Set the relevant seeds for reproducibility.
|
||||
np.random.seed(cfg.seed)
|
||||
torch.manual_seed(cfg.seed)
|
||||
@ -77,7 +76,7 @@ def main(cfg: DictConfig):
|
||||
# Resume training if requested.
|
||||
if cfg.resume and os.path.isfile(checkpoint_path):
|
||||
print(f"Resuming from checkpoint {checkpoint_path}.")
|
||||
loaded_data = torch.load(checkpoint_path)
|
||||
loaded_data = torch.load(checkpoint_path, weights_only=True)
|
||||
model.load_state_dict(loaded_data["model"])
|
||||
stats = pickle.loads(loaded_data["stats"])
|
||||
print(f" => resuming from epoch {stats.epoch}.")
|
||||
@ -219,7 +218,6 @@ def main(cfg: DictConfig):
|
||||
|
||||
# Validation
|
||||
if epoch % cfg.validation_epoch_interval == 0 and epoch > 0:
|
||||
|
||||
# Sample a validation camera/image.
|
||||
val_batch = next(val_dataloader.__iter__())
|
||||
val_image, val_camera, camera_idx = val_batch[0].values()
|
||||
|
@@ -4,4 +4,6 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

__version__ = "0.7.5"
# pyre-unsafe

__version__ = "0.7.8"
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from .datatypes import Device, get_device, make_device
|
||||
|
||||
|
||||
|
@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import Sequence, Tuple, Union

import torch
@@ -15,7 +17,7 @@ Some functions which depend on PyTorch or Python versions.


def meshgrid_ij(
    *A: Union[torch.Tensor, Sequence[torch.Tensor]]
    *A: Union[torch.Tensor, Sequence[torch.Tensor]],
) -> Tuple[torch.Tensor, ...]:  # pragma: no cover
    """
    Like torch.meshgrid was before PyTorch 1.10.0, i.e. with indexing set to ij
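For context, a small sketch of what `meshgrid_ij` wraps on recent PyTorch: `torch.meshgrid` with `indexing="ij"` (matrix indexing), which is the pre-1.10 default behaviour the helper preserves.

```python
import torch

x = torch.arange(3)
y = torch.arange(4)
# "ij" indexing: the first output varies along dim 0, the second along dim 1.
gx, gy = torch.meshgrid(x, y, indexing="ij")
print(gx.shape, gy.shape)  # torch.Size([3, 4]) torch.Size([3, 4])
```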
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from typing import Optional, Union
|
||||
|
||||
import torch
|
||||
|
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import math
|
||||
from typing import Tuple
|
||||
|
||||
|
@ -4,5 +4,7 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
from .symeig3x3 import symeig3x3
|
||||
from .utils import _safe_det_3x3
|
||||
|
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
import math
|
||||
from typing import Optional, Tuple
|
||||
|
||||
|
@ -4,6 +4,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
|
||||
import torch
|
||||
|
||||
|
@ -81,6 +81,8 @@ inline std::tuple<at::Tensor, at::Tensor> BallQuery(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(p1);
|
||||
CHECK_CPU(p2);
|
||||
return BallQueryCpu(
|
||||
p1.contiguous(),
|
||||
p2.contiguous(),
|
||||
|
@ -7,7 +7,6 @@
|
||||
*/
|
||||
|
||||
#include <torch/extension.h>
|
||||
#include <queue>
|
||||
#include <tuple>
|
||||
|
||||
std::tuple<at::Tensor, at::Tensor> BallQueryCpu(
|
||||
|
@ -98,6 +98,11 @@ at::Tensor SigmoidAlphaBlendBackward(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(distances);
|
||||
CHECK_CPU(pix_to_face);
|
||||
CHECK_CPU(alphas);
|
||||
CHECK_CPU(grad_alphas);
|
||||
|
||||
return SigmoidAlphaBlendBackwardCpu(
|
||||
grad_alphas, alphas, distances, pix_to_face, sigma);
|
||||
}
|
||||
|
@ -28,17 +28,16 @@ __global__ void alphaCompositeCudaForwardKernel(
|
||||
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
|
||||
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
|
||||
// clang-format on
|
||||
const int64_t batch_size = result.size(0);
|
||||
const int64_t C = features.size(0);
|
||||
const int64_t H = points_idx.size(2);
|
||||
const int64_t W = points_idx.size(3);
|
||||
|
||||
// Get the batch and index
|
||||
const int batch = blockIdx.x;
|
||||
const auto batch = blockIdx.x;
|
||||
|
||||
const int num_pixels = C * H * W;
|
||||
const int num_threads = gridDim.y * blockDim.x;
|
||||
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
const auto num_threads = gridDim.y * blockDim.x;
|
||||
const auto tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
|
||||
// Iterate over each feature in each pixel
|
||||
for (int pid = tid; pid < num_pixels; pid += num_threads) {
|
||||
@ -79,17 +78,16 @@ __global__ void alphaCompositeCudaBackwardKernel(
|
||||
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
|
||||
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
|
||||
// clang-format on
|
||||
const int64_t batch_size = points_idx.size(0);
|
||||
const int64_t C = features.size(0);
|
||||
const int64_t H = points_idx.size(2);
|
||||
const int64_t W = points_idx.size(3);
|
||||
|
||||
// Get the batch and index
|
||||
const int batch = blockIdx.x;
|
||||
const auto batch = blockIdx.x;
|
||||
|
||||
const int num_pixels = C * H * W;
|
||||
const int num_threads = gridDim.y * blockDim.x;
|
||||
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
const auto num_threads = gridDim.y * blockDim.x;
|
||||
const auto tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
|
||||
// Parallelize over each feature in each pixel in images of size H * W,
|
||||
// for each image in the batch of size batch_size
|
||||
|
@ -74,6 +74,9 @@ torch::Tensor alphaCompositeForward(
|
||||
AT_ERROR("Not compiled with GPU support");
|
||||
#endif
|
||||
} else {
|
||||
CHECK_CPU(features);
|
||||
CHECK_CPU(alphas);
|
||||
CHECK_CPU(points_idx);
|
||||
return alphaCompositeCpuForward(features, alphas, points_idx);
|
||||
}
|
||||
}
|
||||
@ -101,6 +104,11 @@ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
|
||||
AT_ERROR("Not compiled with GPU support");
|
||||
#endif
|
||||
} else {
|
||||
CHECK_CPU(grad_outputs);
|
||||
CHECK_CPU(features);
|
||||
CHECK_CPU(alphas);
|
||||
CHECK_CPU(points_idx);
|
||||
|
||||
return alphaCompositeCpuBackward(
|
||||
grad_outputs, features, alphas, points_idx);
|
||||
}
|
||||
|
@ -28,17 +28,16 @@ __global__ void weightedSumNormCudaForwardKernel(
|
||||
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
|
||||
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
|
||||
// clang-format on
|
||||
const int64_t batch_size = result.size(0);
|
||||
const int64_t C = features.size(0);
|
||||
const int64_t H = points_idx.size(2);
|
||||
const int64_t W = points_idx.size(3);
|
||||
|
||||
// Get the batch and index
|
||||
const int batch = blockIdx.x;
|
||||
const auto batch = blockIdx.x;
|
||||
|
||||
const int num_pixels = C * H * W;
|
||||
const int num_threads = gridDim.y * blockDim.x;
|
||||
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
const auto num_threads = gridDim.y * blockDim.x;
|
||||
const auto tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
|
||||
// Parallelize over each feature in each pixel in images of size H * W,
|
||||
// for each image in the batch of size batch_size
|
||||
@ -92,17 +91,16 @@ __global__ void weightedSumNormCudaBackwardKernel(
|
||||
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
|
||||
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
|
||||
// clang-format on
|
||||
const int64_t batch_size = points_idx.size(0);
|
||||
const int64_t C = features.size(0);
|
||||
const int64_t H = points_idx.size(2);
|
||||
const int64_t W = points_idx.size(3);
|
||||
|
||||
// Get the batch and index
|
||||
const int batch = blockIdx.x;
|
||||
const auto batch = blockIdx.x;
|
||||
|
||||
const int num_pixels = C * W * H;
|
||||
const int num_threads = gridDim.y * blockDim.x;
|
||||
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
const auto num_threads = gridDim.y * blockDim.x;
|
||||
const auto tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
|
||||
// Parallelize over each feature in each pixel in images of size H * W,
|
||||
// for each image in the batch of size batch_size
|
||||
|
@ -73,6 +73,10 @@ torch::Tensor weightedSumNormForward(
|
||||
AT_ERROR("Not compiled with GPU support");
|
||||
#endif
|
||||
} else {
|
||||
CHECK_CPU(features);
|
||||
CHECK_CPU(alphas);
|
||||
CHECK_CPU(points_idx);
|
||||
|
||||
return weightedSumNormCpuForward(features, alphas, points_idx);
|
||||
}
|
||||
}
|
||||
@ -100,6 +104,11 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumNormBackward(
|
||||
AT_ERROR("Not compiled with GPU support");
|
||||
#endif
|
||||
} else {
|
||||
CHECK_CPU(grad_outputs);
|
||||
CHECK_CPU(features);
|
||||
CHECK_CPU(alphas);
|
||||
CHECK_CPU(points_idx);
|
||||
|
||||
return weightedSumNormCpuBackward(
|
||||
grad_outputs, features, alphas, points_idx);
|
||||
}
|
||||
|
@ -26,17 +26,16 @@ __global__ void weightedSumCudaForwardKernel(
|
||||
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
|
||||
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
|
||||
// clang-format on
|
||||
const int64_t batch_size = result.size(0);
|
||||
const int64_t C = features.size(0);
|
||||
const int64_t H = points_idx.size(2);
|
||||
const int64_t W = points_idx.size(3);
|
||||
|
||||
// Get the batch and index
|
||||
const int batch = blockIdx.x;
|
||||
const auto batch = blockIdx.x;
|
||||
|
||||
const int num_pixels = C * H * W;
|
||||
const int num_threads = gridDim.y * blockDim.x;
|
||||
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
const auto num_threads = gridDim.y * blockDim.x;
|
||||
const auto tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
|
||||
// Parallelize over each feature in each pixel in images of size H * W,
|
||||
// for each image in the batch of size batch_size
|
||||
@ -74,17 +73,16 @@ __global__ void weightedSumCudaBackwardKernel(
|
||||
const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
|
||||
const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
|
||||
// clang-format on
|
||||
const int64_t batch_size = points_idx.size(0);
|
||||
const int64_t C = features.size(0);
|
||||
const int64_t H = points_idx.size(2);
|
||||
const int64_t W = points_idx.size(3);
|
||||
|
||||
// Get the batch and index
|
||||
const int batch = blockIdx.x;
|
||||
const auto batch = blockIdx.x;
|
||||
|
||||
const int num_pixels = C * H * W;
|
||||
const int num_threads = gridDim.y * blockDim.x;
|
||||
const int tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
const auto num_threads = gridDim.y * blockDim.x;
|
||||
const auto tid = blockIdx.y * blockDim.x + threadIdx.x;
|
||||
|
||||
// Iterate over each pixel to compute the contribution to the
|
||||
// gradient for the features and weights
|
||||
|
@ -72,6 +72,9 @@ torch::Tensor weightedSumForward(
|
||||
AT_ERROR("Not compiled with GPU support");
|
||||
#endif
|
||||
} else {
|
||||
CHECK_CPU(features);
|
||||
CHECK_CPU(alphas);
|
||||
CHECK_CPU(points_idx);
|
||||
return weightedSumCpuForward(features, alphas, points_idx);
|
||||
}
|
||||
}
|
||||
@ -98,6 +101,11 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumBackward(
|
||||
AT_ERROR("Not compiled with GPU support");
|
||||
#endif
|
||||
} else {
|
||||
CHECK_CPU(grad_outputs);
|
||||
CHECK_CPU(features);
|
||||
CHECK_CPU(alphas);
|
||||
CHECK_CPU(points_idx);
|
||||
|
||||
return weightedSumCpuBackward(grad_outputs, features, alphas, points_idx);
|
||||
}
|
||||
}
|
||||
|
@ -8,7 +8,6 @@
|
||||
|
||||
// clang-format off
|
||||
#include "./pulsar/global.h" // Include before <torch/extension.h>.
|
||||
#include <torch/extension.h>
|
||||
// clang-format on
|
||||
#include "./pulsar/pytorch/renderer.h"
|
||||
#include "./pulsar/pytorch/tensor_util.h"
|
||||
@ -99,6 +98,7 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
||||
m.def("marching_cubes", &MarchingCubes);
|
||||
|
||||
// Pulsar.
|
||||
// Pulsar not enabled on AMD.
|
||||
#ifdef PULSAR_LOGGING_ENABLED
|
||||
c10::ShowLogInfoToStderr();
|
||||
#endif
|
||||
@ -148,10 +148,10 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
||||
py::arg("gamma"),
|
||||
py::arg("max_depth"),
|
||||
py::arg("min_depth") /* = 0.f*/,
|
||||
py::arg(
|
||||
"bg_col") /* = at::nullopt not exposed properly in pytorch 1.1. */
|
||||
py::arg("bg_col") /* = std::nullopt not exposed properly in
|
||||
pytorch 1.1. */
|
||||
,
|
||||
py::arg("opacity") /* = at::nullopt ... */,
|
||||
py::arg("opacity") /* = std::nullopt ... */,
|
||||
py::arg("percent_allowed_difference") = 0.01f,
|
||||
py::arg("max_n_hits") = MAX_UINT,
|
||||
py::arg("mode") = 0)
|
||||
|
@ -60,6 +60,8 @@ std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForward(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(verts);
|
||||
CHECK_CPU(faces);
|
||||
return FaceAreasNormalsForwardCpu(verts, faces);
|
||||
}
|
||||
|
||||
@ -80,5 +82,9 @@ at::Tensor FaceAreasNormalsBackward(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(grad_areas);
|
||||
CHECK_CPU(grad_normals);
|
||||
CHECK_CPU(verts);
|
||||
CHECK_CPU(faces);
|
||||
return FaceAreasNormalsBackwardCpu(grad_areas, grad_normals, verts, faces);
|
||||
}
|
||||
|
@ -20,14 +20,14 @@ __global__ void GatherScatterCudaKernel(
|
||||
const size_t V,
|
||||
const size_t D,
|
||||
const size_t E) {
|
||||
const int tid = threadIdx.x;
|
||||
const auto tid = threadIdx.x;
|
||||
|
||||
// Reverse the vertex order if backward.
|
||||
const int v0_idx = backward ? 1 : 0;
|
||||
const int v1_idx = backward ? 0 : 1;
|
||||
|
||||
// Edges are split evenly across the blocks.
|
||||
for (int e = blockIdx.x; e < E; e += gridDim.x) {
|
||||
for (auto e = blockIdx.x; e < E; e += gridDim.x) {
|
||||
// Get indices of vertices which form the edge.
|
||||
const int64_t v0 = edges[2 * e + v0_idx];
|
||||
const int64_t v1 = edges[2 * e + v1_idx];
|
||||
@ -35,7 +35,7 @@ __global__ void GatherScatterCudaKernel(
|
||||
// Split vertex features evenly across threads.
|
||||
// This implementation will be quite wasteful when D<128 since there will be
|
||||
// a lot of threads doing nothing.
|
||||
for (int d = tid; d < D; d += blockDim.x) {
|
||||
for (auto d = tid; d < D; d += blockDim.x) {
|
||||
const float val = input[v1 * D + d];
|
||||
float* address = output + v0 * D + d;
|
||||
atomicAdd(address, val);
|
||||
|
@ -53,5 +53,7 @@ at::Tensor GatherScatter(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(input);
|
||||
CHECK_CPU(edges);
|
||||
return GatherScatterCpu(input, edges, directed, backward);
|
||||
}
|
||||
|
@ -20,8 +20,8 @@ __global__ void InterpFaceAttrsForwardKernel(
|
||||
const size_t P,
|
||||
const size_t F,
|
||||
const size_t D) {
|
||||
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
|
||||
const int num_threads = blockDim.x * gridDim.x;
|
||||
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
|
||||
const auto num_threads = blockDim.x * gridDim.x;
|
||||
for (int pd = tid; pd < P * D; pd += num_threads) {
|
||||
const int p = pd / D;
|
||||
const int d = pd % D;
|
||||
@ -93,8 +93,8 @@ __global__ void InterpFaceAttrsBackwardKernel(
|
||||
const size_t P,
|
||||
const size_t F,
|
||||
const size_t D) {
|
||||
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
|
||||
const int num_threads = blockDim.x * gridDim.x;
|
||||
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
|
||||
const auto num_threads = blockDim.x * gridDim.x;
|
||||
for (int pd = tid; pd < P * D; pd += num_threads) {
|
||||
const int p = pd / D;
|
||||
const int d = pd % D;
|
||||
|
@ -57,6 +57,8 @@ at::Tensor InterpFaceAttrsForward(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(face_attrs);
|
||||
CHECK_CPU(barycentric_coords);
|
||||
return InterpFaceAttrsForwardCpu(pix_to_face, barycentric_coords, face_attrs);
|
||||
}
|
||||
|
||||
@ -106,6 +108,9 @@ std::tuple<at::Tensor, at::Tensor> InterpFaceAttrsBackward(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(face_attrs);
|
||||
CHECK_CPU(barycentric_coords);
|
||||
CHECK_CPU(grad_pix_attrs);
|
||||
return InterpFaceAttrsBackwardCpu(
|
||||
pix_to_face, barycentric_coords, face_attrs, grad_pix_attrs);
|
||||
}
|
||||
|
@ -44,5 +44,7 @@ inline std::tuple<at::Tensor, at::Tensor> IoUBox3D(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(boxes1);
|
||||
CHECK_CPU(boxes2);
|
||||
return IoUBox3DCpu(boxes1.contiguous(), boxes2.contiguous());
|
||||
}
|
||||
|
@ -7,10 +7,7 @@
|
||||
*/
|
||||
|
||||
#include <torch/extension.h>
|
||||
#include <torch/torch.h>
|
||||
#include <list>
|
||||
#include <numeric>
|
||||
#include <queue>
|
||||
#include <tuple>
|
||||
#include "iou_box3d/iou_utils.h"
|
||||
|
||||
|
@ -461,10 +461,8 @@ __device__ inline std::tuple<float3, float3> ArgMaxVerts(
|
||||
__device__ inline bool IsCoplanarTriTri(
|
||||
const FaceVerts& tri1,
|
||||
const FaceVerts& tri2) {
|
||||
const float3 tri1_ctr = FaceCenter({tri1.v0, tri1.v1, tri1.v2});
|
||||
const float3 tri1_n = FaceNormal({tri1.v0, tri1.v1, tri1.v2});
|
||||
|
||||
const float3 tri2_ctr = FaceCenter({tri2.v0, tri2.v1, tri2.v2});
|
||||
const float3 tri2_n = FaceNormal({tri2.v0, tri2.v1, tri2.v2});
|
||||
|
||||
// Check if parallel
|
||||
@ -500,7 +498,6 @@ __device__ inline bool IsCoplanarTriPlane(
|
||||
const FaceVerts& tri,
|
||||
const FaceVerts& plane,
|
||||
const float3& normal) {
|
||||
const float3 tri_ctr = FaceCenter({tri.v0, tri.v1, tri.v2});
|
||||
const float3 nt = FaceNormal({tri.v0, tri.v1, tri.v2});
|
||||
|
||||
// check if parallel
|
||||
@ -728,7 +725,7 @@ __device__ inline int BoxIntersections(
|
||||
}
|
||||
}
|
||||
// Update the face_verts_out tris
|
||||
num_tris = offset;
|
||||
num_tris = min(MAX_TRIS, offset);
|
||||
for (int j = 0; j < num_tris; ++j) {
|
||||
face_verts_out[j] = tri_verts_updated[j];
|
||||
}
|
||||
|
@ -338,7 +338,7 @@ std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdxCuda(
|
||||
|
||||
TORCH_CHECK((norm == 1) || (norm == 2), "Norm must be 1 or 2.");
|
||||
|
||||
TORCH_CHECK(p2.size(2) == D, "Point sets must have the same last dimension");
|
||||
TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension");
|
||||
auto long_dtype = lengths1.options().dtype(at::kLong);
|
||||
auto idxs = at::zeros({N, P1, K}, long_dtype);
|
||||
auto dists = at::zeros({N, P1, K}, p1.options());
|
||||
@ -495,7 +495,7 @@ __global__ void KNearestNeighborBackwardKernel(
|
||||
if ((p1_idx < num1) && (k < num2)) {
|
||||
const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k];
|
||||
// index of point in p2 corresponding to the k-th nearest neighbor
|
||||
const size_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
|
||||
const int64_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
|
||||
// If the index is the pad value of -1 then ignore it
|
||||
if (p2_idx == -1) {
|
||||
continue;
|
||||
|
@ -74,6 +74,8 @@ std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdx(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(p1);
|
||||
CHECK_CPU(p2);
|
||||
return KNearestNeighborIdxCpu(p1, p2, lengths1, lengths2, norm, K);
|
||||
}
|
||||
|
||||
@ -140,6 +142,8 @@ std::tuple<at::Tensor, at::Tensor> KNearestNeighborBackward(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(p1);
|
||||
CHECK_CPU(p2);
|
||||
return KNearestNeighborBackwardCpu(
|
||||
p1, p2, lengths1, lengths2, idxs, norm, grad_dists);
|
||||
}
|
||||
|
@ -223,7 +223,7 @@ __global__ void CompactVoxelsKernel(
|
||||
compactedVoxelArray,
|
||||
const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
|
||||
voxelOccupied,
|
||||
const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
|
||||
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
|
||||
voxelOccupiedScan,
|
||||
uint numVoxels) {
|
||||
uint id = blockIdx.x * blockDim.x + threadIdx.x;
|
||||
@ -255,7 +255,8 @@ __global__ void GenerateFacesKernel(
|
||||
at::PackedTensorAccessor<int64_t, 1, at::RestrictPtrTraits> ids,
|
||||
at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
|
||||
compactedVoxelArray,
|
||||
at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits> numVertsScanned,
|
||||
at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
|
||||
numVertsScanned,
|
||||
const uint activeVoxels,
|
||||
const at::PackedTensorAccessor32<float, 3, at::RestrictPtrTraits> vol,
|
||||
const at::PackedTensorAccessor32<int, 2, at::RestrictPtrTraits> faceTable,
|
||||
@ -381,6 +382,44 @@ __global__ void GenerateFacesKernel(
|
||||
} // end for grid-strided kernel
|
||||
}
|
||||
|
||||
// ATen/Torch does not have an exclusive-scan operator. Additionally, in the
|
||||
// code below we need to get the "total number of items to work on" after
|
||||
// a scan, which with an inclusive-scan would simply be the value of the last
|
||||
// element in the tensor.
|
||||
//
|
||||
// This utility function hits two birds with one stone, by running
|
||||
// an inclusive-scan into a right-shifted view of a tensor that's
|
||||
// allocated to be one element bigger than the input tensor.
|
||||
//
|
||||
// Note; return tensor is `int64_t` per element, even if the input
|
||||
// tensor is only 32-bit. Also, the return tensor is one element bigger
|
||||
// than the input one.
|
||||
//
|
||||
// Secondary optional argument is an output argument that gets the
|
||||
// value of the last element of the return tensor (because you almost
|
||||
// always need this CPU-side right after this function anyway).
|
||||
static at::Tensor ExclusiveScanAndTotal(
|
||||
const at::Tensor& inTensor,
|
||||
int64_t* optTotal = nullptr) {
|
||||
const auto inSize = inTensor.sizes()[0];
|
||||
auto retTensor = at::zeros({inSize + 1}, at::kLong).to(inTensor.device());
|
||||
|
||||
using at::indexing::None;
|
||||
using at::indexing::Slice;
|
||||
auto rightShiftedView = retTensor.index({Slice(1, None)});
|
||||
|
||||
// Do an (inclusive-scan) cumulative sum in to the view that's
|
||||
// shifted one element to the right...
|
||||
at::cumsum_out(rightShiftedView, inTensor, 0, at::kLong);
|
||||
|
||||
if (optTotal) {
|
||||
*optTotal = retTensor[inSize].cpu().item<int64_t>();
|
||||
}
|
||||
|
||||
// ...so that the not-shifted tensor holds the exclusive-scan
|
||||
return retTensor;
|
||||
}
|
||||
|
||||
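A hedged Python/torch sketch of the `ExclusiveScanAndTotal` trick described above: an inclusive `cumsum` is written into a view shifted one slot to the right, so the unshifted tensor holds the exclusive scan and its last element is the total.

```python
import torch

counts = torch.tensor([3, 0, 2, 5], dtype=torch.int32)  # e.g. vertices per voxel

scan = torch.zeros(counts.numel() + 1, dtype=torch.int64)
torch.cumsum(counts, dim=0, dtype=torch.int64, out=scan[1:])  # inclusive scan, shifted right
total = int(scan[-1])

print(scan)   # tensor([ 0,  3,  3,  5, 10]) -- exclusive scan of counts
print(total)  # 10
```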
// Entrance for marching cubes cuda extension. Marching Cubes is an algorithm to
|
||||
// create triangle meshes from an implicit function (one of the form f(x, y, z)
|
||||
// = 0). It works by iteratively checking a grid of cubes superimposed over a
|
||||
@ -443,20 +482,18 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
||||
using at::indexing::Slice;
|
||||
|
||||
auto d_voxelVerts =
|
||||
at::zeros({numVoxels + 1}, at::TensorOptions().dtype(at::kInt))
|
||||
at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
|
||||
.to(vol.device());
|
||||
auto d_voxelVerts_ = d_voxelVerts.index({Slice(1, None)});
|
||||
auto d_voxelOccupied =
|
||||
at::zeros({numVoxels + 1}, at::TensorOptions().dtype(at::kInt))
|
||||
at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
|
||||
.to(vol.device());
|
||||
auto d_voxelOccupied_ = d_voxelOccupied.index({Slice(1, None)});
|
||||
|
||||
// Execute "ClassifyVoxelKernel" kernel to precompute
|
||||
// two arrays - d_voxelOccupied and d_voxelVertices to global memory,
|
||||
// which stores the occupancy state and number of voxel vertices per voxel.
|
||||
ClassifyVoxelKernel<<<grid, threads, 0, stream>>>(
|
||||
d_voxelVerts_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupied_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelVerts.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
|
||||
isolevel);
|
||||
AT_CUDA_CHECK(cudaGetLastError());
|
||||
@ -466,12 +503,9 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
||||
// count for voxels in the grid and compute the number of active voxels.
|
||||
// If the number of active voxels is 0, return zero tensor for verts and
|
||||
// faces.
|
||||
|
||||
auto d_voxelOccupiedScan = at::cumsum(d_voxelOccupied, 0);
|
||||
auto d_voxelOccupiedScan_ = d_voxelOccupiedScan.index({Slice(1, None)});
|
||||
|
||||
// number of active voxels
|
||||
int activeVoxels = d_voxelOccupiedScan[numVoxels].cpu().item<int>();
|
||||
int64_t activeVoxels = 0;
|
||||
auto d_voxelOccupiedScan =
|
||||
ExclusiveScanAndTotal(d_voxelOccupied, &activeVoxels);
|
||||
|
||||
const int device_id = vol.device().index();
|
||||
auto opt = at::TensorOptions().dtype(at::kInt).device(at::kCUDA, device_id);
|
||||
@ -486,23 +520,21 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
||||
return std::make_tuple(verts, faces, ids);
|
||||
}
|
||||
|
||||
// Execute "CompactVoxelsKernel" kernel to compress voxels for accleration.
|
||||
// Execute "CompactVoxelsKernel" kernel to compress voxels for acceleration.
|
||||
// This allows us to run triangle generation on only the occupied voxels.
|
||||
auto d_compVoxelArray = at::zeros({activeVoxels}, opt);
|
||||
CompactVoxelsKernel<<<grid, threads, 0, stream>>>(
|
||||
d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupiedScan_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelOccupiedScan
|
||||
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
|
||||
numVoxels);
|
||||
AT_CUDA_CHECK(cudaGetLastError());
|
||||
cudaDeviceSynchronize();
|
||||
|
||||
// Scan d_voxelVerts array to generate offsets of vertices for each voxel
|
||||
auto d_voxelVertsScan = at::cumsum(d_voxelVerts, 0);
|
||||
auto d_voxelVertsScan_ = d_voxelVertsScan.index({Slice(1, None)});
|
||||
|
||||
// total number of vertices
|
||||
int totalVerts = d_voxelVertsScan[numVoxels].cpu().item<int>();
|
||||
int64_t totalVerts = 0;
|
||||
auto d_voxelVertsScan = ExclusiveScanAndTotal(d_voxelVerts, &totalVerts);
|
||||
|
||||
// Execute "GenerateFacesKernel" kernel
|
||||
// This runs only on the occupied voxels.
|
||||
@ -522,7 +554,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
|
||||
faces.packed_accessor<int64_t, 2, at::RestrictPtrTraits>(),
|
||||
ids.packed_accessor<int64_t, 1, at::RestrictPtrTraits>(),
|
||||
d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelVertsScan_.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
|
||||
d_voxelVertsScan.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
|
||||
activeVoxels,
|
||||
vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
|
||||
faceTable.packed_accessor32<int, 2, at::RestrictPtrTraits>(),
|
||||
|
@ -58,5 +58,6 @@ inline std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubes(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(vol);
|
||||
return MarchingCubesCpu(vol.contiguous(), isolevel);
|
||||
}
|
||||
|
@ -71,8 +71,8 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCpu(
|
||||
if ((j + 1) % 3 == 0 && ps[0] != ps[1] && ps[1] != ps[2] &&
|
||||
ps[2] != ps[0]) {
|
||||
for (int k = 0; k < 3; k++) {
|
||||
int v = tri[k];
|
||||
edge_id_to_v[tri.at(k)] = ps.at(k);
|
||||
int64_t v = tri.at(k);
|
||||
edge_id_to_v[v] = ps.at(k);
|
||||
if (!uniq_edge_id.count(v)) {
|
||||
uniq_edge_id[v] = verts.size();
|
||||
verts.push_back(edge_id_to_v[v]);
|
||||
|
@ -88,6 +88,8 @@ at::Tensor PackedToPadded(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(inputs_packed);
|
||||
CHECK_CPU(first_idxs);
|
||||
return PackedToPaddedCpu(inputs_packed, first_idxs, max_size);
|
||||
}
|
||||
|
||||
@ -105,5 +107,7 @@ at::Tensor PaddedToPacked(
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
CHECK_CPU(inputs_padded);
|
||||
CHECK_CPU(first_idxs);
|
||||
return PaddedToPackedCpu(inputs_padded, first_idxs, num_inputs);
|
||||
}
|
||||
|
@@ -110,7 +110,7 @@ __global__ void DistanceForwardKernel(
__syncthreads();

// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
for (auto s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
@@ -502,8 +502,8 @@ __global__ void PointFaceArrayForwardKernel(
const float3* tris_f3 = (float3*)tris;

// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto num_threads = gridDim.x * blockDim.x;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

for (int t_i = tid; t_i < P * T; t_i += num_threads) {
const int t = t_i / P; // segment index.
@@ -576,8 +576,8 @@ __global__ void PointFaceArrayBackwardKernel(
const float3* tris_f3 = (float3*)tris;

// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto num_threads = gridDim.x * blockDim.x;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

for (int t_i = tid; t_i < P * T; t_i += num_threads) {
const int t = t_i / P; // triangle index.
@@ -683,8 +683,8 @@ __global__ void PointEdgeArrayForwardKernel(
float3* segms_f3 = (float3*)segms;

// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto num_threads = gridDim.x * blockDim.x;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
@@ -752,8 +752,8 @@ __global__ void PointEdgeArrayBackwardKernel(
float3* segms_f3 = (float3*)segms;

// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto num_threads = gridDim.x * blockDim.x;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.

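The kernel hunks above change index-style locals from int to auto. blockDim, gridDim, blockIdx and threadIdx are unsigned, so initializing an int from expressions such as blockDim.x / 2 or gridDim.x * blockDim.x mixes signedness and can trigger conversion or comparison warnings on stricter compiler settings; with auto the locals simply take the unsigned type of the right-hand side. A small host-side C++ illustration of the difference, with plain unsigned variables standing in for the CUDA built-ins:

    #include <iostream>

    int main() {
      const unsigned block_dim_x = 256;  // stands in for blockDim.x
      const unsigned grid_dim_x = 64;    // stands in for gridDim.x

      int s_narrowed = block_dim_x / 2;           // int <- unsigned: implicit conversion
      auto s_deduced = block_dim_x / 2;           // deduced as unsigned int

      const int n_narrowed = grid_dim_x * block_dim_x;   // same narrowing issue
      const auto n_deduced = grid_dim_x * block_dim_x;   // unsigned, no conversion

      std::cout << s_narrowed << " " << s_deduced << " "
                << n_narrowed << " " << n_deduced << "\n";
      return 0;
    }
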
@@ -88,6 +88,10 @@ std::tuple<torch::Tensor, torch::Tensor> PointFaceDistanceForward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(points_first_idx);
CHECK_CPU(tris);
CHECK_CPU(tris_first_idx);
return PointFaceDistanceForwardCpu(
points, points_first_idx, tris, tris_first_idx, min_triangle_area);
}
@@ -143,6 +147,10 @@ std::tuple<torch::Tensor, torch::Tensor> PointFaceDistanceBackward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(tris);
CHECK_CPU(idx_points);
CHECK_CPU(grad_dists);
return PointFaceDistanceBackwardCpu(
points, tris, idx_points, grad_dists, min_triangle_area);
}
@@ -221,6 +229,10 @@ std::tuple<torch::Tensor, torch::Tensor> FacePointDistanceForward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(points_first_idx);
CHECK_CPU(tris);
CHECK_CPU(tris_first_idx);
return FacePointDistanceForwardCpu(
points, points_first_idx, tris, tris_first_idx, min_triangle_area);
}
@@ -277,6 +289,10 @@ std::tuple<torch::Tensor, torch::Tensor> FacePointDistanceBackward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(tris);
CHECK_CPU(idx_tris);
CHECK_CPU(grad_dists);
return FacePointDistanceBackwardCpu(
points, tris, idx_tris, grad_dists, min_triangle_area);
}
@@ -346,6 +362,10 @@ std::tuple<torch::Tensor, torch::Tensor> PointEdgeDistanceForward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(points_first_idx);
CHECK_CPU(segms);
CHECK_CPU(segms_first_idx);
return PointEdgeDistanceForwardCpu(
points, points_first_idx, segms, segms_first_idx, max_points);
}
@@ -396,6 +416,10 @@ std::tuple<torch::Tensor, torch::Tensor> PointEdgeDistanceBackward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(segms);
CHECK_CPU(idx_points);
CHECK_CPU(grad_dists);
return PointEdgeDistanceBackwardCpu(points, segms, idx_points, grad_dists);
}

@@ -464,6 +488,10 @@ std::tuple<torch::Tensor, torch::Tensor> EdgePointDistanceForward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(points_first_idx);
CHECK_CPU(segms);
CHECK_CPU(segms_first_idx);
return EdgePointDistanceForwardCpu(
points, points_first_idx, segms, segms_first_idx, max_segms);
}
@@ -514,6 +542,10 @@ std::tuple<torch::Tensor, torch::Tensor> EdgePointDistanceBackward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(segms);
CHECK_CPU(idx_segms);
CHECK_CPU(grad_dists);
return EdgePointDistanceBackwardCpu(points, segms, idx_segms, grad_dists);
}

@@ -567,6 +599,8 @@ torch::Tensor PointFaceArrayDistanceForward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(tris);
return PointFaceArrayDistanceForwardCpu(points, tris, min_triangle_area);
}

@@ -613,6 +647,9 @@ std::tuple<torch::Tensor, torch::Tensor> PointFaceArrayDistanceBackward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(tris);
CHECK_CPU(grad_dists);
return PointFaceArrayDistanceBackwardCpu(
points, tris, grad_dists, min_triangle_area);
}
@@ -661,6 +698,8 @@ torch::Tensor PointEdgeArrayDistanceForward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(segms);
return PointEdgeArrayDistanceForwardCpu(points, segms);
}

@@ -703,5 +742,8 @@ std::tuple<torch::Tensor, torch::Tensor> PointEdgeArrayDistanceBackward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points);
CHECK_CPU(segms);
CHECK_CPU(grad_dists);
return PointEdgeArrayDistanceBackwardCpu(points, segms, grad_dists);
}

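All of the point/face/edge distance entry points above follow the same dispatch shape, and the added CHECK_CPU lines complete it: route CUDA tensors to the CUDA kernel when built with GPU support, error out when not, and validate that everything really lives on the CPU before the CPU implementation runs. A schematic of that idiom, with placeholder names and the hypothetical CHECK_CPU_SKETCH stand-in from the earlier sketch (not one of the functions in this file):

    #include <torch/extension.h>

    // Hypothetical guard, standing in for the repo's CHECK_CPU macro.
    #define CHECK_CPU_SKETCH(x) \
      TORCH_CHECK(x.device().is_cpu(), #x " must be a CPU tensor")

    // Trivial CPU implementation so the sketch is self-contained.
    torch::Tensor ExampleDistanceCpu(const torch::Tensor& a, const torch::Tensor& b) {
      return (a - b).norm();
    }

    // Schematic dispatcher in the shape of the functions above.
    torch::Tensor ExampleDistance(const torch::Tensor& a, const torch::Tensor& b) {
      if (a.is_cuda() || b.is_cuda()) {
    #ifdef WITH_CUDA
        // ... real code would validate CUDA devices and call the CUDA kernel here ...
        AT_ERROR("CUDA path elided in this sketch.");
    #else
        AT_ERROR("Not compiled with GPU support.");
    #endif
      }
      // The lines this diff adds: fail loudly if a non-CPU tensor slipped through.
      CHECK_CPU_SKETCH(a);
      CHECK_CPU_SKETCH(b);
      return ExampleDistanceCpu(a, b);
    }
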
@@ -104,6 +104,12 @@ inline void PointsToVolumesForward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points_3d);
CHECK_CPU(points_features);
CHECK_CPU(volume_densities);
CHECK_CPU(volume_features);
CHECK_CPU(grid_sizes);
CHECK_CPU(mask);
PointsToVolumesForwardCpu(
points_3d,
points_features,
@@ -183,6 +189,14 @@ inline void PointsToVolumesBackward(
AT_ERROR("Not compiled with GPU support.");
#endif
}
CHECK_CPU(points_3d);
CHECK_CPU(points_features);
CHECK_CPU(grid_sizes);
CHECK_CPU(mask);
CHECK_CPU(grad_volume_densities);
CHECK_CPU(grad_volume_features);
CHECK_CPU(grad_points_3d);
CHECK_CPU(grad_points_features);
PointsToVolumesBackwardCpu(
points_3d,
points_features,

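PointsToVolumesForward and PointsToVolumesBackward take many tensor arguments, so the guard is repeated once per input, including the gradient tensors in the backward pass. If the list ever grows further, the same effect could be had with a small helper that checks a whole batch of tensors at once; this is purely an editor's sketch of that alternative, not code from the PR:

    #include <torch/extension.h>
    #include <initializer_list>

    // Editor's sketch: validate several tensors with one call instead of one
    // CHECK_CPU line per argument.
    inline void CheckAllCpu(std::initializer_list<at::Tensor> tensors) {
      for (const at::Tensor& t : tensors) {
        TORCH_CHECK(t.device().is_cpu(), "expected all inputs to be CPU tensors");
      }
    }

    // Usage (argument names borrowed from the hunk above):
    //   CheckAllCpu({points_3d, points_features, volume_densities, volume_features,
    //                grid_sizes, mask});
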
@@ -8,9 +8,7 @@

#include <torch/csrc/autograd/VariableTypeUtils.h>
#include <torch/extension.h>
#include <algorithm>
#include <cmath>
#include <thread>
#include <vector>

// In the x direction, the location {0, ..., grid_size_x - 1} correspond to

@@ -15,8 +15,8 @@
#endif

#if defined(_WIN64) || defined(_WIN32)
#define uint unsigned int
#define ushort unsigned short
using uint = unsigned int;
using ushort = unsigned short;
#endif

#include "./logging.h" // <- include before torch/extension.h
@@ -30,11 +30,20 @@
#define GLOBAL __global__
#define RESTRICT __restrict__
#define DEBUGBREAK()
#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
#pragma nv_diag_suppress 1866
#pragma nv_diag_suppress 2941
#pragma nv_diag_suppress 2951
#pragma nv_diag_suppress 2967
#else
#if !defined(USE_ROCM)
#pragma diag_suppress = attribute_not_allowed
#pragma diag_suppress = 1866
#pragma diag_suppress = 2941
#pragma diag_suppress = 2951
#pragma diag_suppress = 2967
#endif //! USE_ROCM
#endif
#else // __CUDACC__
#define INLINE inline
#define HOST
@@ -49,6 +58,9 @@
#pragma clang diagnostic pop
#ifdef WITH_CUDA
#include <ATen/cuda/CUDAContext.h>
#if !defined(USE_ROCM)
#include <vector_functions.h>
#endif //! USE_ROCM
#else
#ifndef cudaStream_t
typedef void* cudaStream_t;
@@ -65,8 +77,6 @@ struct float2 {
struct float3 {
float x, y, z;
};
#endif
namespace py = pybind11;
inline float3 make_float3(const float& x, const float& y, const float& z) {
float3 res;
res.x = x;
@@ -74,6 +84,8 @@ inline float3 make_float3(const float& x, const float& y, const float& z) {
res.z = z;
return res;
}
#endif
namespace py = pybind11;

inline bool operator==(const float3& a, const float3& b) {
return a.x == b.x && a.y == b.y && a.z == b.z;

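Several cleanups are visible in these global-header hunks: on Windows, the uint/ushort fallbacks become proper using-aliases instead of object-like macros; the NVCC warning suppressions gain numbered nv_diag_suppress/diag_suppress variants behind a USE_ROCM guard; vector_functions.h is pulled in for non-ROCm CUDA builds; and the #endif plus the pybind11 namespace alias move below make_float3, apparently so the local make_float3 helper is only compiled in the non-CUDA configuration. The macro-to-alias change is the one worth a small illustration: a #define rewrites every later occurrence of the token, whereas a type alias is scoped and leaves unrelated identifiers alone. A minimal example of the hazard, using a hypothetical struct rather than anything from the repo:

    // With an object-like macro, every later use of the token is rewritten:
    //
    //   #define uint unsigned int
    //   struct Foo { int uint; };   // expands to "int unsigned int;" -> does not compile
    //
    // A scoped alias only introduces a type name and cannot mangle other identifiers:
    using uint_t = unsigned int;     // hypothetical name; the header aliases `uint` itself

    uint_t half(uint_t x) { return x / 2u; }
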
@@ -59,6 +59,11 @@ getLastCudaError(const char* errorMessage, const char* file, const int line) {
#define SHARED __shared__
#define ACTIVEMASK() __activemask()
#define BALLOT(mask, val) __ballot_sync((mask), val)

/* TODO (ROCM-6.2): None of the WARP_* are used anywhere and ROCM-6.2 natively
* supports __shfl_*. Disabling until the move to ROCM-6.2.
*/
#if !defined(USE_ROCM)
/**
* Find the cumulative sum within a warp up to the current
* thread lane, with each mask thread contributing base.
@@ -115,6 +120,7 @@ INLINE DEVICE float3 WARP_SUM_FLOAT3(
ret.z = WARP_SUM(group, mask, base.z);
return ret;
}
#endif //! USE_ROCM

// Floating point.
// #define FMUL(a, b) __fmul_rn((a), (b))
@@ -142,6 +148,7 @@ INLINE DEVICE float3 WARP_SUM_FLOAT3(
#define FMA(x, y, z) __fmaf_rn((x), (y), (z))
#define I2F(a) __int2float_rn(a)
#define FRCP(x) __frcp_rn(x)
#if !defined(USE_ROCM)
__device__ static float atomicMax(float* address, float val) {
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
@@ -166,6 +173,7 @@ __device__ static float atomicMin(float* address, float val) {
} while (assumed != old);
return __int_as_float(old);
}
#endif //! USE_ROCM
#define DMAX(a, b) FMAX(a, b)
#define DMIN(a, b) FMIN(a, b)
#define DSQRT(a) sqrt(a)
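
The #if !defined(USE_ROCM) ... #endif guards added above take the warp-scan helpers and the hand-rolled float atomicMax/atomicMin out of the ROCm/HIP build; the TODO comment in the hunk explains the warp part, and for the atomics the likely reason (an inference, not stated in the diff) is that HIP already provides float overloads, so redefining them would collide. The custom CUDA versions implement an atomic float max/min by reinterpreting the float's bits as an int and retrying a compare-and-swap until the update sticks; a host-side C++ analogue of that pattern, for illustration only:

    #include <atomic>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // CAS-loop float max over an int-typed atomic, mirroring the device-side trick.
    float AtomicMaxFloat(std::atomic<std::int32_t>& bits, float val) {
      std::int32_t observed = bits.load();
      while (true) {
        float current;
        std::memcpy(&current, &observed, sizeof(current));  // bit-cast int -> float
        if (current >= val) {
          return current;                                   // already at least val
        }
        std::int32_t desired;
        std::memcpy(&desired, &val, sizeof(desired));       // bit-cast float -> int
        if (bits.compare_exchange_weak(observed, desired)) {
          return current;                                   // our value stuck
        }
        // On failure `observed` was refreshed; loop and retry.
      }
    }

    int main() {
      float init = 1.0f;
      std::int32_t init_bits;
      std::memcpy(&init_bits, &init, sizeof(init_bits));
      std::atomic<std::int32_t> bits(init_bits);
      AtomicMaxFloat(bits, 3.5f);
      std::int32_t out_bits = bits.load();
      float out;
      std::memcpy(&out, &out_bits, sizeof(out));
      std::cout << out << "\n";  // prints 3.5
      return 0;
    }
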
@@ -409,7 +417,7 @@ __device__ static float atomicMin(float* address, float val) {
(OUT_PTR), \
(NUM_SELECTED_PTR), \
(NUM_ITEMS), \
stream = (STREAM));
(STREAM));

#define COPY_HOST_DEV(PTR_D, PTR_H, TYPE, SIZE) \
HANDLECUDA(cudaMemcpy( \
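
The last hunk drops a stray `stream =` from the argument list that a device-selection macro forwards to the call it wraps. C++ has no keyword arguments, so `stream = (STREAM)` inside a call is just an assignment expression and only compiles if some variable named stream happens to be in scope at the expansion site; passing the stream positionally makes the expansion independent of the surrounding names. A tiny illustration with a generic function, not the actual call the macro wraps:

    #include <cstdio>

    // Hypothetical API taking the stream as its last positional parameter
    // (a stand-in for the wrapped call, not its real signature).
    static void RunSelect(const int* in, int* out, int n, void* stream) {
      (void)stream;
      for (int i = 0; i < n; ++i) out[i] = in[i];
    }

    int main() {
      int in[3] = {1, 2, 3};
      int out[3] = {};
      void* my_stream = nullptr;
      // RunSelect(in, out, 3, stream = my_stream);  // would not compile: C++ has no
      //                                             // keyword arguments, and `stream`
      //                                             // is not an identifier in this scope
      RunSelect(in, out, 3, my_stream);              // pass the stream positionally
      std::printf("%d %d %d\n", out[0], out[1], out[2]);
      return 0;
    }
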
Some files were not shown because too many files have changed in this diff.