mirror of
https://github.com/facebookresearch/pytorch3d.git
synced 2026-02-28 17:26:03 +08:00
Compare commits
217 Commits
v0.6.1
...
classner-p
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e7c1f026ea | ||
|
|
cb49550486 | ||
|
|
36edf2b302 | ||
|
|
78bb6d17fa | ||
|
|
54c75b4114 | ||
|
|
3783437d2f | ||
|
|
b2dc520210 | ||
|
|
8597d4c5c1 | ||
|
|
38fd8380f7 | ||
|
|
67840f8320 | ||
|
|
9b2e570536 | ||
|
|
0f966217e5 | ||
|
|
379c8b2780 | ||
|
|
8e0c82b89a | ||
|
|
8ba9a694ee | ||
|
|
36ba079bef | ||
|
|
b95ec190af | ||
|
|
55f67b0d18 | ||
|
|
4261e59f51 | ||
|
|
af55ba01f8 | ||
|
|
d3b7f5f421 | ||
|
|
4ecc9ea89d | ||
|
|
8d10ba52b2 | ||
|
|
aa8b03f31d | ||
|
|
57a40b3688 | ||
|
|
522e5f0644 | ||
|
|
e8390d3500 | ||
|
|
4300030d7a | ||
|
|
00acf0b0c7 | ||
|
|
a94f3f4c4b | ||
|
|
efb721320a | ||
|
|
40fb189c29 | ||
|
|
4e87c2b7f1 | ||
|
|
771cf8a328 | ||
|
|
0dce883241 | ||
|
|
ae35824f21 | ||
|
|
f4dd151037 | ||
|
|
7ce8ed55e1 | ||
|
|
7e0146ece4 | ||
|
|
0e4c53c612 | ||
|
|
879495d38f | ||
|
|
5c1ca757bb | ||
|
|
3e4fb0b9d9 | ||
|
|
731ea53c80 | ||
|
|
2e42ef793f | ||
|
|
81d63c6382 | ||
|
|
28c1afaa9d | ||
|
|
cba26506b6 | ||
|
|
65f667fd2e | ||
|
|
7978ffd1e4 | ||
|
|
ea4f3260e4 | ||
|
|
023a2369ae | ||
|
|
c0f88e04a0 | ||
|
|
6275283202 | ||
|
|
1d43251391 | ||
|
|
1fb268dea6 | ||
|
|
8bc0a04e86 | ||
|
|
5cd70067e2 | ||
|
|
5b74a2cc27 | ||
|
|
49ed7b07b1 | ||
|
|
c6519f29f0 | ||
|
|
a42a89a5ba | ||
|
|
c31bf85a23 | ||
|
|
fbd3c679ac | ||
|
|
34f648ede0 | ||
|
|
f625fe1f8b | ||
|
|
7c25d34d22 | ||
|
|
c5a83f46ef | ||
|
|
1702c85bec | ||
|
|
90d00f1b2b | ||
|
|
d27ef14ec7 | ||
|
|
2d1c6d5d93 | ||
|
|
9fe15da3cd | ||
|
|
0f12c51646 | ||
|
|
79c61a2d86 | ||
|
|
69c6d06ed8 | ||
|
|
73dc109dba | ||
|
|
9ec9d057cc | ||
|
|
cd7b885169 | ||
|
|
f632c423ef | ||
|
|
f36b11fe49 | ||
|
|
ea5df60d72 | ||
|
|
4372001981 | ||
|
|
61e2b87019 | ||
|
|
0143d63ba8 | ||
|
|
899a3192b6 | ||
|
|
3b2300641a | ||
|
|
b5f3d3ce12 | ||
|
|
2c1901522a | ||
|
|
90ab219d88 | ||
|
|
9e57b994ca | ||
|
|
e767c4b548 | ||
|
|
e85fa03c5a | ||
|
|
47d06c8924 | ||
|
|
bef959c755 | ||
|
|
c21ba144e7 | ||
|
|
d737a05e55 | ||
|
|
2374d19da5 | ||
|
|
1f3953795c | ||
|
|
a6dada399d | ||
|
|
5c59841863 | ||
|
|
2c64635daa | ||
|
|
ec9580a1d4 | ||
|
|
44cb00e468 | ||
|
|
44ca5f95d9 | ||
|
|
a51a300827 | ||
|
|
2bd65027ca | ||
|
|
11635fbd7d | ||
|
|
a268b18e07 | ||
|
|
7ea0756b05 | ||
|
|
96889deab9 | ||
|
|
9f443ed26b | ||
|
|
9320100abc | ||
|
|
2edb93d184 | ||
|
|
41c594ca37 | ||
|
|
c3c4495c7a | ||
|
|
34bbb3ad32 | ||
|
|
df08ea8eb4 | ||
|
|
78fd5af1a6 | ||
|
|
0a7c354dc1 | ||
|
|
b79764ea69 | ||
|
|
b1ff9d9fd4 | ||
|
|
22f86072ca | ||
|
|
050f650ae8 | ||
|
|
8596fcacd2 | ||
|
|
7f097b064b | ||
|
|
aab95575a6 | ||
|
|
67fff956a2 | ||
|
|
4b94649f7b | ||
|
|
3809b6094c | ||
|
|
722646863c | ||
|
|
e10a90140d | ||
|
|
4c48beb226 | ||
|
|
4db9fc11d2 | ||
|
|
3b8a33e9c5 | ||
|
|
199309fcf7 | ||
|
|
6473aa316c | ||
|
|
2802fd9398 | ||
|
|
a999fc22ee | ||
|
|
24260130ce | ||
|
|
a54ad2b912 | ||
|
|
b602edccc4 | ||
|
|
21262e38c7 | ||
|
|
e332f9ffa4 | ||
|
|
0c3bed55be | ||
|
|
97894fb37b | ||
|
|
645a47d054 | ||
|
|
8ac5e8f083 | ||
|
|
92f9dfe9d6 | ||
|
|
f2cf9d4d0b | ||
|
|
e2622d79c0 | ||
|
|
c0bb49b5f6 | ||
|
|
05f656c01f | ||
|
|
4c22855a23 | ||
|
|
cdd2142dd5 | ||
|
|
0e377c6850 | ||
|
|
e64f25c255 | ||
|
|
c85673c626 | ||
|
|
3de3c13a0f | ||
|
|
9b5a3ffa6c | ||
|
|
1701b76a31 | ||
|
|
57a33b25c1 | ||
|
|
c371a9a6cc | ||
|
|
4a1f176054 | ||
|
|
16d0aa82c1 | ||
|
|
69b27d160e | ||
|
|
84a569c0aa | ||
|
|
471b126818 | ||
|
|
4d043fc9ac | ||
|
|
f816568735 | ||
|
|
0e88b21de6 | ||
|
|
1cbf80dab6 | ||
|
|
ee71c7c447 | ||
|
|
3de41223dd | ||
|
|
967a099231 | ||
|
|
feb5d36394 | ||
|
|
db1f7c4506 | ||
|
|
59972b121d | ||
|
|
c8f3d6bc0b | ||
|
|
2a1de3b610 | ||
|
|
ef21a6f6aa | ||
|
|
12f20d799e | ||
|
|
47c0997227 | ||
|
|
e9fb6c27e3 | ||
|
|
c2862ff427 | ||
|
|
5053142363 | ||
|
|
67778caee8 | ||
|
|
3eb4233844 | ||
|
|
174738c33e | ||
|
|
45d096e219 | ||
|
|
39bb2ce063 | ||
|
|
9e2bc3a17f | ||
|
|
fddd6a700f | ||
|
|
85cdcc252d | ||
|
|
fc4dd80208 | ||
|
|
9640560541 | ||
|
|
6726500ad3 | ||
|
|
d6a12afbe7 | ||
|
|
49f93b6388 | ||
|
|
741777b5b5 | ||
|
|
9eeb456e82 | ||
|
|
7660ed1876 | ||
|
|
52c71b8816 | ||
|
|
f9a26a22fc | ||
|
|
d67662d13c | ||
|
|
28ccdb7328 | ||
|
|
cc3259ba93 | ||
|
|
b51be58f63 | ||
|
|
7449951850 | ||
|
|
262c1bfcd4 | ||
|
|
eb2bbf8433 | ||
|
|
1152a93b72 | ||
|
|
315f2487db | ||
|
|
ccfb72cc50 | ||
|
|
069c9fd759 | ||
|
|
9eec430f1c | ||
|
|
f8fe9a2be1 |
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash -e
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -81,7 +81,7 @@ jobs:
|
||||
command: |
|
||||
export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64
|
||||
python3 setup.py build_ext --inplace
|
||||
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests
|
||||
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests -t .
|
||||
- run: python3 setup.py bdist_wheel
|
||||
|
||||
binary_linux_wheel:
|
||||
@@ -128,55 +128,15 @@ jobs:
|
||||
binary_linux_conda_cuda:
|
||||
<<: *binary_common
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
image: ubuntu-1604-cuda-10.2:202012-01
|
||||
resource_class: gpu.nvidia.small.multi
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Setup environment
|
||||
command: |
|
||||
set -e
|
||||
|
||||
curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
|
||||
curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
|
||||
|
||||
sudo apt-get update
|
||||
|
||||
sudo apt-get install \
|
||||
apt-transport-https \
|
||||
ca-certificates \
|
||||
curl \
|
||||
gnupg-agent \
|
||||
software-properties-common
|
||||
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
|
||||
sudo add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) \
|
||||
stable"
|
||||
|
||||
sudo apt-get update
|
||||
export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
|
||||
sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
|
||||
|
||||
# Add the package repositories
|
||||
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
|
||||
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
|
||||
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
|
||||
|
||||
export NVIDIA_CONTAINER_VERSION="1.0.3-1"
|
||||
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
|
||||
sudo systemctl restart docker
|
||||
|
||||
DRIVER_FN="NVIDIA-Linux-x86_64-460.84.run"
|
||||
wget "https://us.download.nvidia.com/XFree86/Linux-x86_64/460.84/$DRIVER_FN"
|
||||
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
|
||||
nvidia-smi
|
||||
|
||||
- run:
|
||||
name: Pull docker image
|
||||
command: |
|
||||
nvidia-smi
|
||||
set -e
|
||||
|
||||
{ docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
|
||||
@@ -222,28 +182,23 @@ workflows:
|
||||
# context: DOCKERHUB_TOKEN
|
||||
{{workflows()}}
|
||||
- binary_linux_conda_cuda:
|
||||
name: testrun_conda_cuda_py37_cu102_pyt170
|
||||
name: testrun_conda_cuda_py37_cu102_pyt190
|
||||
context: DOCKERHUB_TOKEN
|
||||
python_version: "3.7"
|
||||
pytorch_version: '1.7.0'
|
||||
cu_version: "cu102"
|
||||
- binary_macos_wheel:
|
||||
cu_version: cpu
|
||||
name: macos_wheel_py36_cpu
|
||||
python_version: '3.6'
|
||||
pytorch_version: '1.9.0'
|
||||
cu_version: "cu102"
|
||||
- binary_macos_wheel:
|
||||
cu_version: cpu
|
||||
name: macos_wheel_py37_cpu
|
||||
python_version: '3.7'
|
||||
pytorch_version: '1.9.0'
|
||||
pytorch_version: '1.12.0'
|
||||
- binary_macos_wheel:
|
||||
cu_version: cpu
|
||||
name: macos_wheel_py38_cpu
|
||||
python_version: '3.8'
|
||||
pytorch_version: '1.9.0'
|
||||
pytorch_version: '1.12.0'
|
||||
- binary_macos_wheel:
|
||||
cu_version: cpu
|
||||
name: macos_wheel_py39_cpu
|
||||
python_version: '3.9'
|
||||
pytorch_version: '1.9.0'
|
||||
pytorch_version: '1.12.0'
|
||||
|
||||
@@ -81,7 +81,7 @@ jobs:
|
||||
command: |
|
||||
export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64
|
||||
python3 setup.py build_ext --inplace
|
||||
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests
|
||||
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests -t .
|
||||
- run: python3 setup.py bdist_wheel
|
||||
|
||||
binary_linux_wheel:
|
||||
@@ -128,55 +128,15 @@ jobs:
|
||||
binary_linux_conda_cuda:
|
||||
<<: *binary_common
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
image: ubuntu-1604-cuda-10.2:202012-01
|
||||
resource_class: gpu.nvidia.small.multi
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Setup environment
|
||||
command: |
|
||||
set -e
|
||||
|
||||
curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
|
||||
curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
|
||||
|
||||
sudo apt-get update
|
||||
|
||||
sudo apt-get install \
|
||||
apt-transport-https \
|
||||
ca-certificates \
|
||||
curl \
|
||||
gnupg-agent \
|
||||
software-properties-common
|
||||
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
|
||||
sudo add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) \
|
||||
stable"
|
||||
|
||||
sudo apt-get update
|
||||
export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
|
||||
sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
|
||||
|
||||
# Add the package repositories
|
||||
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
|
||||
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
|
||||
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
|
||||
|
||||
export NVIDIA_CONTAINER_VERSION="1.0.3-1"
|
||||
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
|
||||
sudo systemctl restart docker
|
||||
|
||||
DRIVER_FN="NVIDIA-Linux-x86_64-460.84.run"
|
||||
wget "https://us.download.nvidia.com/XFree86/Linux-x86_64/460.84/$DRIVER_FN"
|
||||
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
|
||||
nvidia-smi
|
||||
|
||||
- run:
|
||||
name: Pull docker image
|
||||
command: |
|
||||
nvidia-smi
|
||||
set -e
|
||||
|
||||
{ docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
|
||||
@@ -220,193 +180,6 @@ workflows:
|
||||
jobs:
|
||||
# - main:
|
||||
# context: DOCKERHUB_TOKEN
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu92
|
||||
name: linux_conda_py36_cu92_pyt160
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.6.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py36_cu101_pyt160
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.6.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py36_cu102_pyt160
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.6.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py36_cu101_pyt170
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.7.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py36_cu102_pyt170
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.7.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu110
|
||||
name: linux_conda_py36_cu110_pyt170
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.7.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py36_cu101_pyt171
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.7.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py36_cu102_pyt171
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.7.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu110
|
||||
name: linux_conda_py36_cu110_pyt171
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.7.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py36_cu101_pyt180
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.8.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py36_cu102_pyt180
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.8.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py36_cu111_pyt180
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.8.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py36_cu101_pyt181
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.8.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py36_cu102_pyt181
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.8.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py36_cu111_pyt181
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.8.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py36_cu102_pyt190
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.9.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py36_cu111_pyt190
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.9.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py36_cu102_pyt191
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.9.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py36_cu111_pyt191
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.9.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py36_cu102_pyt1100
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.10.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py36_cu111_pyt1100
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.10.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py36_cu113_pyt1100
|
||||
python_version: '3.6'
|
||||
pytorch_version: 1.10.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu92
|
||||
name: linux_conda_py37_cu92_pyt160
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.6.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py37_cu101_pyt160
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.6.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py37_cu102_pyt160
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.6.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py37_cu101_pyt170
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.7.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py37_cu102_pyt170
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.7.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu110
|
||||
name: linux_conda_py37_cu110_pyt170
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.7.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py37_cu101_pyt171
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.7.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py37_cu102_pyt171
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.7.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu110
|
||||
name: linux_conda_py37_cu110_pyt171
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.7.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
@@ -488,58 +261,88 @@ workflows:
|
||||
pytorch_version: 1.10.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu92
|
||||
name: linux_conda_py38_cu92_pyt160
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.6.0
|
||||
cu_version: cu102
|
||||
name: linux_conda_py37_cu102_pyt1101
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py38_cu101_pyt160
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.6.0
|
||||
cu_version: cu111
|
||||
name: linux_conda_py37_cu111_pyt1101
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py37_cu113_pyt1101
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py38_cu102_pyt160
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.6.0
|
||||
name: linux_conda_py37_cu102_pyt1102
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py38_cu101_pyt170
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.7.0
|
||||
cu_version: cu111
|
||||
name: linux_conda_py37_cu111_pyt1102
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py37_cu113_pyt1102
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py38_cu102_pyt170
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.7.0
|
||||
name: linux_conda_py37_cu102_pyt1110
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu110
|
||||
name: linux_conda_py38_cu110_pyt170
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.7.0
|
||||
cu_version: cu111
|
||||
name: linux_conda_py37_cu111_pyt1110
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py38_cu101_pyt171
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.7.1
|
||||
cu_version: cu113
|
||||
name: linux_conda_py37_cu113_pyt1110
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda115
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu115
|
||||
name: linux_conda_py37_cu115_pyt1110
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py38_cu102_pyt171
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.7.1
|
||||
name: linux_conda_py37_cu102_pyt1120
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu110
|
||||
name: linux_conda_py38_cu110_pyt171
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.7.1
|
||||
cu_version: cu113
|
||||
name: linux_conda_py37_cu113_pyt1120
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda116
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu116
|
||||
name: linux_conda_py37_cu116_pyt1120
|
||||
python_version: '3.7'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
@@ -621,22 +424,88 @@ workflows:
|
||||
pytorch_version: 1.10.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
name: linux_conda_py39_cu101_pyt171
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.7.1
|
||||
cu_version: cu102
|
||||
name: linux_conda_py38_cu102_pyt1101
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py38_cu111_pyt1101
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py38_cu113_pyt1101
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py39_cu102_pyt171
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.7.1
|
||||
name: linux_conda_py38_cu102_pyt1102
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu110
|
||||
name: linux_conda_py39_cu110_pyt171
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.7.1
|
||||
cu_version: cu111
|
||||
name: linux_conda_py38_cu111_pyt1102
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py38_cu113_pyt1102
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py38_cu102_pyt1110
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py38_cu111_pyt1110
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py38_cu113_pyt1110
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda115
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu115
|
||||
name: linux_conda_py38_cu115_pyt1110
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py38_cu102_pyt1120
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py38_cu113_pyt1120
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda116
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu116
|
||||
name: linux_conda_py38_cu116_pyt1120
|
||||
python_version: '3.8'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu101
|
||||
@@ -716,29 +585,154 @@ workflows:
|
||||
name: linux_conda_py39_cu113_pyt1100
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.10.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py39_cu102_pyt1101
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py39_cu111_pyt1101
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py39_cu113_pyt1101
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.10.1
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py39_cu102_pyt1102
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py39_cu111_pyt1102
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py39_cu113_pyt1102
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.10.2
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py39_cu102_pyt1110
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py39_cu111_pyt1110
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py39_cu113_pyt1110
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda115
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu115
|
||||
name: linux_conda_py39_cu115_pyt1110
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py39_cu102_pyt1120
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py39_cu113_pyt1120
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda116
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu116
|
||||
name: linux_conda_py39_cu116_pyt1120
|
||||
python_version: '3.9'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py310_cu102_pyt1110
|
||||
python_version: '3.10'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu111
|
||||
name: linux_conda_py310_cu111_pyt1110
|
||||
python_version: '3.10'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py310_cu113_pyt1110
|
||||
python_version: '3.10'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda115
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu115
|
||||
name: linux_conda_py310_cu115_pyt1110
|
||||
python_version: '3.10'
|
||||
pytorch_version: 1.11.0
|
||||
- binary_linux_conda:
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu102
|
||||
name: linux_conda_py310_cu102_pyt1120
|
||||
python_version: '3.10'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda113
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu113
|
||||
name: linux_conda_py310_cu113_pyt1120
|
||||
python_version: '3.10'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda:
|
||||
conda_docker_image: pytorch/conda-builder:cuda116
|
||||
context: DOCKERHUB_TOKEN
|
||||
cu_version: cu116
|
||||
name: linux_conda_py310_cu116_pyt1120
|
||||
python_version: '3.10'
|
||||
pytorch_version: 1.12.0
|
||||
- binary_linux_conda_cuda:
|
||||
name: testrun_conda_cuda_py37_cu102_pyt170
|
||||
name: testrun_conda_cuda_py37_cu102_pyt190
|
||||
context: DOCKERHUB_TOKEN
|
||||
python_version: "3.7"
|
||||
pytorch_version: '1.7.0'
|
||||
cu_version: "cu102"
|
||||
- binary_macos_wheel:
|
||||
cu_version: cpu
|
||||
name: macos_wheel_py36_cpu
|
||||
python_version: '3.6'
|
||||
pytorch_version: '1.9.0'
|
||||
cu_version: "cu102"
|
||||
- binary_macos_wheel:
|
||||
cu_version: cpu
|
||||
name: macos_wheel_py37_cpu
|
||||
python_version: '3.7'
|
||||
pytorch_version: '1.9.0'
|
||||
pytorch_version: '1.12.0'
|
||||
- binary_macos_wheel:
|
||||
cu_version: cpu
|
||||
name: macos_wheel_py38_cpu
|
||||
python_version: '3.8'
|
||||
pytorch_version: '1.9.0'
|
||||
pytorch_version: '1.12.0'
|
||||
- binary_macos_wheel:
|
||||
cu_version: cpu
|
||||
name: macos_wheel_py39_cpu
|
||||
python_version: '3.9'
|
||||
pytorch_version: '1.9.0'
|
||||
pytorch_version: '1.12.0'
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
@@ -13,40 +13,58 @@ import os.path
|
||||
|
||||
import jinja2
|
||||
import yaml
|
||||
from packaging import version
|
||||
|
||||
|
||||
# The CUDA versions which have pytorch conda packages available for linux for each
|
||||
# version of pytorch.
|
||||
# Pytorch 1.4 also supports cuda 10.0 but we no longer build for cuda 10.0 at all.
|
||||
CONDA_CUDA_VERSIONS = {
|
||||
"1.6.0": ["cu92", "cu101", "cu102"],
|
||||
"1.7.0": ["cu101", "cu102", "cu110"],
|
||||
"1.7.1": ["cu101", "cu102", "cu110"],
|
||||
"1.8.0": ["cu101", "cu102", "cu111"],
|
||||
"1.8.1": ["cu101", "cu102", "cu111"],
|
||||
"1.9.0": ["cu102", "cu111"],
|
||||
"1.9.1": ["cu102", "cu111"],
|
||||
"1.10.0": ["cu102", "cu111", "cu113"],
|
||||
"1.10.1": ["cu102", "cu111", "cu113"],
|
||||
"1.10.2": ["cu102", "cu111", "cu113"],
|
||||
"1.11.0": ["cu102", "cu111", "cu113", "cu115"],
|
||||
"1.12.0": ["cu102", "cu113", "cu116"],
|
||||
}
|
||||
|
||||
|
||||
def conda_docker_image_for_cuda(cuda_version):
|
||||
if cuda_version in ("cu101", "cu102", "cu111"):
|
||||
return None
|
||||
if cuda_version == "cu113":
|
||||
return "pytorch/conda-builder:cuda113"
|
||||
return None
|
||||
if cuda_version == "cu115":
|
||||
return "pytorch/conda-builder:cuda115"
|
||||
if cuda_version == "cu116":
|
||||
return "pytorch/conda-builder:cuda116"
|
||||
raise ValueError("Unknown cuda version")
|
||||
|
||||
|
||||
def pytorch_versions_for_python(python_version):
|
||||
if python_version in ["3.6", "3.7", "3.8"]:
|
||||
if python_version in ["3.7", "3.8"]:
|
||||
return list(CONDA_CUDA_VERSIONS)
|
||||
pytorch_without_py39 = ["1.4", "1.5.0", "1.5.1", "1.6.0", "1.7.0"]
|
||||
return [i for i in CONDA_CUDA_VERSIONS if i not in pytorch_without_py39]
|
||||
if python_version == "3.9":
|
||||
return [
|
||||
i
|
||||
for i in CONDA_CUDA_VERSIONS
|
||||
if version.Version(i) > version.Version("1.7.0")
|
||||
]
|
||||
if python_version == "3.10":
|
||||
return [
|
||||
i
|
||||
for i in CONDA_CUDA_VERSIONS
|
||||
if version.Version(i) >= version.Version("1.11.0")
|
||||
]
|
||||
|
||||
|
||||
def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
|
||||
w = []
|
||||
for btype in ["conda"]:
|
||||
for python_version in ["3.6", "3.7", "3.8", "3.9"]:
|
||||
for python_version in ["3.7", "3.8", "3.9", "3.10"]:
|
||||
for pytorch_version in pytorch_versions_for_python(python_version):
|
||||
for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
|
||||
w += workflow_pair(
|
||||
|
||||
2
.github/CONTRIBUTING.md
vendored
2
.github/CONTRIBUTING.md
vendored
@@ -46,7 +46,7 @@ outlined on that page and do not file a public issue.
|
||||
## Coding Style
|
||||
We follow these [python](http://google.github.io/styleguide/pyguide.html) and [C++](https://google.github.io/styleguide/cppguide.html) style guides.
|
||||
|
||||
For the linter to work, you will need to install `black`, `flake`, `isort` and `clang-format`, and
|
||||
For the linter to work, you will need to install `black`, `flake`, `usort` and `clang-format`, and
|
||||
they need to be fairly up to date.
|
||||
|
||||
## License
|
||||
|
||||
27
INSTALL.md
27
INSTALL.md
@@ -9,7 +9,7 @@ The core library is written in PyTorch. Several components have underlying imple
|
||||
|
||||
- Linux or macOS or Windows
|
||||
- Python 3.6, 3.7, 3.8 or 3.9
|
||||
- PyTorch 1.6.0, 1.7.0, 1.7.1, 1.8.0, 1.8.1, 1.9.0, 1.9.1 or 1.10.0.
|
||||
- PyTorch 1.8.0, 1.8.1, 1.9.0, 1.9.1, 1.10.0, 1.10.1, 1.10.2, 1.11.0 or 1.12.0.
|
||||
- torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
|
||||
- gcc & g++ ≥ 4.9
|
||||
- [fvcore](https://github.com/facebookresearch/fvcore)
|
||||
@@ -43,7 +43,7 @@ export CUB_HOME=$PWD/cub-1.10.0
|
||||
For developing on top of PyTorch3D or contributing, you will need to run the linter and tests. If you want to run any of the notebook tutorials as `docs/tutorials` or the examples in `docs/examples` you will also need matplotlib and OpenCV.
|
||||
- scikit-image
|
||||
- black
|
||||
- isort
|
||||
- usort
|
||||
- flake8
|
||||
- matplotlib
|
||||
- tdqm
|
||||
@@ -59,7 +59,7 @@ conda install jupyter
|
||||
pip install scikit-image matplotlib imageio plotly opencv-python
|
||||
|
||||
# Tests/Linting
|
||||
pip install black 'isort<5' flake8 flake8-bugbear flake8-comprehensions
|
||||
pip install black usort flake8 flake8-bugbear flake8-comprehensions
|
||||
```
|
||||
|
||||
## Installing prebuilt binaries for PyTorch3D
|
||||
@@ -78,30 +78,31 @@ Or, to install a nightly (non-official, alpha) build:
|
||||
conda install pytorch3d -c pytorch3d-nightly
|
||||
```
|
||||
### 2. Install from PyPI, on Mac only.
|
||||
This works with pytorch 1.9.0 only. The build is CPU only.
|
||||
This works with pytorch 1.12.0 only. The build is CPU only.
|
||||
```
|
||||
pip install pytorch3d
|
||||
```
|
||||
|
||||
### 3. Install wheels for Linux
|
||||
We have prebuilt wheels with CUDA for Linux for PyTorch 1.10.0, for each of the CUDA versions that they support,
|
||||
for Python 3.7, 3.8 and 3.9.
|
||||
We have prebuilt wheels with CUDA for Linux for PyTorch 1.11.0, for each of the supported CUDA versions,
|
||||
for Python 3.7, 3.8 and 3.9. This is for ease of use on Google Colab.
|
||||
These are installed in a special way.
|
||||
For example, to install for Python 3.8, PyTorch 1.9.0 and CUDA 10.2
|
||||
For example, to install for Python 3.8, PyTorch 1.11.0 and CUDA 11.3
|
||||
```
|
||||
pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu102_pyt1100/download.html
|
||||
pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html
|
||||
```
|
||||
|
||||
In general, from inside IPython, or in Google Colab or a jupyter notebook, you can install with
|
||||
```
|
||||
import sys
|
||||
import torch
|
||||
pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
|
||||
version_str="".join([
|
||||
f"py3{sys.version_info.minor}_cu",
|
||||
torch.version.cuda.replace(".",""),
|
||||
f"_pyt{torch.__version__[0:5:2]}"
|
||||
f"_pyt{pyt_version_str}"
|
||||
])
|
||||
!pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
||||
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
||||
```
|
||||
|
||||
## Building / installing from source.
|
||||
@@ -146,10 +147,10 @@ After any necessary patching, you can go to "x64 Native Tools Command Prompt for
|
||||
cd pytorch3d
|
||||
python3 setup.py install
|
||||
```
|
||||
After installing, verify whether all unit tests have passed
|
||||
|
||||
After installing, you can run **unit tests**
|
||||
```
|
||||
cd tests
|
||||
python3 -m unittest discover -p *.py
|
||||
python3 -m unittest discover -v -s tests -t .
|
||||
```
|
||||
|
||||
# FAQ
|
||||
|
||||
4
LICENSE
4
LICENSE
@@ -2,7 +2,7 @@ BSD License
|
||||
|
||||
For PyTorch3D software
|
||||
|
||||
Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
@@ -14,7 +14,7 @@ are permitted provided that the following conditions are met:
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name Facebook nor the names of its contributors may be used to
|
||||
* Neither the name Meta nor the names of its contributors may be used to
|
||||
endorse or promote products derived from this software without specific
|
||||
prior written permission.
|
||||
|
||||
|
||||
71
LICENSE-3RD-PARTY
Normal file
71
LICENSE-3RD-PARTY
Normal file
@@ -0,0 +1,71 @@
|
||||
SRN license ( https://github.com/vsitzmann/scene-representation-networks/ ):
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019 Vincent Sitzmann
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
|
||||
IDR license ( github.com/lioryariv/idr ):
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2020 Lior Yariv
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
|
||||
NeRF https://github.com/bmild/nerf/
|
||||
|
||||
Copyright (c) 2020 bmild
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -136,6 +136,8 @@ If you are using the pulsar backend for sphere-rendering (the `PulsarPointRender
|
||||
|
||||
Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).
|
||||
|
||||
**[Dec 16th 2021]:** PyTorch3D [v0.6.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.1) released
|
||||
|
||||
**[Oct 6th 2021]:** PyTorch3D [v0.6.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.0) released
|
||||
|
||||
**[Aug 5th 2021]:** PyTorch3D [v0.5.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.5.0) released
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash -e
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
@@ -7,23 +7,17 @@
|
||||
|
||||
# Run this script at project root by "./dev/linter.sh" before you commit
|
||||
|
||||
{
|
||||
V=$(black --version|cut '-d ' -f3)
|
||||
code='import distutils.version; assert "19.3" < distutils.version.LooseVersion("'$V'")'
|
||||
PYTHON=false
|
||||
command -v python > /dev/null && PYTHON=python
|
||||
command -v python3 > /dev/null && PYTHON=python3
|
||||
${PYTHON} -c "${code}" 2> /dev/null
|
||||
} || {
|
||||
echo "Linter requires black 19.3b0 or higher!"
|
||||
exit 1
|
||||
}
|
||||
|
||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
DIR=$(dirname "${DIR}")
|
||||
|
||||
echo "Running isort..."
|
||||
isort -y -sp "${DIR}"
|
||||
if [[ -f "${DIR}/TARGETS" ]]
|
||||
then
|
||||
pyfmt "${DIR}"
|
||||
else
|
||||
# run usort externally only
|
||||
echo "Running usort..."
|
||||
usort "${DIR}"
|
||||
fi
|
||||
|
||||
echo "Running black..."
|
||||
black "${DIR}"
|
||||
@@ -36,7 +30,7 @@ clangformat=$(command -v clang-format-8 || echo clang-format)
|
||||
find "${DIR}" -regex ".*\.\(cpp\|c\|cc\|cu\|cuh\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 "${clangformat}" -i
|
||||
|
||||
# Run arc and pyre internally only.
|
||||
if [[ -f "${DIR}/tests/TARGETS" ]]
|
||||
if [[ -f "${DIR}/TARGETS" ]]
|
||||
then
|
||||
(cd "${DIR}"; command -v arc > /dev/null && arc lint) || true
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
64
dev/test_list.py
Normal file
64
dev/test_list.py
Normal file
@@ -0,0 +1,64 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import ast
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
|
||||
"""
|
||||
This module outputs a list of tests for completion.
|
||||
It has no dependencies.
|
||||
"""
|
||||
|
||||
|
||||
def get_test_files() -> List[Path]:
|
||||
root = Path(__file__).parent.parent
|
||||
return list((root / "tests").glob("**/test*.py"))
|
||||
|
||||
|
||||
def tests_from_file(path: Path, base: str) -> List[str]:
|
||||
"""
|
||||
Returns all the tests in the given file, in format
|
||||
expected as arguments when running the tests.
|
||||
e.g.
|
||||
file_stem
|
||||
file_stem.TestFunctionality
|
||||
file_stem.TestFunctionality.test_f
|
||||
file_stem.TestFunctionality.test_g
|
||||
"""
|
||||
with open(path) as f:
|
||||
node = ast.parse(f.read())
|
||||
out = [base]
|
||||
for cls in node.body:
|
||||
if not isinstance(cls, ast.ClassDef):
|
||||
continue
|
||||
if not cls.name.startswith("Test"):
|
||||
continue
|
||||
class_base = base + "." + cls.name
|
||||
out.append(class_base)
|
||||
for method in cls.body:
|
||||
if not isinstance(method, ast.FunctionDef):
|
||||
continue
|
||||
if not method.name.startswith("test"):
|
||||
continue
|
||||
out.append(class_base + "." + method.name)
|
||||
return out
|
||||
|
||||
|
||||
def main() -> None:
|
||||
files = get_test_files()
|
||||
test_root = Path(__file__).parent.parent
|
||||
all_tests = []
|
||||
for f in files:
|
||||
file_base = str(f.relative_to(test_root))[:-3].replace("/", ".")
|
||||
all_tests.extend(tests_from_file(f, file_base))
|
||||
for test in sorted(all_tests):
|
||||
print(test)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,4 +1,8 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# Minimal makefile for Sphinx documentation
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -3,7 +3,6 @@ API Documentation
|
||||
|
||||
.. toctree::
|
||||
|
||||
common
|
||||
structures
|
||||
io
|
||||
loss
|
||||
@@ -12,3 +11,5 @@ API Documentation
|
||||
transforms
|
||||
utils
|
||||
datasets
|
||||
common
|
||||
vis
|
||||
|
||||
6
docs/modules/vis.rst
Normal file
6
docs/modules/vis.rst
Normal file
@@ -0,0 +1,6 @@
|
||||
pytorch3d.vis
|
||||
===========================
|
||||
|
||||
.. automodule:: pytorch3d.vis
|
||||
:members:
|
||||
:undoc-members:
|
||||
@@ -45,7 +45,7 @@ All cameras inherit from `CamerasBase` which is a base class for all cameras. Py
|
||||
* `transform_points` which takes a set of input points in world coordinates and projects to NDC coordinates ranging from [-1, -1, znear] to [+1, +1, zfar].
|
||||
* `get_ndc_camera_transform` which defines the conversion to PyTorch3D's NDC space and is called when interfacing with the PyTorch3D renderer. If the camera is defined in NDC space, then the identity transform is returned. If the cameras is defined in screen space, the conversion from screen to NDC is returned. If users define their own camera in screen space, they need to think of the screen to NDC conversion. We provide examples for the `PerspectiveCameras` and `OrthographicCameras`.
|
||||
* `transform_points_ndc` which takes a set of points in world coordinates and projects them to PyTorch3D's NDC space
|
||||
* `transform_points_screen` which takes a set of input points in world coordinates and projects them to the screen coordinates ranging from [0, 0, znear] to [W-1, H-1, zfar]
|
||||
* `transform_points_screen` which takes a set of input points in world coordinates and projects them to the screen coordinates ranging from [0, 0, znear] to [W, H, zfar]
|
||||
|
||||
Users can easily customize their own cameras. For each new camera, users should implement the `get_projection_transform` routine that returns the mapping `P` from camera view coordinates to NDC coordinates.
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ Our implementation decouples the rasterization and shading steps of rendering. T
|
||||
|
||||
## <u>Get started</u>
|
||||
|
||||
To learn about more the implementation and start using the renderer refer to [getting started with renderer](renderer_getting_started.md), which also contains the [architecture overview](assets/architecture_overview.png) and [coordinate transformation conventions](assets/transformations_overview.png).
|
||||
To learn about more the implementation and start using the renderer refer to [getting started with renderer](renderer_getting_started.md), which also contains the [architecture overview](assets/architecture_renderer.jpg) and [coordinate transformation conventions](assets/transforms_overview.jpg).
|
||||
|
||||
## <u>Tech Report</u>
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -89,14 +89,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -76,14 +76,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -51,14 +51,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -90,14 +90,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -56,14 +56,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
@@ -98,7 +100,7 @@
|
||||
"from pytorch3d.transforms import so3_exp_map\n",
|
||||
"from pytorch3d.renderer import (\n",
|
||||
" FoVPerspectiveCameras, \n",
|
||||
" NDCGridRaysampler,\n",
|
||||
" NDCMultinomialRaysampler,\n",
|
||||
" MonteCarloRaysampler,\n",
|
||||
" EmissionAbsorptionRaymarcher,\n",
|
||||
" ImplicitRenderer,\n",
|
||||
@@ -184,7 +186,7 @@
|
||||
"The renderer is composed of a *raymarcher* and a *raysampler*.\n",
|
||||
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use two different raysamplers:\n",
|
||||
" - `MonteCarloRaysampler` is used to generate rays from a random subset of pixels of the image plane. The random subsampling of pixels is carried out during **training** to decrease the memory consumption of the implicit model.\n",
|
||||
" - `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, `NDCGridRaysampler` consumes a large amount of memory and, hence, is only used for visualizing the results of the training at **test** time.\n",
|
||||
" - `NDCMultinomialRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, `NDCMultinomialRaysampler` consumes a large amount of memory and, hence, is only used for visualizing the results of the training at **test** time.\n",
|
||||
"- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm."
|
||||
]
|
||||
},
|
||||
@@ -209,10 +211,10 @@
|
||||
"\n",
|
||||
"# 1) Instantiate the raysamplers.\n",
|
||||
"\n",
|
||||
"# Here, NDCGridRaysampler generates a rectangular image\n",
|
||||
"# Here, NDCMultinomialRaysampler generates a rectangular image\n",
|
||||
"# grid of rays whose coordinates follow the PyTorch3D\n",
|
||||
"# coordinate conventions.\n",
|
||||
"raysampler_grid = NDCGridRaysampler(\n",
|
||||
"raysampler_grid = NDCMultinomialRaysampler(\n",
|
||||
" image_height=render_size,\n",
|
||||
" image_width=render_size,\n",
|
||||
" n_pts_per_ray=128,\n",
|
||||
@@ -813,7 +815,7 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 5. Visualizing the optimized neural radiance field\n",
|
||||
"## 6. Visualizing the optimized neural radiance field\n",
|
||||
"\n",
|
||||
"Finally, we visualize the neural radiance field by rendering from multiple viewpoints that rotate around the volume's y-axis."
|
||||
]
|
||||
@@ -842,7 +844,7 @@
|
||||
" fov=target_cameras.fov[0],\n",
|
||||
" device=device,\n",
|
||||
" )\n",
|
||||
" # Note that we again render with `NDCGridRaySampler`\n",
|
||||
" # Note that we again render with `NDCMultinomialRaysampler`\n",
|
||||
" # and the batched_forward function of neural_radiance_field.\n",
|
||||
" frames.append(\n",
|
||||
" renderer_grid(\n",
|
||||
@@ -863,9 +865,9 @@
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## 6. Conclusion\n",
|
||||
"## 7. Conclusion\n",
|
||||
"\n",
|
||||
"In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's implicit function renderer composed of either a `MonteCarloRaysampler` or `NDCGridRaysampler`, and an `EmissionAbsorptionRaymarcher`."
|
||||
"In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's implicit function renderer composed of either a `MonteCarloRaysampler` or `NDCMultinomialRaysampler`, and an `EmissionAbsorptionRaymarcher`."
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -68,14 +68,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
@@ -116,7 +118,7 @@
|
||||
"from pytorch3d.structures import Meshes\n",
|
||||
"from pytorch3d.renderer import (\n",
|
||||
" look_at_view_transform,\n",
|
||||
" OpenGLPerspectiveCameras, \n",
|
||||
" FoVPerspectiveCameras, \n",
|
||||
" PointLights, \n",
|
||||
" DirectionalLights, \n",
|
||||
" Materials, \n",
|
||||
@@ -302,11 +304,11 @@
|
||||
"# broadcasting. So we can view the camera from the a distance of dist=2.7, and \n",
|
||||
"# then specify elevation and azimuth angles for each viewpoint as tensors. \n",
|
||||
"R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)\n",
|
||||
"cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)\n",
|
||||
"cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n",
|
||||
"\n",
|
||||
"# We arbitrarily choose one particular view that will be used to visualize \n",
|
||||
"# results\n",
|
||||
"camera = OpenGLPerspectiveCameras(device=device, R=R[None, 1, ...], \n",
|
||||
"camera = FoVPerspectiveCameras(device=device, R=R[None, 1, ...], \n",
|
||||
" T=T[None, 1, ...]) \n",
|
||||
"\n",
|
||||
"# Define the settings for rasterization and shading. Here we set the output \n",
|
||||
@@ -349,7 +351,7 @@
|
||||
"# Our multi-view cow dataset will be represented by these 2 lists of tensors,\n",
|
||||
"# each of length num_views.\n",
|
||||
"target_rgb = [target_images[i, ..., :3] for i in range(num_views)]\n",
|
||||
"target_cameras = [OpenGLPerspectiveCameras(device=device, R=R[None, i, ...], \n",
|
||||
"target_cameras = [FoVPerspectiveCameras(device=device, R=R[None, i, ...], \n",
|
||||
" T=T[None, i, ...]) for i in range(num_views)]"
|
||||
]
|
||||
},
|
||||
@@ -706,6 +708,7 @@
|
||||
" image_size=128, \n",
|
||||
" blur_radius=np.log(1. / 1e-4 - 1.)*sigma, \n",
|
||||
" faces_per_pixel=50, \n",
|
||||
" perspective_correct=False, \n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Differentiable soft renderer using per vertex RGB colors for texture\n",
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -47,14 +47,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
@@ -87,7 +89,7 @@
|
||||
"from pytorch3d.renderer import (\n",
|
||||
" FoVPerspectiveCameras, \n",
|
||||
" VolumeRenderer,\n",
|
||||
" NDCGridRaysampler,\n",
|
||||
" NDCMultinomialRaysampler,\n",
|
||||
" EmissionAbsorptionRaymarcher\n",
|
||||
")\n",
|
||||
"from pytorch3d.transforms import so3_exp_map\n",
|
||||
@@ -162,7 +164,7 @@
|
||||
"The following initializes a volumetric renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the volumetric model of the scene (the model is described & instantiated in a later cell).\n",
|
||||
"\n",
|
||||
"The renderer is composed of a *raymarcher* and a *raysampler*.\n",
|
||||
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).\n",
|
||||
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the `NDCMultinomialRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).\n",
|
||||
"- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm."
|
||||
]
|
||||
},
|
||||
@@ -184,14 +186,14 @@
|
||||
"volume_extent_world = 3.0\n",
|
||||
"\n",
|
||||
"# 1) Instantiate the raysampler.\n",
|
||||
"# Here, NDCGridRaysampler generates a rectangular image\n",
|
||||
"# Here, NDCMultinomialRaysampler generates a rectangular image\n",
|
||||
"# grid of rays whose coordinates follow the PyTorch3D\n",
|
||||
"# coordinate conventions.\n",
|
||||
"# Since we use a volume of size 128^3, we sample n_pts_per_ray=150,\n",
|
||||
"# which roughly corresponds to a one ray-point per voxel.\n",
|
||||
"# We further set the min_depth=0.1 since there is no surface within\n",
|
||||
"# 0.1 units of any camera plane.\n",
|
||||
"raysampler = NDCGridRaysampler(\n",
|
||||
"raysampler = NDCMultinomialRaysampler(\n",
|
||||
" image_width=render_size,\n",
|
||||
" image_height=render_size,\n",
|
||||
" n_pts_per_ray=150,\n",
|
||||
@@ -460,7 +462,7 @@
|
||||
"source": [
|
||||
"## 6. Conclusion\n",
|
||||
"\n",
|
||||
"In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's volumetric renderer composed of an `NDCGridRaysampler` and an `EmissionAbsorptionRaymarcher`."
|
||||
"In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's volumetric renderer composed of an `NDCMultinomialRaysampler` and an `EmissionAbsorptionRaymarcher`."
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -50,14 +50,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -57,14 +57,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -73,14 +73,16 @@
|
||||
"except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
"if need_pytorch3d:\n",
|
||||
" if torch.__version__.startswith(\"1.10.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||
" # We try to install PyTorch3D via a released wheel.\n",
|
||||
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||
" version_str=\"\".join([\n",
|
||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
||||
" f\"_pyt{pyt_version_str}\"\n",
|
||||
" ])\n",
|
||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" !pip install fvcore iopath\n",
|
||||
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||
" else:\n",
|
||||
" # We try to install PyTorch3D from source.\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
@@ -12,13 +12,13 @@ from pytorch3d.io import load_objs_as_meshes
|
||||
from pytorch3d.renderer import (
|
||||
BlendParams,
|
||||
FoVPerspectiveCameras,
|
||||
look_at_view_transform,
|
||||
MeshRasterizer,
|
||||
MeshRenderer,
|
||||
PointLights,
|
||||
RasterizationSettings,
|
||||
SoftPhongShader,
|
||||
SoftSilhouetteShader,
|
||||
look_at_view_transform,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
||||
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
@REM All rights reserved.
|
||||
@REM
|
||||
@REM This source code is licensed under the BSD-style license found in the
|
||||
@REM LICENSE file in the root directory of this source tree.
|
||||
|
||||
:: Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -20,10 +20,11 @@ commands.
|
||||
```
|
||||
import sys
|
||||
import torch
|
||||
pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
|
||||
version_str="".join([
|
||||
f"py3{sys.version_info.minor}_cu",
|
||||
torch.version.cuda.replace(".",""),
|
||||
f"_pyt{torch.__version__[0:5:2]}"
|
||||
f"_pyt{pyt_version_str}"
|
||||
])
|
||||
!pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
||||
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
||||
```
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
#!/usr/bin/bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
sudo docker run --rm -v "$PWD/../../:/inside" pytorch/conda-cuda bash inside/packaging/linux_wheels/inside.sh
|
||||
sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu113 pytorch/conda-builder:cuda113 bash inside/packaging/linux_wheels/inside.sh
|
||||
sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu115 pytorch/conda-builder:cuda115 bash inside/packaging/linux_wheels/inside.sh
|
||||
sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu116 pytorch/conda-builder:cuda116 bash inside/packaging/linux_wheels/inside.sh
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
@@ -26,20 +26,13 @@ echo "CUB_HOME is now $CUB_HOME"
|
||||
# As a rule, we want to build for any combination of dependencies which is supported by
|
||||
# PyTorch3D and not older than the current Google Colab set up.
|
||||
|
||||
PYTHON_VERSIONS="3.7 3.8 3.9"
|
||||
PYTHON_VERSIONS="3.7 3.8 3.9 3.10"
|
||||
# the keys are pytorch versions
|
||||
declare -A CONDA_CUDA_VERSIONS=(
|
||||
# ["1.4.0"]="cu101"
|
||||
# ["1.5.0"]="cu101 cu102"
|
||||
# ["1.5.1"]="cu101 cu102"
|
||||
# ["1.6.0"]="cu101 cu102"
|
||||
# ["1.7.0"]="cu101 cu102 cu110"
|
||||
# ["1.7.1"]="cu101 cu102 cu110"
|
||||
# ["1.8.0"]="cu101 cu102 cu111"
|
||||
# ["1.8.1"]="cu101 cu102 cu111"
|
||||
# ["1.9.0"]="cu102 cu111"
|
||||
# ["1.9.1"]="cu102 cu111"
|
||||
["1.10.0"]="cu102 cu113"
|
||||
["1.10.1"]="cu111 cu113"
|
||||
["1.10.2"]="cu111 cu113"
|
||||
["1.10.0"]="cu111 cu113"
|
||||
["1.11.0"]="cu111 cu113 cu115"
|
||||
)
|
||||
|
||||
|
||||
@@ -48,17 +41,49 @@ for python_version in $PYTHON_VERSIONS
|
||||
do
|
||||
for pytorch_version in "${!CONDA_CUDA_VERSIONS[@]}"
|
||||
do
|
||||
if [[ "3.6 3.7 3.8" != *$python_version* ]] && [[ "1.4.0 1.5.0 1.5.1 1.6.0 1.7.0" == *$pytorch_version* ]]
|
||||
if [[ "3.7 3.8" != *$python_version* ]] && [[ "1.7.0" == *$pytorch_version* ]]
|
||||
then
|
||||
#python 3.9 and later not supported by pytorch 1.7.0 and before
|
||||
continue
|
||||
fi
|
||||
if [[ "3.7 3.8 3.9" != *$python_version* ]] && [[ "1.7.0 1.7.1 1.8.0 1.8.1 1.9.0 1.9.1 1.10.0 1.10.1 1.10.2" == *$pytorch_version* ]]
|
||||
then
|
||||
#python 3.10 and later not supported by pytorch 1.10.2 and before
|
||||
continue
|
||||
fi
|
||||
|
||||
extra_channel="-c conda-forge"
|
||||
if [[ "1.11.0" == "$pytorch_version" ]]
|
||||
then
|
||||
extra_channel=""
|
||||
fi
|
||||
|
||||
for cu_version in ${CONDA_CUDA_VERSIONS[$pytorch_version]}
|
||||
do
|
||||
if [[ "cu113 cu115 cu116" == *$cu_version* ]]
|
||||
# ^^^ CUDA versions listed here have to be built
|
||||
# in their own containers.
|
||||
then
|
||||
if [[ $SELECTED_CUDA != "$cu_version" ]]
|
||||
then
|
||||
continue
|
||||
fi
|
||||
elif [[ $SELECTED_CUDA != "" ]]
|
||||
then
|
||||
continue
|
||||
fi
|
||||
|
||||
case "$cu_version" in
|
||||
cu116)
|
||||
export CUDA_HOME=/usr/local/cuda-11.6/
|
||||
export CUDA_TAG=11.6
|
||||
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||
;;
|
||||
cu115)
|
||||
export CUDA_HOME=/usr/local/cuda-11.5/
|
||||
export CUDA_TAG=11.5
|
||||
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||
;;
|
||||
cu113)
|
||||
export CUDA_HOME=/usr/local/cuda-11.3/
|
||||
export CUDA_TAG=11.3
|
||||
@@ -104,6 +129,7 @@ do
|
||||
|
||||
conda create -y -n "$tag" "python=$python_version"
|
||||
conda activate "$tag"
|
||||
# shellcheck disable=SC2086
|
||||
conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "cudatoolkit=$CUDA_TAG" torchvision
|
||||
pip install fvcore iopath
|
||||
echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
@@ -15,13 +14,12 @@ dest = "s3://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/"
|
||||
output = Path("output")
|
||||
|
||||
|
||||
def fs3cmd(args, allow_failure: bool = False) -> List[str]:
|
||||
def aws_s3_cmd(args) -> List[str]:
|
||||
"""
|
||||
This function returns the args for subprocess to mimic the bash command
|
||||
fs3cmd available in the fairusers_aws module on the FAIR cluster.
|
||||
This function returns the full args for subprocess to do a command
|
||||
with aws.
|
||||
"""
|
||||
os.environ["FAIR_CLUSTER_NAME"] = os.environ["FAIR_ENV_CLUSTER"].lower()
|
||||
cmd_args = ["/public/apps/fairusers_aws/bin/fs3cmd"] + args
|
||||
cmd_args = ["aws", "s3", "--profile", "saml"] + args
|
||||
return cmd_args
|
||||
|
||||
|
||||
@@ -31,7 +29,7 @@ def fs3_exists(path) -> bool:
|
||||
In fact, will also return True if there is a file which has the given
|
||||
path as a prefix, but we are careful about this.
|
||||
"""
|
||||
out = subprocess.check_output(fs3cmd(["ls", path]))
|
||||
out = subprocess.check_output(aws_s3_cmd(["ls", path]))
|
||||
return len(out) != 0
|
||||
|
||||
|
||||
@@ -41,7 +39,7 @@ def get_html_wrappers() -> None:
|
||||
assert not output_wrapper.exists()
|
||||
dest_wrapper = dest + directory.name + "/download.html"
|
||||
if fs3_exists(dest_wrapper):
|
||||
subprocess.check_call(fs3cmd(["get", dest_wrapper, str(output_wrapper)]))
|
||||
subprocess.check_call(aws_s3_cmd(["cp", dest_wrapper, str(output_wrapper)]))
|
||||
|
||||
|
||||
def write_html_wrappers() -> None:
|
||||
@@ -70,7 +68,7 @@ def to_aws() -> None:
|
||||
for file in directory.iterdir():
|
||||
print(file)
|
||||
subprocess.check_call(
|
||||
fs3cmd(["put", str(file), dest + str(file.relative_to(output))])
|
||||
aws_s3_cmd(["cp", str(file), dest + str(file.relative_to(output))])
|
||||
)
|
||||
|
||||
|
||||
@@ -79,3 +77,11 @@ if __name__ == "__main__":
|
||||
# get_html_wrappers()
|
||||
write_html_wrappers()
|
||||
to_aws()
|
||||
|
||||
|
||||
# see all files with
|
||||
# aws s3 --profile saml ls --recursive s3://dl.fbaipublicfiles.com/pytorch3d/
|
||||
|
||||
# empty current with
|
||||
# aws s3 --profile saml rm --recursive
|
||||
# s3://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# shellcheck shell=bash
|
||||
# A set of useful bash functions for common functionality we need to do in
|
||||
# many build scripts
|
||||
|
||||
|
||||
# Setup CUDA environment variables, based on CU_VERSION
|
||||
#
|
||||
# Inputs:
|
||||
@@ -51,6 +55,28 @@ setup_cuda() {
|
||||
|
||||
# Now work out the CUDA settings
|
||||
case "$CU_VERSION" in
|
||||
cu116)
|
||||
if [[ "$OSTYPE" == "msys" ]]; then
|
||||
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.6"
|
||||
else
|
||||
export CUDA_HOME=/usr/local/cuda-11.6/
|
||||
fi
|
||||
export FORCE_CUDA=1
|
||||
# Hard-coding gencode flags is temporary situation until
|
||||
# https://github.com/pytorch/pytorch/pull/23408 lands
|
||||
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||
;;
|
||||
cu115)
|
||||
if [[ "$OSTYPE" == "msys" ]]; then
|
||||
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.5"
|
||||
else
|
||||
export CUDA_HOME=/usr/local/cuda-11.5/
|
||||
fi
|
||||
export FORCE_CUDA=1
|
||||
# Hard-coding gencode flags is temporary situation until
|
||||
# https://github.com/pytorch/pytorch/pull/23408 lands
|
||||
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||
;;
|
||||
cu113)
|
||||
if [[ "$OSTYPE" == "msys" ]]; then
|
||||
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.3"
|
||||
@@ -289,6 +315,12 @@ setup_conda_cudatoolkit_constraint() {
|
||||
export CONDA_CUDATOOLKIT_CONSTRAINT=""
|
||||
else
|
||||
case "$CU_VERSION" in
|
||||
cu116)
|
||||
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.6,<11.7 # [not osx]"
|
||||
;;
|
||||
cu115)
|
||||
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.5,<11.6 # [not osx]"
|
||||
;;
|
||||
cu113)
|
||||
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.3,<11.4 # [not osx]"
|
||||
;;
|
||||
|
||||
@@ -45,9 +45,12 @@ test:
|
||||
- docs
|
||||
requires:
|
||||
- imageio
|
||||
- hydra-core
|
||||
- accelerate
|
||||
- lpips
|
||||
commands:
|
||||
#pytest .
|
||||
python -m unittest discover -v -s tests
|
||||
python -m unittest discover -v -s tests -t .
|
||||
|
||||
|
||||
about:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
||||
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
@REM All rights reserved.
|
||||
@REM
|
||||
@REM This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
||||
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
@REM All rights reserved.
|
||||
@REM
|
||||
@REM This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
||||
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
@REM All rights reserved.
|
||||
@REM
|
||||
@REM This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
||||
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
@REM All rights reserved.
|
||||
@REM
|
||||
@REM This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
||||
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
@REM All rights reserved.
|
||||
@REM
|
||||
@REM This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
||||
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
@REM All rights reserved.
|
||||
@REM
|
||||
@REM This source code is licensed under the BSD-style license found in the
|
||||
|
||||
5
projects/__init__.py
Normal file
5
projects/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
280
projects/implicitron_trainer/README.md
Normal file
280
projects/implicitron_trainer/README.md
Normal file
@@ -0,0 +1,280 @@
|
||||
# Introduction
|
||||
|
||||
Implicitron is a PyTorch3D-based framework for new-view synthesis via modeling of neural-network-based representations.
|
||||
|
||||
# License
|
||||
|
||||
Implicitron is distributed as part of PyTorch3D under the [BSD license](https://github.com/facebookresearch/pytorch3d/blob/main/LICENSE).
|
||||
It includes code from the [NeRF](https://github.com/bmild/nerf), [SRN](http://github.com/vsitzmann/scene-representation-networks) and [IDR](http://github.com/lioryariv/idr) repos.
|
||||
See [LICENSE-3RD-PARTY](https://github.com/facebookresearch/pytorch3d/blob/main/LICENSE-3RD-PARTY) for their licenses.
|
||||
|
||||
|
||||
# Installation
|
||||
|
||||
There are three ways to set up Implicitron, depending on the flexibility level required.
|
||||
If you only want to train or evaluate models as they are implemented changing only the parameters, you can just install the package.
|
||||
Implicitron also provides a flexible API that supports user-defined plug-ins;
|
||||
if you want to re-implement some of the components without changing the high-level pipeline, you need to create a custom launcher script.
|
||||
The most flexible option, though, is cloning PyTorch3D repo and building it from sources, which allows changing the code in arbitrary ways.
|
||||
Below, we describe all three options in more detail.
|
||||
|
||||
|
||||
## [Option 1] Running an executable from the package
|
||||
|
||||
This option allows you to use the code as is without changing the implementations.
|
||||
Only configuration can be changed (see [Configuration system](#configuration-system)).
|
||||
|
||||
For this setup, install the dependencies and PyTorch3D from conda following [the guide](https://github.com/facebookresearch/pytorch3d/blob/master/INSTALL.md#1-install-with-cuda-support-from-anaconda-cloud-on-linux-only). Then, install implicitron-specific dependencies:
|
||||
|
||||
```shell
|
||||
pip install "hydra-core>=1.1" visdom lpips matplotlib accelerate
|
||||
```
|
||||
|
||||
The runner executable is available as the `pytorch3d_implicitron_runner` shell command.
|
||||
See [Running](#running) section below for examples of training and evaluation commands.
|
||||
|
||||
## [Option 2] Supporting custom implementations
|
||||
|
||||
To plug in custom implementations, for example, of renderer or implicit-function protocols, you need to create your own runner script and import the plug-in implementations there.
|
||||
First, install PyTorch3D and Implicitron dependencies as described in the previous section.
|
||||
Then, implement the custom script; copying `pytorch3d/projects/implicitron_trainer/experiment.py` is a good place to start.
|
||||
See [Custom plugins](#custom-plugins) for more information on how to import implementations and enable them in the configs.
|
||||
|
||||
|
||||
## [Option 3] Cloning PyTorch3D repo
|
||||
|
||||
This is the most flexible way to set up Implicitron as it allows changing the code directly.
|
||||
It allows modifying the high-level rendering pipeline or implementing yet-unsupported loss functions.
|
||||
Please follow the instructions to [install PyTorch3D from a local clone](https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md#2-install-from-a-local-clone).
|
||||
Then, install Implicitron-specific dependencies:
|
||||
|
||||
```shell
|
||||
pip install "hydra-core>=1.1" visdom lpips matplotlib accelerate
|
||||
```
|
||||
|
||||
You are still encouraged to implement custom plugins as above where possible as it makes reusing the code easier.
|
||||
The executable is located in `pytorch3d/projects/implicitron_trainer`.
|
||||
|
||||
|
||||
# Running
|
||||
|
||||
This section assumes that you use the executable provided by the installed package.
|
||||
If you have a custom `experiment.py` script (as in the Option 2 above), replace the executable with the path to your script.
|
||||
|
||||
## Training
|
||||
|
||||
To run training, pass a yaml config file, followed by a list of overridden arguments.
|
||||
For example, to train NeRF on the first skateboard sequence from CO3D dataset, you can run:
|
||||
```shell
|
||||
dataset_args=data_source_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||
pytorch3d_implicitron_runner --config-path ./configs/ --config-name repro_singleseq_nerf $dataset_args.dataset_root=<DATASET_ROOT> $dataset_args.category='skateboard' $dataset_args.test_restrict_sequence_id=0 test_when_finished=True exp_dir=<CHECKPOINT_DIR>
|
||||
```
|
||||
|
||||
Here, `--config-path` points to the config path relative to `pytorch3d_implicitron_runner` location;
|
||||
`--config-name` picks the config (in this case, `repro_singleseq_nerf.yaml`);
|
||||
`test_when_finished` will launch evaluation script once training is finished.
|
||||
Replace `<DATASET_ROOT>` with the location where the dataset in Implicitron format is stored
|
||||
and `<CHECKPOINT_DIR>` with a directory where checkpoints will be dumped during training.
|
||||
Other configuration parameters can be overridden in the same way.
|
||||
See [Configuration system](#configuration-system) section for more information on this.
|
||||
|
||||
|
||||
## Evaluation
|
||||
|
||||
To run evaluation on the latest checkpoint after (or during) training, simply add `eval_only=True` to your training command.
|
||||
|
||||
E.g. for executing the evaluation on the NeRF skateboard sequence, you can run:
|
||||
```shell
|
||||
dataset_args=data_source_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||
pytorch3d_implicitron_runner --config-path ./configs/ --config-name repro_singleseq_nerf $dataset_args.dataset_root=<CO3D_DATASET_ROOT> $dataset_args.category='skateboard' $dataset_args.test_restrict_sequence_id=0 exp_dir=<CHECKPOINT_DIR> eval_only=True
|
||||
```
|
||||
Evaluation prints the metrics to `stdout` and dumps them to a json file in `exp_dir`.
|
||||
|
||||
## Visualisation
|
||||
|
||||
The script produces a video of renders by a trained model assuming a pre-defined camera trajectory.
|
||||
In order for it to work, `ffmpeg` needs to be installed:
|
||||
|
||||
```shell
|
||||
conda install ffmpeg
|
||||
```
|
||||
|
||||
Here is an example of calling the script:
|
||||
```shell
|
||||
projects/implicitron_trainer/visualize_reconstruction.py exp_dir=<CHECKPOINT_DIR> visdom_show_preds=True n_eval_cameras=40 render_size="[64,64]" video_size="[256,256]"
|
||||
```
|
||||
|
||||
The argument `n_eval_cameras` sets the number of rendering viewpoints sampled on a trajectory, which defaults to a circular fly-around;
|
||||
`render_size` sets the size of a render passed to the model, which can be resized to `video_size` before writing.
|
||||
|
||||
Rendered videos of images, masks, and depth maps will be saved to `<CHECKPOINT_DIR>/vis`.
|
||||
|
||||
|
||||
# Configuration system
|
||||
|
||||
We use hydra and OmegaConf to parse the configs.
|
||||
The config schema and default values are defined by the dataclasses implementing the modules.
|
||||
More specifically, if a class derives from `Configurable`, its fields can be set in config yaml files or overridden in CLI.
|
||||
For example, `GenericModel` has a field `render_image_width` with the default value 400.
|
||||
If it is specified in the yaml config file or in CLI command, the new value will be used.
|
||||
|
||||
Configurables can form hierarchies.
|
||||
For example, `GenericModel` has a field `raysampler: RaySampler`, which is also Configurable.
|
||||
In the config, inner parameters can be propagated using `_args` postfix, e.g. to change `raysampler.n_pts_per_ray_training` (the number of sampled points per ray), the node `raysampler_args.n_pts_per_ray_training` should be specified.
|
||||
|
||||
The root of the hierarchy is defined by `ExperimentConfig` dataclass.
|
||||
It has top-level fields like `eval_only` which was used above for running evaluation by adding a CLI override.
|
||||
Additionally, it has non-leaf nodes like `generic_model_args`, which dispatches the config parameters to `GenericModel`. Thus, changing the model parameters may be achieved in two ways: either by editing the config file, e.g.
|
||||
```yaml
|
||||
generic_model_args:
|
||||
render_image_width: 800
|
||||
raysampler_args:
|
||||
n_pts_per_ray_training: 128
|
||||
```
|
||||
|
||||
or, equivalently, by adding the following to `pytorch3d_implicitron_runner` arguments:
|
||||
|
||||
```shell
|
||||
generic_model_args.render_image_width=800 generic_model_args.raysampler_args.n_pts_per_ray_training=128
|
||||
```
|
||||
|
||||
See the documentation in `pytorch3d/implicitron/tools/config.py` for more details.
|
||||
|
||||
## Replaceable implementations
|
||||
|
||||
Sometimes changing the model parameters does not provide enough flexibility, and you want to provide a new implementation for a building block.
|
||||
The configuration system also supports it!
|
||||
Abstract classes like `BaseRenderer` derive from `ReplaceableBase` instead of `Configurable`.
|
||||
This means that other Configurables can refer to them using the base type, while the specific implementation is chosen in the config using `_class_type`-postfixed node.
|
||||
In that case, `_args` node name has to include the implementation type.
|
||||
More specifically, to change renderer settings, the config will look like this:
|
||||
```yaml
|
||||
generic_model_args:
|
||||
renderer_class_type: LSTMRenderer
|
||||
renderer_LSTMRenderer_args:
|
||||
num_raymarch_steps: 10
|
||||
hidden_size: 16
|
||||
```
|
||||
|
||||
See the documentation in `pytorch3d/implicitron/tools/config.py` for more details on the configuration system.
|
||||
|
||||
## Custom plugins
|
||||
|
||||
If you have an idea for another implementation of a replaceable component, it can be plugged in without changing the core code.
|
||||
For that, you need to set up Implicitron through option 2 or 3 above.
|
||||
Let's say you want to implement a renderer that accumulates opacities similar to an X-ray machine.
|
||||
First, create a module `x_ray_renderer.py` with a class deriving from `BaseRenderer`:
|
||||
|
||||
```python
|
||||
from pytorch3d.implicitron.tools.config import registry
|
||||
|
||||
@registry.register
|
||||
class XRayRenderer(BaseRenderer, torch.nn.Module):
|
||||
n_pts_per_ray: int = 64
|
||||
|
||||
# if there are other base classes, make sure to call `super().__init__()` explicitly
|
||||
def __post_init__(self):
|
||||
super().__init__()
|
||||
# custom initialization
|
||||
|
||||
def forward(
|
||||
self,
|
||||
ray_bundle,
|
||||
implicit_functions=[],
|
||||
evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
|
||||
**kwargs,
|
||||
) -> RendererOutput:
|
||||
...
|
||||
```
|
||||
|
||||
Please note `@registry.register` decorator that registers the plug-in as an implementation of `Renderer`.
|
||||
IMPORTANT: In order for it to run, the class (or its enclosing module) has to be imported in your launch script. Additionally, this has to be done before parsing the root configuration class `ExperimentConfig`.
|
||||
Simply add `from . import x_ray_renderer` at the beginning of `experiment.py` (relative imports in Python use the `from . import module` form).
|
||||
|
||||
After that, you should be able to change the config with:
|
||||
```yaml
|
||||
generic_model_args:
|
||||
renderer_class_type: XRayRenderer
|
||||
renderer_XRayRenderer_args:
|
||||
n_pts_per_ray: 128
|
||||
```
|
||||
|
||||
to replace the implementation and potentially override the parameters.
|
||||
|
||||
# Code and config structure
|
||||
|
||||
As per above, the config structure is parsed automatically from the module hierarchy.
|
||||
In particular, model parameters are contained in `generic_model_args` node, and dataset parameters in `data_source_args` node.
|
||||
|
||||
Here is the class structure (single-line edges show aggregation, while double lines show available implementations):
|
||||
```
|
||||
generic_model_args: GenericModel
|
||||
└-- sequence_autodecoder_args: Autodecoder
|
||||
└-- raysampler_args: RaySampler
|
||||
└-- renderer_*_args: BaseRenderer
|
||||
╘== MultiPassEmissionAbsorptionRenderer
|
||||
╘== LSTMRenderer
|
||||
╘== SignedDistanceFunctionRenderer
|
||||
└-- ray_tracer_args: RayTracing
|
||||
└-- ray_normal_coloring_network_args: RayNormalColoringNetwork
|
||||
└-- implicit_function_*_args: ImplicitFunctionBase
|
||||
╘== NeuralRadianceFieldImplicitFunction
|
||||
╘== SRNImplicitFunction
|
||||
└-- raymarch_function_args: SRNRaymarchFunction
|
||||
└-- pixel_generator_args: SRNPixelGenerator
|
||||
╘== SRNHyperNetImplicitFunction
|
||||
└-- hypernet_args: SRNRaymarchHyperNet
|
||||
└-- pixel_generator_args: SRNPixelGenerator
|
||||
╘== IdrFeatureField
|
||||
└-- image_feature_extractor_*_args: FeatureExtractorBase
|
||||
╘== ResNetFeatureExtractor
|
||||
└-- view_sampler_args: ViewSampler
|
||||
└-- feature_aggregator_*_args: FeatureAggregatorBase
|
||||
╘== IdentityFeatureAggregator
|
||||
╘== AngleWeightedIdentityFeatureAggregator
|
||||
╘== AngleWeightedReductionFeatureAggregator
|
||||
╘== ReductionFeatureAggregator
|
||||
solver_args: init_optimizer
|
||||
data_source_args: ImplicitronDataSource
|
||||
└-- dataset_map_provider_*_args
|
||||
└-- data_loader_map_provider_*_args
|
||||
```
|
||||
|
||||
Please look at the annotations of the respective classes or functions for the lists of hyperparameters.
|
||||
|
||||
# Reproducing CO3D experiments
|
||||
|
||||
Common Objects in 3D (CO3D) is a large-scale dataset of videos of rigid objects grouped into 50 common categories.
|
||||
Implicitron provides implementations and config files to reproduce the results from [the paper](https://arxiv.org/abs/2109.00512).
|
||||
Please follow [the link](https://github.com/facebookresearch/co3d#automatic-batch-download) for the instructions to download the dataset.
|
||||
In training and evaluation scripts, use the download location as `<DATASET_ROOT>`.
|
||||
It is also possible to define environment variable `CO3D_DATASET_ROOT` instead of specifying it.
|
||||
To reproduce the experiments from the paper, use the following configs. For single-sequence experiments:
|
||||
|
||||
| Method | config file |
|
||||
|-----------------|-------------------------------------|
|
||||
| NeRF | repro_singleseq_nerf.yaml |
|
||||
| NeRF + WCE | repro_singleseq_nerf_wce.yaml |
|
||||
| NerFormer | repro_singleseq_nerformer.yaml |
|
||||
| IDR | repro_singleseq_idr.yaml |
|
||||
| SRN | repro_singleseq_srn_noharm.yaml |
|
||||
| SRN + γ | repro_singleseq_srn.yaml |
|
||||
| SRN + WCE | repro_singleseq_srn_wce_noharm.yaml |
|
||||
| SRN + WCE + γ | repro_singleseq_srn_wce.yaml |
|
||||
|
||||
For multi-sequence experiments (without generalisation to new sequences):
|
||||
|
||||
| Method | config file |
|
||||
|-----------------|--------------------------------------------|
|
||||
| NeRF + AD | repro_multiseq_nerf_ad.yaml |
|
||||
| SRN + AD | repro_multiseq_srn_ad_hypernet_noharm.yaml |
|
||||
| SRN + γ + AD | repro_multiseq_srn_ad_hypernet.yaml |
|
||||
|
||||
For multi-sequence experiments (with generalisation to new sequences):
|
||||
|
||||
| Method | config file |
|
||||
|-----------------|--------------------------------------|
|
||||
| NeRF + WCE | repro_multiseq_nerf_wce.yaml |
|
||||
| NerFormer | repro_multiseq_nerformer.yaml |
|
||||
| SRN + WCE | repro_multiseq_srn_wce_noharm.yaml |
|
||||
| SRN + WCE + γ | repro_multiseq_srn_wce.yaml |
|
||||
5
projects/implicitron_trainer/__init__.py
Normal file
5
projects/implicitron_trainer/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
75
projects/implicitron_trainer/configs/repro_base.yaml
Normal file
75
projects/implicitron_trainer/configs/repro_base.yaml
Normal file
@@ -0,0 +1,75 @@
|
||||
defaults:
|
||||
- default_config
|
||||
- _self_
|
||||
exp_dir: ./data/exps/base/
|
||||
architecture: generic
|
||||
visualize_interval: 0
|
||||
visdom_port: 8097
|
||||
data_source_args:
|
||||
data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
|
||||
dataset_map_provider_class_type: JsonIndexDatasetMapProvider
|
||||
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||
dataset_length_train: 1000
|
||||
dataset_length_val: 1
|
||||
num_workers: 8
|
||||
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
||||
dataset_root: ${oc.env:CO3D_DATASET_ROOT}
|
||||
n_frames_per_sequence: -1
|
||||
test_on_train: true
|
||||
test_restrict_sequence_id: 0
|
||||
dataset_JsonIndexDataset_args:
|
||||
load_point_clouds: false
|
||||
mask_depths: false
|
||||
mask_images: false
|
||||
generic_model_args:
|
||||
loss_weights:
|
||||
loss_mask_bce: 1.0
|
||||
loss_prev_stage_mask_bce: 1.0
|
||||
loss_autodecoder_norm: 0.01
|
||||
loss_rgb_mse: 1.0
|
||||
loss_prev_stage_rgb_mse: 1.0
|
||||
output_rasterized_mc: false
|
||||
chunk_size_grid: 102400
|
||||
render_image_height: 400
|
||||
render_image_width: 400
|
||||
num_passes: 2
|
||||
implicit_function_NeuralRadianceFieldImplicitFunction_args:
|
||||
n_harmonic_functions_xyz: 10
|
||||
n_harmonic_functions_dir: 4
|
||||
n_hidden_neurons_xyz: 256
|
||||
n_hidden_neurons_dir: 128
|
||||
n_layers_xyz: 8
|
||||
append_xyz:
|
||||
- 5
|
||||
latent_dim: 0
|
||||
raysampler_AdaptiveRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 1024
|
||||
scene_extent: 8.0
|
||||
n_pts_per_ray_training: 64
|
||||
n_pts_per_ray_evaluation: 64
|
||||
stratified_point_sampling_training: true
|
||||
stratified_point_sampling_evaluation: false
|
||||
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
||||
n_pts_per_ray_fine_training: 64
|
||||
n_pts_per_ray_fine_evaluation: 64
|
||||
append_coarse_samples_to_fine: true
|
||||
density_noise_std_train: 1.0
|
||||
view_pooler_args:
|
||||
view_sampler_args:
|
||||
masked_sampling: false
|
||||
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||
stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
proj_dim: 16
|
||||
image_rescale: 0.32
|
||||
first_max_pool: false
|
||||
solver_args:
|
||||
breed: adam
|
||||
lr: 0.0005
|
||||
lr_policy: multistep
|
||||
max_epochs: 2000
|
||||
momentum: 0.9
|
||||
weight_decay: 0.0
|
||||
@@ -0,0 +1,17 @@
|
||||
generic_model_args:
|
||||
image_feature_extractor_class_type: ResNetFeatureExtractor
|
||||
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||
add_images: true
|
||||
add_masks: true
|
||||
first_max_pool: true
|
||||
image_rescale: 0.375
|
||||
l2_norm: true
|
||||
name: resnet34
|
||||
normalize_image: true
|
||||
pretrained: true
|
||||
stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
proj_dim: 32
|
||||
@@ -0,0 +1,17 @@
|
||||
generic_model_args:
|
||||
image_feature_extractor_class_type: ResNetFeatureExtractor
|
||||
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||
add_images: true
|
||||
add_masks: true
|
||||
first_max_pool: false
|
||||
image_rescale: 0.375
|
||||
l2_norm: true
|
||||
name: resnet34
|
||||
normalize_image: true
|
||||
pretrained: true
|
||||
stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
proj_dim: 16
|
||||
@@ -0,0 +1,18 @@
|
||||
generic_model_args:
|
||||
image_feature_extractor_class_type: ResNetFeatureExtractor
|
||||
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||
stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
first_max_pool: false
|
||||
proj_dim: -1
|
||||
l2_norm: false
|
||||
image_rescale: 0.375
|
||||
name: resnet34
|
||||
normalize_image: true
|
||||
pretrained: true
|
||||
view_pooler_args:
|
||||
feature_aggregator_AngleWeightedReductionFeatureAggregator_args:
|
||||
reduction_functions:
|
||||
- AVG
|
||||
@@ -0,0 +1,35 @@
|
||||
defaults:
|
||||
- repro_base.yaml
|
||||
- _self_
|
||||
data_source_args:
|
||||
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||
batch_size: 10
|
||||
dataset_length_train: 1000
|
||||
dataset_length_val: 1
|
||||
num_workers: 8
|
||||
train_conditioning_type: SAME
|
||||
val_conditioning_type: SAME
|
||||
test_conditioning_type: SAME
|
||||
images_per_seq_options:
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 5
|
||||
- 6
|
||||
- 7
|
||||
- 8
|
||||
- 9
|
||||
- 10
|
||||
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
||||
assert_single_seq: false
|
||||
task_str: multisequence
|
||||
n_frames_per_sequence: -1
|
||||
test_on_train: true
|
||||
test_restrict_sequence_id: 0
|
||||
solver_args:
|
||||
max_epochs: 3000
|
||||
milestones:
|
||||
- 1000
|
||||
camera_difficulty_bin_breaks:
|
||||
- 0.666667
|
||||
- 0.833334
|
||||
@@ -0,0 +1,65 @@
|
||||
defaults:
|
||||
- repro_multiseq_base.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
loss_weights:
|
||||
loss_mask_bce: 100.0
|
||||
loss_kl: 0.0
|
||||
loss_rgb_mse: 1.0
|
||||
loss_eikonal: 0.1
|
||||
chunk_size_grid: 65536
|
||||
num_passes: 1
|
||||
output_rasterized_mc: true
|
||||
sampling_mode_training: mask_sample
|
||||
global_encoder_class_type: SequenceAutodecoder
|
||||
global_encoder_SequenceAutodecoder_args:
|
||||
autodecoder_args:
|
||||
n_instances: 20000
|
||||
init_scale: 1.0
|
||||
encoding_dim: 256
|
||||
implicit_function_IdrFeatureField_args:
|
||||
n_harmonic_functions_xyz: 6
|
||||
bias: 0.6
|
||||
d_in: 3
|
||||
d_out: 1
|
||||
dims:
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
geometric_init: true
|
||||
pooled_feature_dim: 0
|
||||
skip_in:
|
||||
- 6
|
||||
weight_norm: true
|
||||
renderer_SignedDistanceFunctionRenderer_args:
|
||||
ray_tracer_args:
|
||||
line_search_step: 0.5
|
||||
line_step_iters: 3
|
||||
n_secant_steps: 8
|
||||
n_steps: 100
|
||||
object_bounding_sphere: 8.0
|
||||
sdf_threshold: 5.0e-05
|
||||
ray_normal_coloring_network_args:
|
||||
d_in: 9
|
||||
d_out: 3
|
||||
dims:
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
mode: idr
|
||||
n_harmonic_functions_dir: 4
|
||||
pooled_feature_dim: 0
|
||||
weight_norm: true
|
||||
raysampler_AdaptiveRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 1024
|
||||
n_pts_per_ray_training: 0
|
||||
n_pts_per_ray_evaluation: 0
|
||||
scene_extent: 8.0
|
||||
renderer_class_type: SignedDistanceFunctionRenderer
|
||||
implicit_function_class_type: IdrFeatureField
|
||||
@@ -0,0 +1,11 @@
|
||||
defaults:
|
||||
- repro_multiseq_base.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
chunk_size_grid: 16000
|
||||
view_pooler_enabled: false
|
||||
global_encoder_class_type: SequenceAutodecoder
|
||||
global_encoder_SequenceAutodecoder_args:
|
||||
autodecoder_args:
|
||||
n_instances: 20000
|
||||
encoding_dim: 256
|
||||
@@ -0,0 +1,10 @@
|
||||
defaults:
|
||||
- repro_multiseq_base.yaml
|
||||
- repro_feat_extractor_unnormed.yaml
|
||||
- _self_
|
||||
clip_grad: 1.0
|
||||
generic_model_args:
|
||||
chunk_size_grid: 16000
|
||||
view_pooler_enabled: true
|
||||
raysampler_AdaptiveRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 850
|
||||
@@ -0,0 +1,17 @@
|
||||
defaults:
|
||||
- repro_multiseq_base.yaml
|
||||
- repro_feat_extractor_transformer.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
chunk_size_grid: 16000
|
||||
raysampler_AdaptiveRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 800
|
||||
n_pts_per_ray_training: 32
|
||||
n_pts_per_ray_evaluation: 32
|
||||
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
||||
n_pts_per_ray_fine_training: 16
|
||||
n_pts_per_ray_fine_evaluation: 16
|
||||
implicit_function_class_type: NeRFormerImplicitFunction
|
||||
view_pooler_enabled: true
|
||||
view_pooler_args:
|
||||
feature_aggregator_class_type: IdentityFeatureAggregator
|
||||
@@ -0,0 +1,6 @@
|
||||
defaults:
|
||||
- repro_multiseq_nerformer.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
view_pooler_args:
|
||||
feature_aggregator_class_type: AngleWeightedIdentityFeatureAggregator
|
||||
@@ -0,0 +1,34 @@
|
||||
defaults:
|
||||
- repro_multiseq_base.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
chunk_size_grid: 16000
|
||||
view_pooler_enabled: false
|
||||
n_train_target_views: -1
|
||||
num_passes: 1
|
||||
loss_weights:
|
||||
loss_rgb_mse: 200.0
|
||||
loss_prev_stage_rgb_mse: 0.0
|
||||
loss_mask_bce: 1.0
|
||||
loss_prev_stage_mask_bce: 0.0
|
||||
loss_autodecoder_norm: 0.001
|
||||
depth_neg_penalty: 10000.0
|
||||
global_encoder_class_type: SequenceAutodecoder
|
||||
global_encoder_SequenceAutodecoder_args:
|
||||
autodecoder_args:
|
||||
encoding_dim: 256
|
||||
n_instances: 20000
|
||||
raysampler_class_type: NearFarRaySampler
|
||||
raysampler_NearFarRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 2048
|
||||
min_depth: 0.05
|
||||
max_depth: 0.05
|
||||
n_pts_per_ray_training: 1
|
||||
n_pts_per_ray_evaluation: 1
|
||||
stratified_point_sampling_training: false
|
||||
stratified_point_sampling_evaluation: false
|
||||
renderer_class_type: LSTMRenderer
|
||||
implicit_function_class_type: SRNHyperNetImplicitFunction
|
||||
solver_args:
|
||||
breed: adam
|
||||
lr: 5.0e-05
|
||||
@@ -0,0 +1,10 @@
|
||||
defaults:
|
||||
- repro_multiseq_srn_ad_hypernet.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
num_passes: 1
|
||||
implicit_function_SRNHyperNetImplicitFunction_args:
|
||||
pixel_generator_args:
|
||||
n_harmonic_functions: 0
|
||||
hypernet_args:
|
||||
n_harmonic_functions: 0
|
||||
@@ -0,0 +1,30 @@
|
||||
defaults:
|
||||
- repro_multiseq_base.yaml
|
||||
- repro_feat_extractor_normed.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
chunk_size_grid: 32000
|
||||
num_passes: 1
|
||||
n_train_target_views: -1
|
||||
loss_weights:
|
||||
loss_rgb_mse: 200.0
|
||||
loss_prev_stage_rgb_mse: 0.0
|
||||
loss_mask_bce: 1.0
|
||||
loss_prev_stage_mask_bce: 0.0
|
||||
loss_autodecoder_norm: 0.0
|
||||
depth_neg_penalty: 10000.0
|
||||
raysampler_class_type: NearFarRaySampler
|
||||
raysampler_NearFarRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 2048
|
||||
min_depth: 0.05
|
||||
max_depth: 0.05
|
||||
n_pts_per_ray_training: 1
|
||||
n_pts_per_ray_evaluation: 1
|
||||
stratified_point_sampling_training: false
|
||||
stratified_point_sampling_evaluation: false
|
||||
renderer_class_type: LSTMRenderer
|
||||
implicit_function_class_type: SRNImplicitFunction
|
||||
view_pooler_enabled: true
|
||||
solver_args:
|
||||
breed: adam
|
||||
lr: 5.0e-05
|
||||
@@ -0,0 +1,10 @@
|
||||
defaults:
|
||||
- repro_multiseq_srn_wce.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
num_passes: 1
|
||||
implicit_function_SRNImplicitFunction_args:
|
||||
pixel_generator_args:
|
||||
n_harmonic_functions: 0
|
||||
raymarch_function_args:
|
||||
n_harmonic_functions: 0
|
||||
@@ -0,0 +1,39 @@
|
||||
defaults:
|
||||
- repro_base
|
||||
- _self_
|
||||
data_source_args:
|
||||
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||
batch_size: 1
|
||||
dataset_length_train: 1000
|
||||
dataset_length_val: 1
|
||||
num_workers: 8
|
||||
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
||||
assert_single_seq: true
|
||||
n_frames_per_sequence: -1
|
||||
test_restrict_sequence_id: 0
|
||||
test_on_train: false
|
||||
generic_model_args:
|
||||
render_image_height: 800
|
||||
render_image_width: 800
|
||||
log_vars:
|
||||
- loss_rgb_psnr_fg
|
||||
- loss_rgb_psnr
|
||||
- loss_eikonal
|
||||
- loss_prev_stage_rgb_psnr
|
||||
- loss_mask_bce
|
||||
- loss_prev_stage_mask_bce
|
||||
- loss_rgb_mse
|
||||
- loss_prev_stage_rgb_mse
|
||||
- loss_depth_abs
|
||||
- loss_depth_abs_fg
|
||||
- loss_kl
|
||||
- loss_mask_neg_iou
|
||||
- objective
|
||||
- epoch
|
||||
- sec/it
|
||||
solver_args:
|
||||
lr: 0.0005
|
||||
max_epochs: 400
|
||||
milestones:
|
||||
- 200
|
||||
- 300
|
||||
@@ -0,0 +1,57 @@
|
||||
defaults:
|
||||
- repro_singleseq_base
|
||||
- _self_
|
||||
generic_model_args:
|
||||
loss_weights:
|
||||
loss_mask_bce: 100.0
|
||||
loss_kl: 0.0
|
||||
loss_rgb_mse: 1.0
|
||||
loss_eikonal: 0.1
|
||||
chunk_size_grid: 65536
|
||||
num_passes: 1
|
||||
view_pooler_enabled: false
|
||||
implicit_function_IdrFeatureField_args:
|
||||
n_harmonic_functions_xyz: 6
|
||||
bias: 0.6
|
||||
d_in: 3
|
||||
d_out: 1
|
||||
dims:
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
geometric_init: true
|
||||
pooled_feature_dim: 0
|
||||
skip_in:
|
||||
- 6
|
||||
weight_norm: true
|
||||
renderer_SignedDistanceFunctionRenderer_args:
|
||||
ray_tracer_args:
|
||||
line_search_step: 0.5
|
||||
line_step_iters: 3
|
||||
n_secant_steps: 8
|
||||
n_steps: 100
|
||||
object_bounding_sphere: 8.0
|
||||
sdf_threshold: 5.0e-05
|
||||
ray_normal_coloring_network_args:
|
||||
d_in: 9
|
||||
d_out: 3
|
||||
dims:
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
mode: idr
|
||||
n_harmonic_functions_dir: 4
|
||||
pooled_feature_dim: 0
|
||||
weight_norm: true
|
||||
raysampler_AdaptiveRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 1024
|
||||
n_pts_per_ray_training: 0
|
||||
n_pts_per_ray_evaluation: 0
|
||||
renderer_class_type: SignedDistanceFunctionRenderer
|
||||
implicit_function_class_type: IdrFeatureField
|
||||
@@ -0,0 +1,3 @@
|
||||
defaults:
|
||||
- repro_singleseq_base
|
||||
- _self_
|
||||
@@ -0,0 +1,9 @@
|
||||
defaults:
|
||||
- repro_singleseq_wce_base.yaml
|
||||
- repro_feat_extractor_unnormed.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
chunk_size_grid: 16000
|
||||
view_pooler_enabled: true
|
||||
raysampler_AdaptiveRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 850
|
||||
@@ -0,0 +1,17 @@
|
||||
defaults:
|
||||
- repro_singleseq_wce_base.yaml
|
||||
- repro_feat_extractor_transformer.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
chunk_size_grid: 16000
|
||||
view_pooler_enabled: true
|
||||
implicit_function_class_type: NeRFormerImplicitFunction
|
||||
raysampler_AdaptiveRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 800
|
||||
n_pts_per_ray_training: 32
|
||||
n_pts_per_ray_evaluation: 32
|
||||
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
||||
n_pts_per_ray_fine_training: 16
|
||||
n_pts_per_ray_fine_evaluation: 16
|
||||
view_pooler_args:
|
||||
feature_aggregator_class_type: IdentityFeatureAggregator
|
||||
@@ -0,0 +1,28 @@
|
||||
defaults:
|
||||
- repro_singleseq_base.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
num_passes: 1
|
||||
chunk_size_grid: 32000
|
||||
view_pooler_enabled: false
|
||||
loss_weights:
|
||||
loss_rgb_mse: 200.0
|
||||
loss_prev_stage_rgb_mse: 0.0
|
||||
loss_mask_bce: 1.0
|
||||
loss_prev_stage_mask_bce: 0.0
|
||||
loss_autodecoder_norm: 0.0
|
||||
depth_neg_penalty: 10000.0
|
||||
raysampler_class_type: NearFarRaySampler
|
||||
raysampler_NearFarRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 2048
|
||||
min_depth: 0.05
|
||||
max_depth: 0.05
|
||||
n_pts_per_ray_training: 1
|
||||
n_pts_per_ray_evaluation: 1
|
||||
stratified_point_sampling_training: false
|
||||
stratified_point_sampling_evaluation: false
|
||||
renderer_class_type: LSTMRenderer
|
||||
implicit_function_class_type: SRNImplicitFunction
|
||||
solver_args:
|
||||
breed: adam
|
||||
lr: 5.0e-05
|
||||
@@ -0,0 +1,10 @@
|
||||
defaults:
|
||||
- repro_singleseq_srn.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
num_passes: 1
|
||||
implicit_function_SRNImplicitFunction_args:
|
||||
pixel_generator_args:
|
||||
n_harmonic_functions: 0
|
||||
raymarch_function_args:
|
||||
n_harmonic_functions: 0
|
||||
@@ -0,0 +1,29 @@
|
||||
defaults:
|
||||
- repro_singleseq_wce_base
|
||||
- repro_feat_extractor_normed.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
num_passes: 1
|
||||
chunk_size_grid: 32000
|
||||
view_pooler_enabled: true
|
||||
loss_weights:
|
||||
loss_rgb_mse: 200.0
|
||||
loss_prev_stage_rgb_mse: 0.0
|
||||
loss_mask_bce: 1.0
|
||||
loss_prev_stage_mask_bce: 0.0
|
||||
loss_autodecoder_norm: 0.0
|
||||
depth_neg_penalty: 10000.0
|
||||
raysampler_class_type: NearFarRaySampler
|
||||
raysampler_NearFarRaySampler_args:
|
||||
n_rays_per_image_sampled_from_mask: 2048
|
||||
min_depth: 0.05
|
||||
max_depth: 0.05
|
||||
n_pts_per_ray_training: 1
|
||||
n_pts_per_ray_evaluation: 1
|
||||
stratified_point_sampling_training: false
|
||||
stratified_point_sampling_evaluation: false
|
||||
renderer_class_type: LSTMRenderer
|
||||
implicit_function_class_type: SRNImplicitFunction
|
||||
solver_args:
|
||||
breed: adam
|
||||
lr: 5.0e-05
|
||||
@@ -0,0 +1,10 @@
|
||||
defaults:
|
||||
- repro_singleseq_srn_wce.yaml
|
||||
- _self_
|
||||
generic_model_args:
|
||||
num_passes: 1
|
||||
implicit_function_SRNImplicitFunction_args:
|
||||
pixel_generator_args:
|
||||
n_harmonic_functions: 0
|
||||
raymarch_function_args:
|
||||
n_harmonic_functions: 0
|
||||
@@ -0,0 +1,22 @@
|
||||
defaults:
|
||||
- repro_singleseq_base
|
||||
- _self_
|
||||
data_source_args:
|
||||
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||
batch_size: 10
|
||||
dataset_length_train: 1000
|
||||
dataset_length_val: 1
|
||||
num_workers: 8
|
||||
train_conditioning_type: SAME
|
||||
val_conditioning_type: SAME
|
||||
test_conditioning_type: SAME
|
||||
images_per_seq_options:
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
- 5
|
||||
- 6
|
||||
- 7
|
||||
- 8
|
||||
- 9
|
||||
- 10
|
||||
706
projects/implicitron_trainer/experiment.py
Executable file
706
projects/implicitron_trainer/experiment.py
Executable file
@@ -0,0 +1,706 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
""""
|
||||
This file is the entry point for launching experiments with Implicitron.
|
||||
|
||||
Main functions
|
||||
---------------
|
||||
- `run_training` is the wrapper for the train, val, test loops
|
||||
and checkpointing
|
||||
- `trainvalidate` is the inner loop which runs the model forward/backward
|
||||
pass, visualizations and metric printing
|
||||
|
||||
Launch Training
|
||||
---------------
|
||||
Experiment config .yaml files are located in the
|
||||
`projects/implicitron_trainer/configs` folder. To launch
|
||||
an experiment, specify the name of the file. Specific config values can
|
||||
also be overridden from the command line, for example:
|
||||
|
||||
```
|
||||
./experiment.py --config-name base_config.yaml override.param.one=42 override.param.two=84
|
||||
```
|
||||
|
||||
To run an experiment on a specific GPU, specify the `gpu_idx` key
|
||||
in the config file / CLI. To run on a different device, specify the
|
||||
device in `run_training`.
|
||||
|
||||
Outputs
|
||||
--------
|
||||
The outputs of the experiment are saved and logged in multiple ways:
|
||||
- Checkpoints:
|
||||
Model, optimizer and stats are stored in the directory
|
||||
named by the `exp_dir` key from the config file / CLI parameters.
|
||||
- Stats
|
||||
Stats are logged and plotted to the file "train_stats.pdf" in the
|
||||
same directory. The stats are also saved as part of the checkpoint file.
|
||||
- Visualizations
|
||||
Prredictions are plotted to a visdom server running at the
|
||||
port specified by the `visdom_server` and `visdom_port` keys in the
|
||||
config file.
|
||||
|
||||
"""
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
import warnings
|
||||
from typing import Any, Dict, Optional, Tuple
|
||||
|
||||
import hydra
|
||||
import lpips
|
||||
import numpy as np
|
||||
import torch
|
||||
import tqdm
|
||||
from accelerate import Accelerator
|
||||
from omegaconf import DictConfig, OmegaConf
|
||||
from packaging import version
|
||||
from pytorch3d.implicitron.dataset import utils as ds_utils
|
||||
from pytorch3d.implicitron.dataset.data_loader_map_provider import DataLoaderMap
|
||||
from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource, Task
|
||||
from pytorch3d.implicitron.dataset.dataset_map_provider import DatasetMap
|
||||
from pytorch3d.implicitron.evaluation import evaluate_new_view_synthesis as evaluate
|
||||
from pytorch3d.implicitron.models.generic_model import EvaluationMode, GenericModel
|
||||
from pytorch3d.implicitron.models.renderer.multipass_ea import (
|
||||
MultiPassEmissionAbsorptionRenderer,
|
||||
)
|
||||
from pytorch3d.implicitron.models.renderer.ray_sampler import AdaptiveRaySampler
|
||||
from pytorch3d.implicitron.tools import model_io, vis_utils
|
||||
from pytorch3d.implicitron.tools.config import (
|
||||
expand_args_fields,
|
||||
remove_unused_components,
|
||||
)
|
||||
from pytorch3d.implicitron.tools.stats import Stats
|
||||
from pytorch3d.renderer.cameras import CamerasBase
|
||||
|
||||
from .impl.experiment_config import ExperimentConfig
|
||||
from .impl.optimization import init_optimizer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if version.parse(hydra.__version__) < version.Version("1.1"):
|
||||
raise ValueError(
|
||||
f"Hydra version {hydra.__version__} is too old."
|
||||
" (Implicitron requires version 1.1 or later.)"
|
||||
)
|
||||
|
||||
try:
|
||||
# only makes sense in FAIR cluster
|
||||
import pytorch3d.implicitron.fair_cluster.slurm # noqa: F401
|
||||
except ModuleNotFoundError:
|
||||
pass
|
||||
|
||||
no_accelerate = os.environ.get("PYTORCH3D_NO_ACCELERATE") is not None
|
||||
|
||||
|
||||
def init_model(
|
||||
*,
|
||||
cfg: DictConfig,
|
||||
accelerator: Optional[Accelerator] = None,
|
||||
force_load: bool = False,
|
||||
clear_stats: bool = False,
|
||||
load_model_only: bool = False,
|
||||
) -> Tuple[GenericModel, Stats, Optional[Dict[str, Any]]]:
|
||||
"""
|
||||
Returns an instance of `GenericModel`.
|
||||
|
||||
If `cfg.resume` is set or `force_load` is true,
|
||||
attempts to load the last checkpoint from `cfg.exp_dir`. Failure to do so
|
||||
will return the model with initial weights, unless `force_load` is passed,
|
||||
in which case a FileNotFoundError is raised.
|
||||
|
||||
Args:
|
||||
force_load: If true, force load model from checkpoint even if
|
||||
cfg.resume is false.
|
||||
clear_stats: If true, clear the stats object loaded from checkpoint
|
||||
load_model_only: If true, load only the model weights from checkpoint
|
||||
and do not load the state of the optimizer and stats.
|
||||
|
||||
Returns:
|
||||
model: The model with optionally loaded weights from checkpoint
|
||||
stats: The stats structure (optionally loaded from checkpoint)
|
||||
optimizer_state: The optimizer state dict containing
|
||||
`state` and `param_groups` keys (optionally loaded from checkpoint)
|
||||
|
||||
Raise:
|
||||
FileNotFoundError if `force_load` is passed but checkpoint is not found.
|
||||
"""
|
||||
|
||||
# Initialize the model
|
||||
if cfg.architecture == "generic":
|
||||
model = GenericModel(**cfg.generic_model_args)
|
||||
else:
|
||||
raise ValueError(f"No such arch {cfg.architecture}.")
|
||||
|
||||
# Determine the network outputs that should be logged
|
||||
if hasattr(model, "log_vars"):
|
||||
log_vars = copy.deepcopy(list(model.log_vars))
|
||||
else:
|
||||
log_vars = ["objective"]
|
||||
|
||||
visdom_env_charts = vis_utils.get_visdom_env(cfg) + "_charts"
|
||||
|
||||
# Init the stats struct
|
||||
stats = Stats(
|
||||
log_vars,
|
||||
visdom_env=visdom_env_charts,
|
||||
verbose=False,
|
||||
visdom_server=cfg.visdom_server,
|
||||
visdom_port=cfg.visdom_port,
|
||||
)
|
||||
|
||||
# Retrieve the last checkpoint
|
||||
if cfg.resume_epoch > 0:
|
||||
model_path = model_io.get_checkpoint(cfg.exp_dir, cfg.resume_epoch)
|
||||
else:
|
||||
model_path = model_io.find_last_checkpoint(cfg.exp_dir)
|
||||
|
||||
optimizer_state = None
|
||||
if model_path is not None:
|
||||
logger.info("found previous model %s" % model_path)
|
||||
if force_load or cfg.resume:
|
||||
logger.info(" -> resuming")
|
||||
|
||||
map_location = None
|
||||
if accelerator is not None and not accelerator.is_local_main_process:
|
||||
map_location = {
|
||||
"cuda:%d" % 0: "cuda:%d" % accelerator.local_process_index
|
||||
}
|
||||
if load_model_only:
|
||||
model_state_dict = torch.load(
|
||||
model_io.get_model_path(model_path), map_location=map_location
|
||||
)
|
||||
stats_load, optimizer_state = None, None
|
||||
else:
|
||||
model_state_dict, stats_load, optimizer_state = model_io.load_model(
|
||||
model_path, map_location=map_location
|
||||
)
|
||||
|
||||
# Determine if stats should be reset
|
||||
if not clear_stats:
|
||||
if stats_load is None:
|
||||
logger.info("\n\n\n\nCORRUPT STATS -> clearing stats\n\n\n\n")
|
||||
last_epoch = model_io.parse_epoch_from_model_path(model_path)
|
||||
logger.info(f"Estimated resume epoch = {last_epoch}")
|
||||
|
||||
# Reset the stats struct
|
||||
for _ in range(last_epoch + 1):
|
||||
stats.new_epoch()
|
||||
assert last_epoch == stats.epoch
|
||||
else:
|
||||
stats = stats_load
|
||||
|
||||
# Update stats properties incase it was reset on load
|
||||
stats.visdom_env = visdom_env_charts
|
||||
stats.visdom_server = cfg.visdom_server
|
||||
stats.visdom_port = cfg.visdom_port
|
||||
stats.plot_file = os.path.join(cfg.exp_dir, "train_stats.pdf")
|
||||
stats.synchronize_logged_vars(log_vars)
|
||||
else:
|
||||
logger.info(" -> clearing stats")
|
||||
|
||||
try:
|
||||
# TODO: fix on creation of the buffers
|
||||
# after the hack above, this will not pass in most cases
|
||||
# ... but this is fine for now
|
||||
model.load_state_dict(model_state_dict, strict=True)
|
||||
except RuntimeError as e:
|
||||
logger.error(e)
|
||||
logger.info("Cant load state dict in strict mode! -> trying non-strict")
|
||||
model.load_state_dict(model_state_dict, strict=False)
|
||||
model.log_vars = log_vars
|
||||
else:
|
||||
logger.info(" -> but not resuming -> starting from scratch")
|
||||
elif force_load:
|
||||
raise FileNotFoundError(f"Cannot find a checkpoint in {cfg.exp_dir}!")
|
||||
|
||||
return model, stats, optimizer_state
|
||||
|
||||
|
||||
def trainvalidate(
|
||||
model,
|
||||
stats,
|
||||
epoch,
|
||||
loader,
|
||||
optimizer,
|
||||
validation: bool,
|
||||
*,
|
||||
accelerator: Optional[Accelerator],
|
||||
device: torch.device,
|
||||
bp_var: str = "objective",
|
||||
metric_print_interval: int = 5,
|
||||
visualize_interval: int = 100,
|
||||
visdom_env_root: str = "trainvalidate",
|
||||
clip_grad: float = 0.0,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""
|
||||
This is the main loop for training and evaluation including:
|
||||
model forward pass, loss computation, backward pass and visualization.
|
||||
|
||||
Args:
|
||||
model: The model module optionally loaded from checkpoint
|
||||
stats: The stats struct, also optionally loaded from checkpoint
|
||||
epoch: The index of the current epoch
|
||||
loader: The dataloader to use for the loop
|
||||
optimizer: The optimizer module optionally loaded from checkpoint
|
||||
validation: If true, run the loop with the model in eval mode
|
||||
and skip the backward pass
|
||||
bp_var: The name of the key in the model output `preds` dict which
|
||||
should be used as the loss for the backward pass.
|
||||
metric_print_interval: The batch interval at which the stats should be
|
||||
logged.
|
||||
visualize_interval: The batch interval at which the visualizations
|
||||
should be plotted
|
||||
visdom_env_root: The name of the visdom environment to use for plotting
|
||||
clip_grad: Optionally clip the gradient norms.
|
||||
If set to a value <=0.0, no clipping
|
||||
device: The device on which to run the model.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
|
||||
if validation:
|
||||
model.eval()
|
||||
trainmode = "val"
|
||||
else:
|
||||
model.train()
|
||||
trainmode = "train"
|
||||
|
||||
t_start = time.time()
|
||||
|
||||
# get the visdom env name
|
||||
visdom_env_imgs = visdom_env_root + "_images_" + trainmode
|
||||
viz = vis_utils.get_visdom_connection(
|
||||
server=stats.visdom_server,
|
||||
port=stats.visdom_port,
|
||||
)
|
||||
|
||||
# Iterate through the batches
|
||||
n_batches = len(loader)
|
||||
for it, net_input in enumerate(loader):
|
||||
last_iter = it == n_batches - 1
|
||||
|
||||
# move to gpu where possible (in place)
|
||||
net_input = net_input.to(device)
|
||||
|
||||
# run the forward pass
|
||||
if not validation:
|
||||
optimizer.zero_grad()
|
||||
preds = model(**{**net_input, "evaluation_mode": EvaluationMode.TRAINING})
|
||||
else:
|
||||
with torch.no_grad():
|
||||
preds = model(
|
||||
**{**net_input, "evaluation_mode": EvaluationMode.EVALUATION}
|
||||
)
|
||||
|
||||
# make sure we dont overwrite something
|
||||
assert all(k not in preds for k in net_input.keys())
|
||||
# merge everything into one big dict
|
||||
preds.update(net_input)
|
||||
|
||||
# update the stats logger
|
||||
stats.update(preds, time_start=t_start, stat_set=trainmode)
|
||||
assert stats.it[trainmode] == it, "inconsistent stat iteration number!"
|
||||
|
||||
# print textual status update
|
||||
if it % metric_print_interval == 0 or last_iter:
|
||||
stats.print(stat_set=trainmode, max_it=n_batches)
|
||||
|
||||
# visualize results
|
||||
if (
|
||||
(accelerator is None or accelerator.is_local_main_process)
|
||||
and visualize_interval > 0
|
||||
and it % visualize_interval == 0
|
||||
):
|
||||
prefix = f"e{stats.epoch}_it{stats.it[trainmode]}"
|
||||
|
||||
model.visualize(
|
||||
viz,
|
||||
visdom_env_imgs,
|
||||
preds,
|
||||
prefix,
|
||||
)
|
||||
|
||||
# optimizer step
|
||||
if not validation:
|
||||
loss = preds[bp_var]
|
||||
assert torch.isfinite(loss).all(), "Non-finite loss!"
|
||||
# backprop
|
||||
if accelerator is None:
|
||||
loss.backward()
|
||||
else:
|
||||
accelerator.backward(loss)
|
||||
if clip_grad > 0.0:
|
||||
# Optionally clip the gradient norms.
|
||||
total_norm = torch.nn.utils.clip_grad_norm(
|
||||
model.parameters(), clip_grad
|
||||
)
|
||||
if total_norm > clip_grad:
|
||||
logger.info(
|
||||
f"Clipping gradient: {total_norm}"
|
||||
+ f" with coef {clip_grad / float(total_norm)}."
|
||||
)
|
||||
|
||||
optimizer.step()
|
||||
|
||||
|
||||
def run_training(cfg: DictConfig) -> None:
|
||||
"""
|
||||
Entry point to run the training and validation loops
|
||||
based on the specified config file.
|
||||
"""
|
||||
|
||||
# Initialize the accelerator
|
||||
if no_accelerate:
|
||||
accelerator = None
|
||||
device = torch.device("cuda:0")
|
||||
else:
|
||||
accelerator = Accelerator(device_placement=False)
|
||||
logger.info(accelerator.state)
|
||||
device = accelerator.device
|
||||
|
||||
logger.info(f"Running experiment on device: {device}")
|
||||
|
||||
# set the debug mode
|
||||
if cfg.detect_anomaly:
|
||||
logger.info("Anomaly detection!")
|
||||
torch.autograd.set_detect_anomaly(cfg.detect_anomaly)
|
||||
|
||||
# create the output folder
|
||||
os.makedirs(cfg.exp_dir, exist_ok=True)
|
||||
_seed_all_random_engines(cfg.seed)
|
||||
remove_unused_components(cfg)
|
||||
|
||||
# dump the exp config to the exp dir
|
||||
try:
|
||||
cfg_filename = os.path.join(cfg.exp_dir, "expconfig.yaml")
|
||||
OmegaConf.save(config=cfg, f=cfg_filename)
|
||||
except PermissionError:
|
||||
warnings.warn("Cant dump config due to insufficient permissions!")
|
||||
|
||||
# setup datasets
|
||||
datasource = ImplicitronDataSource(**cfg.data_source_args)
|
||||
datasets, dataloaders = datasource.get_datasets_and_dataloaders()
|
||||
task = datasource.get_task()
|
||||
|
||||
# init the model
|
||||
model, stats, optimizer_state = init_model(cfg=cfg, accelerator=accelerator)
|
||||
start_epoch = stats.epoch + 1
|
||||
|
||||
# move model to gpu
|
||||
model.to(device)
|
||||
|
||||
# only run evaluation on the test dataloader
|
||||
if cfg.eval_only:
|
||||
_eval_and_dump(
|
||||
cfg,
|
||||
task,
|
||||
datasource.all_train_cameras,
|
||||
datasets,
|
||||
dataloaders,
|
||||
model,
|
||||
stats,
|
||||
device=device,
|
||||
)
|
||||
return
|
||||
|
||||
# init the optimizer
|
||||
optimizer, scheduler = init_optimizer(
|
||||
model,
|
||||
optimizer_state=optimizer_state,
|
||||
last_epoch=start_epoch,
|
||||
**cfg.solver_args,
|
||||
)
|
||||
|
||||
# check the scheduler and stats have been initialized correctly
|
||||
assert scheduler.last_epoch == stats.epoch + 1
|
||||
assert scheduler.last_epoch == start_epoch
|
||||
|
||||
# Wrap all modules in the distributed library
|
||||
# Note: we don't pass the scheduler to prepare as it
|
||||
# doesn't need to be stepped at each optimizer step
|
||||
train_loader = dataloaders.train
|
||||
val_loader = dataloaders.val
|
||||
if accelerator is not None:
|
||||
(
|
||||
model,
|
||||
optimizer,
|
||||
train_loader,
|
||||
val_loader,
|
||||
) = accelerator.prepare(model, optimizer, train_loader, val_loader)
|
||||
|
||||
past_scheduler_lrs = []
|
||||
# loop through epochs
|
||||
for epoch in range(start_epoch, cfg.solver_args.max_epochs):
|
||||
# automatic new_epoch and plotting of stats at every epoch start
|
||||
with stats:
|
||||
|
||||
# Make sure to re-seed random generators to ensure reproducibility
|
||||
# even after restart.
|
||||
_seed_all_random_engines(cfg.seed + epoch)
|
||||
|
||||
cur_lr = float(scheduler.get_last_lr()[-1])
|
||||
logger.info(f"scheduler lr = {cur_lr:1.2e}")
|
||||
past_scheduler_lrs.append(cur_lr)
|
||||
|
||||
# train loop
|
||||
trainvalidate(
|
||||
model,
|
||||
stats,
|
||||
epoch,
|
||||
train_loader,
|
||||
optimizer,
|
||||
False,
|
||||
visdom_env_root=vis_utils.get_visdom_env(cfg),
|
||||
device=device,
|
||||
accelerator=accelerator,
|
||||
**cfg,
|
||||
)
|
||||
|
||||
# val loop (optional)
|
||||
if val_loader is not None and epoch % cfg.validation_interval == 0:
|
||||
trainvalidate(
|
||||
model,
|
||||
stats,
|
||||
epoch,
|
||||
val_loader,
|
||||
optimizer,
|
||||
True,
|
||||
visdom_env_root=vis_utils.get_visdom_env(cfg),
|
||||
device=device,
|
||||
accelerator=accelerator,
|
||||
**cfg,
|
||||
)
|
||||
|
||||
# eval loop (optional)
|
||||
if (
|
||||
dataloaders.test is not None
|
||||
and cfg.test_interval > 0
|
||||
and epoch % cfg.test_interval == 0
|
||||
):
|
||||
_run_eval(
|
||||
model,
|
||||
datasource.all_train_cameras,
|
||||
dataloaders.test,
|
||||
task,
|
||||
camera_difficulty_bin_breaks=cfg.camera_difficulty_bin_breaks,
|
||||
device=device,
|
||||
)
|
||||
|
||||
assert stats.epoch == epoch, "inconsistent stats!"
|
||||
|
||||
# delete previous models if required
|
||||
# save model only on the main process
|
||||
if cfg.store_checkpoints and (
|
||||
accelerator is None or accelerator.is_local_main_process
|
||||
):
|
||||
if cfg.store_checkpoints_purge > 0:
|
||||
for prev_epoch in range(epoch - cfg.store_checkpoints_purge):
|
||||
model_io.purge_epoch(cfg.exp_dir, prev_epoch)
|
||||
outfile = model_io.get_checkpoint(cfg.exp_dir, epoch)
|
||||
unwrapped_model = (
|
||||
model if accelerator is None else accelerator.unwrap_model(model)
|
||||
)
|
||||
model_io.safe_save_model(
|
||||
unwrapped_model, stats, outfile, optimizer=optimizer
|
||||
)
|
||||
|
||||
scheduler.step()
|
||||
|
||||
new_lr = float(scheduler.get_last_lr()[-1])
|
||||
if new_lr != cur_lr:
|
||||
logger.info(f"LR change! {cur_lr} -> {new_lr}")
|
||||
|
||||
if cfg.test_when_finished:
|
||||
_eval_and_dump(
|
||||
cfg,
|
||||
task,
|
||||
datasource.all_train_cameras,
|
||||
datasets,
|
||||
dataloaders,
|
||||
model,
|
||||
stats,
|
||||
device=device,
|
||||
)
|
||||
|
||||
|
||||
def _eval_and_dump(
|
||||
cfg,
|
||||
task: Task,
|
||||
all_train_cameras: Optional[CamerasBase],
|
||||
datasets: DatasetMap,
|
||||
dataloaders: DataLoaderMap,
|
||||
model,
|
||||
stats,
|
||||
device,
|
||||
) -> None:
|
||||
"""
|
||||
Run the evaluation loop with the test data loader and
|
||||
save the predictions to the `exp_dir`.
|
||||
"""
|
||||
|
||||
dataloader = dataloaders.test
|
||||
|
||||
if dataloader is None:
|
||||
raise ValueError('DataLoaderMap have to contain the "test" entry for eval!')
|
||||
|
||||
results = _run_eval(
|
||||
model,
|
||||
all_train_cameras,
|
||||
dataloader,
|
||||
task,
|
||||
camera_difficulty_bin_breaks=cfg.camera_difficulty_bin_breaks,
|
||||
device=device,
|
||||
)
|
||||
|
||||
# add the evaluation epoch to the results
|
||||
for r in results:
|
||||
r["eval_epoch"] = int(stats.epoch)
|
||||
|
||||
logger.info("Evaluation results")
|
||||
evaluate.pretty_print_nvs_metrics(results)
|
||||
|
||||
with open(os.path.join(cfg.exp_dir, "results_test.json"), "w") as f:
|
||||
json.dump(results, f)
|
||||
|
||||
|
||||
def _get_eval_frame_data(frame_data):
|
||||
"""
|
||||
Masks the unknown image data to make sure we cannot use it at model evaluation time.
|
||||
"""
|
||||
frame_data_for_eval = copy.deepcopy(frame_data)
|
||||
is_known = ds_utils.is_known_frame(frame_data.frame_type).type_as(
|
||||
frame_data.image_rgb
|
||||
)[:, None, None, None]
|
||||
for k in ("image_rgb", "depth_map", "fg_probability", "mask_crop"):
|
||||
value_masked = getattr(frame_data_for_eval, k).clone() * is_known
|
||||
setattr(frame_data_for_eval, k, value_masked)
|
||||
return frame_data_for_eval
|
||||
|
||||
|
||||
def _run_eval(
|
||||
model,
|
||||
all_train_cameras,
|
||||
loader,
|
||||
task: Task,
|
||||
camera_difficulty_bin_breaks: Tuple[float, float],
|
||||
device,
|
||||
):
|
||||
"""
|
||||
Run the evaluation loop on the test dataloader
|
||||
"""
|
||||
lpips_model = lpips.LPIPS(net="vgg")
|
||||
lpips_model = lpips_model.to(device)
|
||||
|
||||
model.eval()
|
||||
|
||||
per_batch_eval_results = []
|
||||
logger.info("Evaluating model ...")
|
||||
for frame_data in tqdm.tqdm(loader):
|
||||
frame_data = frame_data.to(device)
|
||||
|
||||
# mask out the unknown images so that the model does not see them
|
||||
frame_data_for_eval = _get_eval_frame_data(frame_data)
|
||||
|
||||
with torch.no_grad():
|
||||
preds = model(
|
||||
**{**frame_data_for_eval, "evaluation_mode": EvaluationMode.EVALUATION}
|
||||
)
|
||||
|
||||
# TODO: Cannot use accelerate gather for two reasons:.
|
||||
# (1) TypeError: Can't apply _gpu_gather_one on object of type
|
||||
# <class 'pytorch3d.implicitron.models.base_model.ImplicitronRender'>,
|
||||
# only of nested list/tuple/dicts of objects that satisfy is_torch_tensor.
|
||||
# (2) Same error above but for frame_data which contains Cameras.
|
||||
|
||||
implicitron_render = copy.deepcopy(preds["implicitron_render"])
|
||||
|
||||
per_batch_eval_results.append(
|
||||
evaluate.eval_batch(
|
||||
frame_data,
|
||||
implicitron_render,
|
||||
bg_color="black",
|
||||
lpips_model=lpips_model,
|
||||
source_cameras=all_train_cameras,
|
||||
)
|
||||
)
|
||||
|
||||
_, category_result = evaluate.summarize_nvs_eval_results(
|
||||
per_batch_eval_results, task, camera_difficulty_bin_breaks
|
||||
)
|
||||
|
||||
return category_result["results"]
|
||||
|
||||
|
||||
def _seed_all_random_engines(seed: int) -> None:
|
||||
np.random.seed(seed)
|
||||
torch.manual_seed(seed)
|
||||
random.seed(seed)
|
||||
|
||||
|
||||
def _setup_envvars_for_cluster() -> bool:
|
||||
"""
|
||||
Prepares to run on cluster if relevant.
|
||||
Returns whether FAIR cluster in use.
|
||||
"""
|
||||
# TODO: How much of this is needed in general?
|
||||
|
||||
try:
|
||||
import submitit
|
||||
except ImportError:
|
||||
return False
|
||||
|
||||
try:
|
||||
# Only needed when launching on cluster with slurm and submitit
|
||||
job_env = submitit.JobEnvironment()
|
||||
except RuntimeError:
|
||||
return False
|
||||
|
||||
os.environ["LOCAL_RANK"] = str(job_env.local_rank)
|
||||
os.environ["RANK"] = str(job_env.global_rank)
|
||||
os.environ["WORLD_SIZE"] = str(job_env.num_tasks)
|
||||
os.environ["MASTER_ADDR"] = "localhost"
|
||||
os.environ["MASTER_PORT"] = "42918"
|
||||
logger.info(
|
||||
"Num tasks %s, global_rank %s"
|
||||
% (str(job_env.num_tasks), str(job_env.global_rank))
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
expand_args_fields(ExperimentConfig)
|
||||
cs = hydra.core.config_store.ConfigStore.instance()
|
||||
cs.store(name="default_config", node=ExperimentConfig)
|
||||
|
||||
|
||||
@hydra.main(config_path="./configs/", config_name="default_config")
|
||||
def experiment(cfg: DictConfig) -> None:
|
||||
# CUDA_VISIBLE_DEVICES must have been set.
|
||||
|
||||
if "CUDA_DEVICE_ORDER" not in os.environ:
|
||||
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
|
||||
|
||||
if not _setup_envvars_for_cluster():
|
||||
logger.info("Running locally")
|
||||
|
||||
# TODO: The following may be needed for hydra/submitit it to work
|
||||
expand_args_fields(GenericModel)
|
||||
expand_args_fields(AdaptiveRaySampler)
|
||||
expand_args_fields(MultiPassEmissionAbsorptionRenderer)
|
||||
expand_args_fields(ImplicitronDataSource)
|
||||
|
||||
run_training(cfg)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
experiment()
|
||||
5
projects/implicitron_trainer/impl/__init__.py
Normal file
5
projects/implicitron_trainer/impl/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
49
projects/implicitron_trainer/impl/experiment_config.py
Normal file
49
projects/implicitron_trainer/impl/experiment_config.py
Normal file
@@ -0,0 +1,49 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
from dataclasses import field
|
||||
from typing import Any, Dict, Tuple
|
||||
|
||||
from omegaconf import DictConfig
|
||||
from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource
|
||||
from pytorch3d.implicitron.models.generic_model import GenericModel
|
||||
from pytorch3d.implicitron.tools.config import Configurable, get_default_args_field
|
||||
|
||||
from .optimization import init_optimizer
|
||||
|
||||
|
||||
class ExperimentConfig(Configurable):
|
||||
generic_model_args: DictConfig = get_default_args_field(GenericModel)
|
||||
solver_args: DictConfig = get_default_args_field(init_optimizer)
|
||||
data_source_args: DictConfig = get_default_args_field(ImplicitronDataSource)
|
||||
architecture: str = "generic"
|
||||
detect_anomaly: bool = False
|
||||
eval_only: bool = False
|
||||
exp_dir: str = "./data/default_experiment/"
|
||||
exp_idx: int = 0
|
||||
gpu_idx: int = 0
|
||||
metric_print_interval: int = 5
|
||||
resume: bool = True
|
||||
resume_epoch: int = -1
|
||||
seed: int = 0
|
||||
store_checkpoints: bool = True
|
||||
store_checkpoints_purge: int = 1
|
||||
test_interval: int = -1
|
||||
test_when_finished: bool = False
|
||||
validation_interval: int = 1
|
||||
visdom_env: str = ""
|
||||
visdom_port: int = 8097
|
||||
visdom_server: str = "http://127.0.0.1"
|
||||
visualize_interval: int = 1000
|
||||
clip_grad: float = 0.0
|
||||
camera_difficulty_bin_breaks: Tuple[float, ...] = 0.97, 0.98
|
||||
|
||||
hydra: Dict[str, Any] = field(
|
||||
default_factory=lambda: {
|
||||
"run": {"dir": "."}, # Make hydra not change the working dir.
|
||||
"output_subdir": None, # disable storing the .hydra logs
|
||||
}
|
||||
)
|
||||
109
projects/implicitron_trainer/impl/optimization.py
Normal file
109
projects/implicitron_trainer/impl/optimization.py
Normal file
@@ -0,0 +1,109 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, Optional, Tuple
|
||||
|
||||
import torch
|
||||
from pytorch3d.implicitron.models.generic_model import GenericModel
|
||||
from pytorch3d.implicitron.tools.config import enable_get_default_args
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def init_optimizer(
|
||||
model: GenericModel,
|
||||
optimizer_state: Optional[Dict[str, Any]],
|
||||
last_epoch: int,
|
||||
breed: str = "adam",
|
||||
weight_decay: float = 0.0,
|
||||
lr_policy: str = "multistep",
|
||||
lr: float = 0.0005,
|
||||
gamma: float = 0.1,
|
||||
momentum: float = 0.9,
|
||||
betas: Tuple[float, ...] = (0.9, 0.999),
|
||||
milestones: Tuple[int, ...] = (),
|
||||
max_epochs: int = 1000,
|
||||
):
|
||||
"""
|
||||
Initialize the optimizer (optionally from checkpoint state)
|
||||
and the learning rate scheduler.
|
||||
|
||||
Args:
|
||||
model: The model with optionally loaded weights
|
||||
optimizer_state: The state dict for the optimizer. If None
|
||||
it has not been loaded from checkpoint
|
||||
last_epoch: If the model was loaded from checkpoint this will be the
|
||||
number of the last epoch that was saved
|
||||
breed: The type of optimizer to use e.g. adam
|
||||
weight_decay: The optimizer weight_decay (L2 penalty on model weights)
|
||||
lr_policy: The policy to use for learning rate. Currently, only "multistep:
|
||||
is supported.
|
||||
lr: The value for the initial learning rate
|
||||
gamma: Multiplicative factor of learning rate decay
|
||||
momentum: Momentum factor for SGD optimizer
|
||||
betas: Coefficients used for computing running averages of gradient and its square
|
||||
in the Adam optimizer
|
||||
milestones: List of increasing epoch indices at which the learning rate is
|
||||
modified
|
||||
max_epochs: The maximum number of epochs to run the optimizer for
|
||||
|
||||
Returns:
|
||||
optimizer: Optimizer module, optionally loaded from checkpoint
|
||||
scheduler: Learning rate scheduler module
|
||||
|
||||
Raise:
|
||||
ValueError if `breed` or `lr_policy` are not supported.
|
||||
"""
|
||||
|
||||
# Get the parameters to optimize
|
||||
if hasattr(model, "_get_param_groups"): # use the model function
|
||||
# pyre-ignore[29]
|
||||
p_groups = model._get_param_groups(lr, wd=weight_decay)
|
||||
else:
|
||||
allprm = [prm for prm in model.parameters() if prm.requires_grad]
|
||||
p_groups = [{"params": allprm, "lr": lr}]
|
||||
|
||||
# Intialize the optimizer
|
||||
if breed == "sgd":
|
||||
optimizer = torch.optim.SGD(
|
||||
p_groups, lr=lr, momentum=momentum, weight_decay=weight_decay
|
||||
)
|
||||
elif breed == "adagrad":
|
||||
optimizer = torch.optim.Adagrad(p_groups, lr=lr, weight_decay=weight_decay)
|
||||
elif breed == "adam":
|
||||
optimizer = torch.optim.Adam(
|
||||
p_groups, lr=lr, betas=betas, weight_decay=weight_decay
|
||||
)
|
||||
else:
|
||||
raise ValueError("no such solver type %s" % breed)
|
||||
logger.info(" -> solver type = %s" % breed)
|
||||
|
||||
# Load state from checkpoint
|
||||
if optimizer_state is not None:
|
||||
logger.info(" -> setting loaded optimizer state")
|
||||
optimizer.load_state_dict(optimizer_state)
|
||||
|
||||
# Initialize the learning rate scheduler
|
||||
if lr_policy == "multistep":
|
||||
scheduler = torch.optim.lr_scheduler.MultiStepLR(
|
||||
optimizer,
|
||||
milestones=milestones,
|
||||
gamma=gamma,
|
||||
)
|
||||
else:
|
||||
raise ValueError("no such lr policy %s" % lr_policy)
|
||||
|
||||
# When loading from checkpoint, this will make sure that the
|
||||
# lr is correctly set even after returning
|
||||
for _ in range(last_epoch):
|
||||
scheduler.step()
|
||||
|
||||
optimizer.zero_grad()
|
||||
return optimizer, scheduler
|
||||
|
||||
|
||||
enable_get_default_args(init_optimizer)
|
||||
5
projects/implicitron_trainer/tests/__init__.py
Normal file
5
projects/implicitron_trainer/tests/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
425
projects/implicitron_trainer/tests/experiment.yaml
Normal file
425
projects/implicitron_trainer/tests/experiment.yaml
Normal file
@@ -0,0 +1,425 @@
|
||||
generic_model_args:
|
||||
mask_images: true
|
||||
mask_depths: true
|
||||
render_image_width: 400
|
||||
render_image_height: 400
|
||||
mask_threshold: 0.5
|
||||
output_rasterized_mc: false
|
||||
bg_color:
|
||||
- 0.0
|
||||
- 0.0
|
||||
- 0.0
|
||||
num_passes: 1
|
||||
chunk_size_grid: 4096
|
||||
render_features_dimensions: 3
|
||||
tqdm_trigger_threshold: 16
|
||||
n_train_target_views: 1
|
||||
sampling_mode_training: mask_sample
|
||||
sampling_mode_evaluation: full_grid
|
||||
global_encoder_class_type: null
|
||||
raysampler_class_type: AdaptiveRaySampler
|
||||
renderer_class_type: MultiPassEmissionAbsorptionRenderer
|
||||
image_feature_extractor_class_type: null
|
||||
view_pooler_enabled: false
|
||||
implicit_function_class_type: NeuralRadianceFieldImplicitFunction
|
||||
view_metrics_class_type: ViewMetrics
|
||||
regularization_metrics_class_type: RegularizationMetrics
|
||||
loss_weights:
|
||||
loss_rgb_mse: 1.0
|
||||
loss_prev_stage_rgb_mse: 1.0
|
||||
loss_mask_bce: 0.0
|
||||
loss_prev_stage_mask_bce: 0.0
|
||||
log_vars:
|
||||
- loss_rgb_psnr_fg
|
||||
- loss_rgb_psnr
|
||||
- loss_rgb_mse
|
||||
- loss_rgb_huber
|
||||
- loss_depth_abs
|
||||
- loss_depth_abs_fg
|
||||
- loss_mask_neg_iou
|
||||
- loss_mask_bce
|
||||
- loss_mask_beta_prior
|
||||
- loss_eikonal
|
||||
- loss_density_tv
|
||||
- loss_depth_neg_penalty
|
||||
- loss_autodecoder_norm
|
||||
- loss_prev_stage_rgb_mse
|
||||
- loss_prev_stage_rgb_psnr_fg
|
||||
- loss_prev_stage_rgb_psnr
|
||||
- loss_prev_stage_mask_bce
|
||||
- objective
|
||||
- epoch
|
||||
- sec/it
|
||||
global_encoder_HarmonicTimeEncoder_args:
|
||||
n_harmonic_functions: 10
|
||||
append_input: true
|
||||
time_divisor: 1.0
|
||||
global_encoder_SequenceAutodecoder_args:
|
||||
autodecoder_args:
|
||||
encoding_dim: 0
|
||||
n_instances: 0
|
||||
init_scale: 1.0
|
||||
ignore_input: false
|
||||
raysampler_AdaptiveRaySampler_args:
|
||||
image_width: 400
|
||||
image_height: 400
|
||||
sampling_mode_training: mask_sample
|
||||
sampling_mode_evaluation: full_grid
|
||||
n_pts_per_ray_training: 64
|
||||
n_pts_per_ray_evaluation: 64
|
||||
n_rays_per_image_sampled_from_mask: 1024
|
||||
stratified_point_sampling_training: true
|
||||
stratified_point_sampling_evaluation: false
|
||||
scene_extent: 8.0
|
||||
scene_center:
|
||||
- 0.0
|
||||
- 0.0
|
||||
- 0.0
|
||||
raysampler_NearFarRaySampler_args:
|
||||
image_width: 400
|
||||
image_height: 400
|
||||
sampling_mode_training: mask_sample
|
||||
sampling_mode_evaluation: full_grid
|
||||
n_pts_per_ray_training: 64
|
||||
n_pts_per_ray_evaluation: 64
|
||||
n_rays_per_image_sampled_from_mask: 1024
|
||||
stratified_point_sampling_training: true
|
||||
stratified_point_sampling_evaluation: false
|
||||
min_depth: 0.1
|
||||
max_depth: 8.0
|
||||
renderer_LSTMRenderer_args:
|
||||
num_raymarch_steps: 10
|
||||
init_depth: 17.0
|
||||
init_depth_noise_std: 0.0005
|
||||
hidden_size: 16
|
||||
n_feature_channels: 256
|
||||
bg_color: null
|
||||
verbose: false
|
||||
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
||||
raymarcher_class_type: EmissionAbsorptionRaymarcher
|
||||
n_pts_per_ray_fine_training: 64
|
||||
n_pts_per_ray_fine_evaluation: 64
|
||||
stratified_sampling_coarse_training: true
|
||||
stratified_sampling_coarse_evaluation: false
|
||||
append_coarse_samples_to_fine: true
|
||||
density_noise_std_train: 0.0
|
||||
return_weights: false
|
||||
raymarcher_CumsumRaymarcher_args:
|
||||
surface_thickness: 1
|
||||
bg_color:
|
||||
- 0.0
|
||||
background_opacity: 0.0
|
||||
density_relu: true
|
||||
blend_output: false
|
||||
raymarcher_EmissionAbsorptionRaymarcher_args:
|
||||
surface_thickness: 1
|
||||
bg_color:
|
||||
- 0.0
|
||||
background_opacity: 10000000000.0
|
||||
density_relu: true
|
||||
blend_output: false
|
||||
renderer_SignedDistanceFunctionRenderer_args:
|
||||
render_features_dimensions: 3
|
||||
ray_tracer_args:
|
||||
object_bounding_sphere: 1.0
|
||||
sdf_threshold: 5.0e-05
|
||||
line_search_step: 0.5
|
||||
line_step_iters: 1
|
||||
sphere_tracing_iters: 10
|
||||
n_steps: 100
|
||||
n_secant_steps: 8
|
||||
ray_normal_coloring_network_args:
|
||||
feature_vector_size: 3
|
||||
mode: idr
|
||||
d_in: 9
|
||||
d_out: 3
|
||||
dims:
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
weight_norm: true
|
||||
n_harmonic_functions_dir: 0
|
||||
pooled_feature_dim: 0
|
||||
bg_color:
|
||||
- 0.0
|
||||
soft_mask_alpha: 50.0
|
||||
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||
name: resnet34
|
||||
pretrained: true
|
||||
stages:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
- 4
|
||||
normalize_image: true
|
||||
image_rescale: 0.16
|
||||
first_max_pool: true
|
||||
proj_dim: 32
|
||||
l2_norm: true
|
||||
add_masks: true
|
||||
add_images: true
|
||||
global_average_pool: false
|
||||
feature_rescale: 1.0
|
||||
view_pooler_args:
|
||||
feature_aggregator_class_type: AngleWeightedReductionFeatureAggregator
|
||||
view_sampler_args:
|
||||
masked_sampling: false
|
||||
sampling_mode: bilinear
|
||||
feature_aggregator_AngleWeightedIdentityFeatureAggregator_args:
|
||||
exclude_target_view: true
|
||||
exclude_target_view_mask_features: true
|
||||
concatenate_output: true
|
||||
weight_by_ray_angle_gamma: 1.0
|
||||
min_ray_angle_weight: 0.1
|
||||
feature_aggregator_AngleWeightedReductionFeatureAggregator_args:
|
||||
exclude_target_view: true
|
||||
exclude_target_view_mask_features: true
|
||||
concatenate_output: true
|
||||
reduction_functions:
|
||||
- AVG
|
||||
- STD
|
||||
weight_by_ray_angle_gamma: 1.0
|
||||
min_ray_angle_weight: 0.1
|
||||
feature_aggregator_IdentityFeatureAggregator_args:
|
||||
exclude_target_view: true
|
||||
exclude_target_view_mask_features: true
|
||||
concatenate_output: true
|
||||
feature_aggregator_ReductionFeatureAggregator_args:
|
||||
exclude_target_view: true
|
||||
exclude_target_view_mask_features: true
|
||||
concatenate_output: true
|
||||
reduction_functions:
|
||||
- AVG
|
||||
- STD
|
||||
implicit_function_IdrFeatureField_args:
|
||||
feature_vector_size: 3
|
||||
d_in: 3
|
||||
d_out: 1
|
||||
dims:
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
- 512
|
||||
geometric_init: true
|
||||
bias: 1.0
|
||||
skip_in: []
|
||||
weight_norm: true
|
||||
n_harmonic_functions_xyz: 0
|
||||
pooled_feature_dim: 0
|
||||
encoding_dim: 0
|
||||
implicit_function_NeRFormerImplicitFunction_args:
|
||||
n_harmonic_functions_xyz: 10
|
||||
n_harmonic_functions_dir: 4
|
||||
n_hidden_neurons_dir: 128
|
||||
latent_dim: 0
|
||||
input_xyz: true
|
||||
xyz_ray_dir_in_camera_coords: false
|
||||
color_dim: 3
|
||||
transformer_dim_down_factor: 2.0
|
||||
n_hidden_neurons_xyz: 80
|
||||
n_layers_xyz: 2
|
||||
append_xyz:
|
||||
- 1
|
||||
implicit_function_NeuralRadianceFieldImplicitFunction_args:
|
||||
n_harmonic_functions_xyz: 10
|
||||
n_harmonic_functions_dir: 4
|
||||
n_hidden_neurons_dir: 128
|
||||
latent_dim: 0
|
||||
input_xyz: true
|
||||
xyz_ray_dir_in_camera_coords: false
|
||||
color_dim: 3
|
||||
transformer_dim_down_factor: 1.0
|
||||
n_hidden_neurons_xyz: 256
|
||||
n_layers_xyz: 8
|
||||
append_xyz:
|
||||
- 5
|
||||
implicit_function_SRNHyperNetImplicitFunction_args:
|
||||
hypernet_args:
|
||||
n_harmonic_functions: 3
|
||||
n_hidden_units: 256
|
||||
n_layers: 2
|
||||
n_hidden_units_hypernet: 256
|
||||
n_layers_hypernet: 1
|
||||
in_features: 3
|
||||
out_features: 256
|
||||
latent_dim_hypernet: 0
|
||||
latent_dim: 0
|
||||
xyz_in_camera_coords: false
|
||||
pixel_generator_args:
|
||||
n_harmonic_functions: 4
|
||||
n_hidden_units: 256
|
||||
n_hidden_units_color: 128
|
||||
n_layers: 2
|
||||
in_features: 256
|
||||
out_features: 3
|
||||
ray_dir_in_camera_coords: false
|
||||
implicit_function_SRNImplicitFunction_args:
|
||||
raymarch_function_args:
|
||||
n_harmonic_functions: 3
|
||||
n_hidden_units: 256
|
||||
n_layers: 2
|
||||
in_features: 3
|
||||
out_features: 256
|
||||
latent_dim: 0
|
||||
xyz_in_camera_coords: false
|
||||
raymarch_function: null
|
||||
pixel_generator_args:
|
||||
n_harmonic_functions: 4
|
||||
n_hidden_units: 256
|
||||
n_hidden_units_color: 128
|
||||
n_layers: 2
|
||||
in_features: 256
|
||||
out_features: 3
|
||||
ray_dir_in_camera_coords: false
|
||||
view_metrics_ViewMetrics_args: {}
|
||||
regularization_metrics_RegularizationMetrics_args: {}
|
||||
solver_args:
|
||||
breed: adam
|
||||
weight_decay: 0.0
|
||||
lr_policy: multistep
|
||||
lr: 0.0005
|
||||
gamma: 0.1
|
||||
momentum: 0.9
|
||||
betas:
|
||||
- 0.9
|
||||
- 0.999
|
||||
milestones: []
|
||||
max_epochs: 1000
|
||||
data_source_args:
|
||||
dataset_map_provider_class_type: ???
|
||||
data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
|
||||
dataset_map_provider_BlenderDatasetMapProvider_args:
|
||||
base_dir: ???
|
||||
object_name: ???
|
||||
path_manager_factory_class_type: PathManagerFactory
|
||||
n_known_frames_for_test: null
|
||||
path_manager_factory_PathManagerFactory_args:
|
||||
silence_logs: true
|
||||
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
||||
category: ???
|
||||
task_str: singlesequence
|
||||
dataset_root: ''
|
||||
n_frames_per_sequence: -1
|
||||
test_on_train: false
|
||||
restrict_sequence_name: []
|
||||
test_restrict_sequence_id: -1
|
||||
assert_single_seq: false
|
||||
only_test_set: false
|
||||
dataset_class_type: JsonIndexDataset
|
||||
path_manager_factory_class_type: PathManagerFactory
|
||||
dataset_JsonIndexDataset_args:
|
||||
limit_to: 0
|
||||
limit_sequences_to: 0
|
||||
exclude_sequence: []
|
||||
limit_category_to: []
|
||||
load_images: true
|
||||
load_depths: true
|
||||
load_depth_masks: true
|
||||
load_masks: true
|
||||
load_point_clouds: false
|
||||
max_points: 0
|
||||
mask_images: false
|
||||
mask_depths: false
|
||||
image_height: 800
|
||||
image_width: 800
|
||||
box_crop: true
|
||||
box_crop_mask_thr: 0.4
|
||||
box_crop_context: 0.3
|
||||
remove_empty_masks: true
|
||||
seed: 0
|
||||
sort_frames: false
|
||||
path_manager_factory_PathManagerFactory_args:
|
||||
silence_logs: true
|
||||
dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
|
||||
category: ???
|
||||
subset_name: ???
|
||||
dataset_root: ''
|
||||
test_on_train: false
|
||||
only_test_set: false
|
||||
load_eval_batches: true
|
||||
dataset_class_type: JsonIndexDataset
|
||||
path_manager_factory_class_type: PathManagerFactory
|
||||
dataset_JsonIndexDataset_args:
|
||||
path_manager: null
|
||||
frame_annotations_file: ''
|
||||
sequence_annotations_file: ''
|
||||
subset_lists_file: ''
|
||||
subsets: null
|
||||
limit_to: 0
|
||||
limit_sequences_to: 0
|
||||
pick_sequence: []
|
||||
exclude_sequence: []
|
||||
limit_category_to: []
|
||||
dataset_root: ''
|
||||
load_images: true
|
||||
load_depths: true
|
||||
load_depth_masks: true
|
||||
load_masks: true
|
||||
load_point_clouds: false
|
||||
max_points: 0
|
||||
mask_images: false
|
||||
mask_depths: false
|
||||
image_height: 800
|
||||
image_width: 800
|
||||
box_crop: true
|
||||
box_crop_mask_thr: 0.4
|
||||
box_crop_context: 0.3
|
||||
remove_empty_masks: true
|
||||
n_frames_per_sequence: -1
|
||||
seed: 0
|
||||
sort_frames: false
|
||||
eval_batches: null
|
||||
path_manager_factory_PathManagerFactory_args:
|
||||
silence_logs: true
|
||||
dataset_map_provider_LlffDatasetMapProvider_args:
|
||||
base_dir: ???
|
||||
object_name: ???
|
||||
path_manager_factory_class_type: PathManagerFactory
|
||||
n_known_frames_for_test: null
|
||||
path_manager_factory_PathManagerFactory_args:
|
||||
silence_logs: true
|
||||
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||
batch_size: 1
|
||||
num_workers: 0
|
||||
dataset_length_train: 0
|
||||
dataset_length_val: 0
|
||||
dataset_length_test: 0
|
||||
train_conditioning_type: SAME
|
||||
val_conditioning_type: SAME
|
||||
test_conditioning_type: KNOWN
|
||||
images_per_seq_options: []
|
||||
sample_consecutive_frames: false
|
||||
consecutive_frames_max_gap: 0
|
||||
consecutive_frames_max_gap_seconds: 0.1
|
||||
architecture: generic
|
||||
detect_anomaly: false
|
||||
eval_only: false
|
||||
exp_dir: ./data/default_experiment/
|
||||
exp_idx: 0
|
||||
gpu_idx: 0
|
||||
metric_print_interval: 5
|
||||
resume: true
|
||||
resume_epoch: -1
|
||||
seed: 0
|
||||
store_checkpoints: true
|
||||
store_checkpoints_purge: 1
|
||||
test_interval: -1
|
||||
test_when_finished: false
|
||||
validation_interval: 1
|
||||
visdom_env: ''
|
||||
visdom_port: 8097
|
||||
visdom_server: http://127.0.0.1
|
||||
visualize_interval: 1000
|
||||
clip_grad: 0.0
|
||||
camera_difficulty_bin_breaks:
|
||||
- 0.97
|
||||
- 0.98
|
||||
hydra:
|
||||
run:
|
||||
dir: .
|
||||
output_subdir: null
|
||||
91
projects/implicitron_trainer/tests/test_experiment.py
Normal file
91
projects/implicitron_trainer/tests/test_experiment.py
Normal file
@@ -0,0 +1,91 @@
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import os
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
|
||||
from hydra import compose, initialize_config_dir
|
||||
from omegaconf import OmegaConf
|
||||
|
||||
from .. import experiment
|
||||
|
||||
|
||||
def interactive_testing_requested() -> bool:
    """
    Report whether interactive-only tests should run.

    Certain tests are only useful when run interactively, and so are not
    regularly run. The user activates them by setting the environment
    variable `PYTORCH3D_INTERACTIVE_TESTING` to 1.
    """
    flag = os.environ.get("PYTORCH3D_INTERACTIVE_TESTING", "")
    return flag == "1"
|
||||
|
||||
|
||||
# True when running in the internal (FB) test environment.
internal = os.environ.get("FB_TEST", False)


# Directory containing this test file and its reference data (experiment.yaml).
DATA_DIR = Path(__file__).resolve().parent
# Directory containing the shipped implicitron trainer yaml configs.
IMPLICITRON_CONFIGS_DIR = Path(__file__).resolve().parent.parent / "configs"
# Set to True to regenerate the reference experiment.yaml instead of comparing.
DEBUG: bool = False
|
||||
|
||||
# TODO:
|
||||
# - add enough files to skateboard_first_5 that this works on RE.
|
||||
# - share common code with PyTorch3D tests?
|
||||
# - deal with the temporary output files this test creates
|
||||
|
||||
|
||||
class TestExperiment(unittest.TestCase):
    """Tests for the implicitron_trainer experiment entry point and configs."""

    def setUp(self):
        # The yaml comparison in test_yaml_contents produces long diffs;
        # show them in full on failure.
        self.maxDiff = None

    def test_from_defaults(self):
        # Test making minimal changes to the dataclass defaults.
        # Interactive-only: needs internal dataset access — TODO confirm.
        if not interactive_testing_requested() or not internal:
            return
        cfg = OmegaConf.structured(experiment.ExperimentConfig)
        cfg.data_source_args.dataset_map_provider_class_type = (
            "JsonIndexDatasetMapProvider"
        )
        dataset_args = (
            cfg.data_source_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
        )
        dataloader_args = (
            cfg.data_source_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
        )
        # Keep the run tiny: one small category, low resolution, few epochs.
        dataset_args.category = "skateboard"
        dataset_args.test_restrict_sequence_id = 0
        dataset_args.dataset_root = "manifold://co3d/tree/extracted"
        dataset_args.dataset_JsonIndexDataset_args.limit_sequences_to = 5
        dataset_args.dataset_JsonIndexDataset_args.image_height = 80
        dataset_args.dataset_JsonIndexDataset_args.image_width = 80
        dataloader_args.dataset_length_train = 1
        dataloader_args.dataset_length_val = 1
        cfg.solver_args.max_epochs = 2

        experiment.run_training(cfg)

    def test_yaml_contents(self):
        # The default config must match the checked-in experiment.yaml.
        # Set DEBUG = True above to regenerate the reference file.
        cfg = OmegaConf.structured(experiment.ExperimentConfig)
        yaml = OmegaConf.to_yaml(cfg, sort_keys=False)
        if DEBUG:
            (DATA_DIR / "experiment.yaml").write_text(yaml)
        self.assertEqual(yaml, (DATA_DIR / "experiment.yaml").read_text())

    def test_load_configs(self):
        # Every shipped repro config (except the *_base.yaml partials)
        # must compose without errors.
        config_files = []

        for pattern in ("repro_singleseq*.yaml", "repro_multiseq*.yaml"):
            config_files.extend(
                [
                    f
                    for f in IMPLICITRON_CONFIGS_DIR.glob(pattern)
                    if not f.name.endswith("_base.yaml")
                ]
            )

        for file in config_files:
            with self.subTest(file.name):
                with initialize_config_dir(config_dir=str(IMPLICITRON_CONFIGS_DIR)):
                    compose(file.name)
|
||||
393
projects/implicitron_trainer/visualize_reconstruction.py
Normal file
393
projects/implicitron_trainer/visualize_reconstruction.py
Normal file
@@ -0,0 +1,393 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
"""Script to visualize a previously trained model. Example call:
|
||||
|
||||
projects/implicitron_trainer/visualize_reconstruction.py
|
||||
exp_dir='./exps/checkpoint_dir' visdom_show_preds=True visdom_port=8097
|
||||
n_eval_cameras=40 render_size="[64,64]" video_size="[256,256]"
|
||||
"""
|
||||
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as Fu
|
||||
from omegaconf import OmegaConf
|
||||
from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource
|
||||
from pytorch3d.implicitron.dataset.dataset_base import DatasetBase, FrameData
|
||||
from pytorch3d.implicitron.dataset.utils import is_train_frame
|
||||
from pytorch3d.implicitron.models.base_model import EvaluationMode
|
||||
from pytorch3d.implicitron.tools.configurable import get_default_args
|
||||
from pytorch3d.implicitron.tools.eval_video_trajectory import (
|
||||
generate_eval_video_cameras,
|
||||
)
|
||||
from pytorch3d.implicitron.tools.video_writer import VideoWriter
|
||||
from pytorch3d.implicitron.tools.vis_utils import (
|
||||
get_visdom_connection,
|
||||
make_depth_image,
|
||||
)
|
||||
from tqdm import tqdm
|
||||
|
||||
from .experiment import init_model
|
||||
|
||||
|
||||
def render_sequence(
    dataset: DatasetBase,
    sequence_name: str,
    model: torch.nn.Module,
    video_path,
    n_eval_cameras=40,
    fps=20,
    max_angle=2 * math.pi,
    trajectory_type="circular_lsq_fit",
    trajectory_scale=1.1,
    scene_center=(0.0, 0.0, 0.0),
    up=(0.0, -1.0, 0.0),
    traj_offset=0.0,
    n_source_views=9,
    viz_env="debug",
    visdom_show_preds=False,
    visdom_server="http://127.0.0.1",
    visdom_port=8097,
    num_workers=10,
    seed=None,
    video_resize=None,
):
    """
    Render `model`'s predictions for one sequence of `dataset` along a
    generated evaluation-camera trajectory and export the renders as videos.

    Args:
        dataset: Dataset containing the frames of the sequence.
        sequence_name: Name of the sequence to render.
        model: Trained model, called with the batch and
            `evaluation_mode=EvaluationMode.EVALUATION`.
        video_path: Path prefix of the exported video files.
        n_eval_cameras: Number of cameras (= video frames) on the trajectory.
        fps: Frame rate of the exported videos.
        max_angle: Angular extent of the trajectory in radians.
        trajectory_type, trajectory_scale, scene_center, up, traj_offset:
            Forwarded to `generate_eval_video_cameras` to shape and place
            the trajectory.
        n_source_views: Number of source views sampled for conditioning.
        viz_env: Visdom environment used for intermediate previews.
        visdom_show_preds: If True, periodically show predictions in visdom.
        visdom_server: Visdom server address.
        visdom_port: Visdom server port.
        num_workers: Dataloader workers used when loading the sequence.
        seed: Seed for source-view sampling; defaults to a value derived
            from the sequence name so each sequence is reproducible.
        video_resize: Optional size to which the exported videos are resized.
    """
    if seed is None:
        # Derive a per-sequence seed so the source-view choice is reproducible.
        seed = hash(sequence_name)

    if visdom_show_preds:
        viz = get_visdom_connection(server=visdom_server, port=visdom_port)
    else:
        viz = None

    print(f"Loading all data of sequence '{sequence_name}'.")
    seq_idx = list(dataset.sequence_indices_in_order(sequence_name))
    train_data = _load_whole_dataset(dataset, seq_idx, num_workers=num_workers)
    # All loaded frames must belong to the same sequence.
    assert all(train_data.sequence_name[0] == sn for sn in train_data.sequence_name)
    sequence_set_name = "train" if is_train_frame(train_data.frame_type)[0] else "test"
    print(f"Sequence set = {sequence_set_name}.")
    train_cameras = train_data.camera
    # Trajectory parameter: n_eval_cameras evenly spaced angles in [0, max_angle).
    time = torch.linspace(0, max_angle, n_eval_cameras + 1)[:n_eval_cameras]
    test_cameras = generate_eval_video_cameras(
        train_cameras,
        time=time,
        n_eval_cams=n_eval_cameras,
        trajectory_type=trajectory_type,
        trajectory_scale=trajectory_scale,
        scene_center=scene_center,
        up=up,
        focal_length=None,
        principal_point=torch.zeros(n_eval_cameras, 2),
        traj_offset_canonical=(0.0, 0.0, traj_offset),
    )

    # Sample the source views reproducibly without disturbing global RNG state.
    with torch.random.fork_rng():
        torch.manual_seed(seed)
        source_views_i = torch.randperm(len(seq_idx))[:n_source_views]
    # add the first dummy view that will get replaced with the target camera
    source_views_i = Fu.pad(source_views_i, [1, 0])
    source_views = [seq_idx[i] for i in source_views_i.tolist()]
    batch = _load_whole_dataset(dataset, source_views, num_workers=num_workers)
    assert all(batch.sequence_name[0] == sn for sn in batch.sequence_name)

    preds_total = []
    for n in tqdm(range(n_eval_cameras), total=n_eval_cameras):
        # set the first batch camera to the target camera
        for k in ("R", "T", "focal_length", "principal_point"):
            getattr(batch.camera, k)[0] = getattr(test_cameras[n], k)

        # Move to cuda
        net_input = batch.cuda()
        with torch.no_grad():
            preds = model(**{**net_input, "evaluation_mode": EvaluationMode.EVALUATION})

        # make sure we dont overwrite something
        assert all(k not in preds for k in net_input.keys())
        preds.update(net_input)  # merge everything into one big dict

        # Render the predictions to images
        rendered_pred = images_from_preds(preds)
        preds_total.append(rendered_pred)

        # show the preds every 5% of the export iterations
        if visdom_show_preds and (
            n % max(n_eval_cameras // 20, 1) == 0 or n == n_eval_cameras - 1
        ):
            show_predictions(
                preds_total,
                sequence_name=batch.sequence_name[0],
                viz=viz,
                viz_env=viz_env,
            )

    print(f"Exporting videos for sequence {sequence_name} ...")
    generate_prediction_videos(
        preds_total,
        sequence_name=batch.sequence_name[0],
        viz=viz,
        viz_env=viz_env,
        fps=fps,
        video_path=video_path,
        resize=video_resize,
    )
|
||||
|
||||
|
||||
def _load_whole_dataset(dataset, idx, num_workers=10):
    """Load the frames at indices `idx` from `dataset` as one collated batch."""
    subset = torch.utils.data.Subset(dataset, idx)
    loader = torch.utils.data.DataLoader(
        subset,
        batch_size=len(idx),
        num_workers=num_workers,
        shuffle=False,
        collate_fn=FrameData.collate,
    )
    return next(iter(loader))
|
||||
|
||||
|
||||
def images_from_preds(preds):
    """Convert a dict of model predictions into displayable 3-channel images."""
    shown_keys = (
        "image_rgb",
        "images_render",
        "fg_probability",
        "masks_render",
        "depths_render",
        "depth_map",
        "_all_source_images",
    )
    imout = {}
    for k in shown_keys:
        if k == "_all_source_images" and "image_rgb" in preds:
            # Tile all source views (everything but the first, target view)
            # into a single grid image.
            source_images = preds["image_rgb"][1:].cpu().detach().clone()
            im = _stack_images(source_images, None)[None]
        else:
            if k not in preds or preds[k] is None:
                print(f"cant show {k}")
                continue
            im = preds[k].cpu().detach().clone()
            if k.startswith("depth"):
                # Colorize the depth, masked by the rendered mask resized
                # to the depth resolution.
                mask_resize = Fu.interpolate(
                    preds["masks_render"],
                    size=preds[k].shape[2:],
                    mode="nearest",
                )
                im = make_depth_image(preds[k], mask_resize)
        if im.shape[1] == 1:
            # Replicate single-channel images to 3 channels for display.
            im = im.repeat(1, 3, 1, 1)
        imout[k] = im.detach().cpu()

    return imout
|
||||
|
||||
|
||||
def _stack_images(ims, size):
|
||||
ba = ims.shape[0]
|
||||
H = int(np.ceil(np.sqrt(ba)))
|
||||
W = H
|
||||
n_add = H * W - ba
|
||||
if n_add > 0:
|
||||
ims = torch.cat((ims, torch.zeros_like(ims[:1]).repeat(n_add, 1, 1, 1)))
|
||||
|
||||
ims = ims.view(H, W, *ims.shape[1:])
|
||||
cated = torch.cat([torch.cat(list(row), dim=2) for row in ims], dim=1)
|
||||
if size is not None:
|
||||
cated = Fu.interpolate(cated[None], size=size, mode="bilinear")[0]
|
||||
return cated.clamp(0.0, 1.0)
|
||||
|
||||
|
||||
def show_predictions(
    preds,
    sequence_name,
    viz,
    viz_env="visualizer",
    predicted_keys=(
        "images_render",
        "masks_render",
        "depths_render",
        "_all_source_images",
    ),
    n_samples=10,
    one_image_width=200,
):
    """Visualize a random subset of a list of predictions as one visdom image."""
    assert isinstance(preds, list)

    # Randomly choose a subset of the renders, sorted by order in the sequence.
    n_chosen = min(n_samples, len(preds))
    chosen = sorted(random.sample(list(range(len(preds))), n_chosen))

    columns = []
    for idx in chosen:
        # Stack all predicted images of one camera vertically into a column.
        resized = []
        for k in predicted_keys:
            im = preds[idx][k].cpu()
            scale = one_image_width / im.shape[3]
            im = torch.nn.functional.interpolate(
                im,
                scale_factor=scale,
                mode="bilinear",
            )
            resized.append(im.clamp(0.0, 1.0))
        columns.append(torch.cat(resized, dim=2))

    # Place the per-camera columns side by side.
    grid = torch.cat(columns, dim=3)[0]
    viz.image(
        grid,
        win="show_predictions",
        env=viz_env,
        opts={"title": f"pred_{sequence_name}"},
    )
|
||||
|
||||
|
||||
def generate_prediction_videos(
    preds,
    sequence_name,
    viz=None,
    viz_env="visualizer",
    predicted_keys=(
        "images_render",
        "masks_render",
        "depths_render",
        "_all_source_images",
    ),
    fps=20,
    video_path="/tmp/video",
    resize=None,
):
    """
    Create rotating videos of the objects from a list of predictions and
    optionally display them in visdom.
    """
    assert isinstance(preds, list)

    # Make sure the target video directory exists.
    os.makedirs(os.path.dirname(video_path), exist_ok=True)

    # One video writer per predicted key.
    writers = {
        k: VideoWriter(out_path=f"{video_path}_{sequence_name}_{k}.mp4", fps=fps)
        for k in predicted_keys
    }

    # Append one frame per prediction to each writer.
    for rendered_pred in tqdm(preds):
        for k, writer in writers.items():
            frame = rendered_pred[k][0].clip(0.0, 1.0).detach().cpu().numpy()
            writer.write_frame(frame, resize=resize)

    for k, writer in writers.items():
        writer.get_video(quiet=True)
        print(f"Generated {writer.out_path}.")
        if viz is not None:
            viz.video(
                videofile=writer.out_path,
                env=viz_env,
                win=k,  # we reuse the same window otherwise visdom dies
                opts={"title": sequence_name + " " + k},
            )
|
||||
|
||||
|
||||
def export_scenes(
    exp_dir: str = "",
    restrict_sequence_name: Optional[str] = None,
    output_directory: Optional[str] = None,
    render_size: Tuple[int, int] = (512, 512),
    video_size: Optional[Tuple[int, int]] = None,
    split: str = "train",  # train | val | test
    n_source_views: int = 9,
    n_eval_cameras: int = 40,
    visdom_server="http://127.0.0.1",
    visdom_port=8097,
    visdom_show_preds: bool = False,
    visdom_env: Optional[str] = None,
    gpu_idx: int = 0,
):
    """
    Render rotating videos for every sequence of one split of a dataset,
    using a model loaded from a previously trained experiment.

    Args:
        exp_dir: Directory of the experiment (checkpoint + expconfig.yaml).
        restrict_sequence_name: If set, only render this sequence.
        output_directory: Where to write the videos; defaults to
            `<exp_dir>/vis`.
        render_size: (width, height) at which the model renders.
        video_size: Optional size to which the exported videos are resized.
        split: Which dataset split to render ("train" | "val" | "test").
        n_source_views: Number of source views used for conditioning.
        n_eval_cameras: Number of cameras on the evaluation trajectory.
        visdom_server: Visdom server address.
        visdom_port: Visdom server port.
        visdom_show_preds: If True, show intermediate predictions in visdom.
        visdom_env: Visdom environment name; derived from the config if None.
        gpu_idx: Index of the GPU to use.

    Raises:
        ValueError: If the requested `split` is not provided by the dataset.
    """
    # In case an output directory is specified use it. If no output_directory
    # is specified create a vis folder inside the experiment directory.
    if output_directory is None:
        output_directory = os.path.join(exp_dir, "vis")
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    # (the original also contained a no-op `output_directory = output_directory`).
    os.makedirs(output_directory, exist_ok=True)

    # Set the random seeds
    torch.manual_seed(0)
    np.random.seed(0)

    # Get the config from the experiment_directory,
    # and overwrite relevant fields
    config = _get_config_from_experiment_directory(exp_dir)
    config.gpu_idx = gpu_idx
    config.exp_dir = exp_dir
    # important so that the CO3D dataset gets loaded in full
    dataset_args = (
        config.data_source_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
    )
    dataset_args.test_on_train = False
    # Set the rendering image size
    config.generic_model_args.render_image_width = render_size[0]
    config.generic_model_args.render_image_height = render_size[1]
    if restrict_sequence_name is not None:
        dataset_args.restrict_sequence_name = restrict_sequence_name

    # Set up the CUDA env for the visualization
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu_idx)

    # Load the previously trained model
    model, _, _ = init_model(cfg=config, force_load=True, load_model_only=True)
    model.cuda()
    model.eval()

    # Setup the dataset
    datasource = ImplicitronDataSource(**config.data_source_args)
    dataset_map = datasource.dataset_map_provider.get_dataset_map()
    dataset = dataset_map[split]
    if dataset is None:
        raise ValueError(f"{split} dataset not provided")

    # iterate over the sequences in the dataset
    for sequence_name in dataset.sequence_names():
        with torch.no_grad():
            render_sequence(
                dataset,
                sequence_name,
                model,
                video_path="{}/video".format(output_directory),
                n_source_views=n_source_views,
                visdom_show_preds=visdom_show_preds,
                n_eval_cameras=n_eval_cameras,
                visdom_server=visdom_server,
                visdom_port=visdom_port,
                viz_env=f"visualizer_{config.visdom_env}"
                if visdom_env is None
                else visdom_env,
                video_resize=video_size,
            )
|
||||
|
||||
|
||||
def _get_config_from_experiment_directory(experiment_directory):
    """Load the OmegaConf config stored as expconfig.yaml in the experiment dir."""
    return OmegaConf.load(os.path.join(experiment_directory, "expconfig.yaml"))
|
||||
|
||||
|
||||
def main(argv):
    """CLI entry point: build export_scenes args from the command line and run."""
    # automatically parses arguments of export_scenes
    cli_cfg = OmegaConf.create(get_default_args(export_scenes))
    cli_cfg.update(OmegaConf.from_cli())
    with torch.no_grad():
        export_scenes(**cli_cfg)


if __name__ == "__main__":
    main(sys.argv)
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
@@ -8,7 +8,7 @@ import math
|
||||
from typing import Tuple
|
||||
|
||||
import torch
|
||||
from pytorch3d.renderer import PerspectiveCameras, look_at_view_transform
|
||||
from pytorch3d.renderer import look_at_view_transform, PerspectiveCameras
|
||||
from torch.utils.data.dataset import Dataset
|
||||
|
||||
|
||||
@@ -97,7 +97,7 @@ def generate_eval_video_cameras(
|
||||
cam_centers_on_plane.t() @ cam_centers_on_plane
|
||||
) / cam_centers_on_plane.shape[0]
|
||||
_, e_vec = torch.symeig(cov, eigenvectors=True)
|
||||
traj_radius = (cam_centers_on_plane ** 2).sum(dim=1).sqrt().mean()
|
||||
traj_radius = (cam_centers_on_plane**2).sum(dim=1).sqrt().mean()
|
||||
angle = torch.linspace(0, 2.0 * math.pi, n_eval_cams)
|
||||
traj = traj_radius * torch.stack(
|
||||
(torch.zeros_like(angle), angle.cos(), angle.sin()), dim=-1
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
import torch
|
||||
|
||||
|
||||
class HarmonicEmbedding(torch.nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
n_harmonic_functions: int = 6,
|
||||
omega0: float = 1.0,
|
||||
logspace: bool = True,
|
||||
include_input: bool = True,
|
||||
) -> None:
|
||||
"""
|
||||
Given an input tensor `x` of shape [minibatch, ... , dim],
|
||||
the harmonic embedding layer converts each feature
|
||||
in `x` into a series of harmonic features `embedding`,
|
||||
where for each i in range(dim) the following are present
|
||||
in embedding[...]:
|
||||
```
|
||||
[
|
||||
sin(x[..., i]),
|
||||
sin(f_1*x[..., i]),
|
||||
sin(f_2*x[..., i]),
|
||||
...
|
||||
sin(f_N * x[..., i]),
|
||||
cos(x[..., i]),
|
||||
cos(f_1*x[..., i]),
|
||||
cos(f_2*x[..., i]),
|
||||
...
|
||||
cos(f_N * x[..., i]),
|
||||
x[..., i] # only present if include_input is True.
|
||||
]
|
||||
```
|
||||
where N corresponds to `n_harmonic_functions`, and f_i is a scalar
|
||||
denoting the i-th frequency of the harmonic embedding.
|
||||
The shape of the output is [minibatch, ... , dim * (2 * N + 1)] if
|
||||
include_input is True, otherwise [minibatch, ... , dim * (2 * N)].
|
||||
|
||||
If `logspace==True`, the frequencies `[f_1, ..., f_N]` are
|
||||
powers of 2:
|
||||
`f_1 = 1, ..., f_N = 2**torch.arange(n_harmonic_functions)`
|
||||
|
||||
If `logspace==False`, frequencies are linearly spaced between
|
||||
`1.0` and `2**(n_harmonic_functions-1)`:
|
||||
`f_1, ..., f_N = torch.linspace(
|
||||
1.0, 2**(n_harmonic_functions-1), n_harmonic_functions
|
||||
)`
|
||||
|
||||
Note that `x` is also premultiplied by the base frequency `omega0`
|
||||
before evaluating the harmonic functions.
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
if logspace:
|
||||
frequencies = 2.0 ** torch.arange(
|
||||
n_harmonic_functions,
|
||||
dtype=torch.float32,
|
||||
)
|
||||
else:
|
||||
frequencies = torch.linspace(
|
||||
1.0,
|
||||
2.0 ** (n_harmonic_functions - 1),
|
||||
n_harmonic_functions,
|
||||
dtype=torch.float32,
|
||||
)
|
||||
|
||||
self.register_buffer("_frequencies", omega0 * frequencies, persistent=False)
|
||||
self.include_input = include_input
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
Args:
|
||||
x: tensor of shape [..., dim]
|
||||
Returns:
|
||||
embedding: a harmonic embedding of `x` of shape
|
||||
[..., dim * (n_harmonic_functions * 2 + T)] where
|
||||
T is 1 if include_input is True and 0 otherwise.
|
||||
"""
|
||||
embed = (x[..., None] * self._frequencies).view(*x.shape[:-1], -1)
|
||||
if self.include_input:
|
||||
return torch.cat((embed.sin(), embed.cos(), x), dim=-1)
|
||||
else:
|
||||
return torch.cat((embed.sin(), embed.cos()), dim=-1)
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
@@ -7,10 +7,8 @@
|
||||
from typing import Tuple
|
||||
|
||||
import torch
|
||||
from pytorch3d.renderer import RayBundle, ray_bundle_to_ray_points
|
||||
|
||||
from .harmonic_embedding import HarmonicEmbedding
|
||||
from .linear_with_repeat import LinearWithRepeat
|
||||
from pytorch3d.common.linear_with_repeat import LinearWithRepeat
|
||||
from pytorch3d.renderer import HarmonicEmbedding, ray_bundle_to_ray_points, RayBundle
|
||||
|
||||
|
||||
def _xavier_init(linear):
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user