mirror of
https://github.com/facebookresearch/pytorch3d.git
synced 2026-02-28 09:16:00 +08:00
Compare commits
314 Commits
v0.5.0
...
classner-p
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e7c1f026ea | ||
|
|
cb49550486 | ||
|
|
36edf2b302 | ||
|
|
78bb6d17fa | ||
|
|
54c75b4114 | ||
|
|
3783437d2f | ||
|
|
b2dc520210 | ||
|
|
8597d4c5c1 | ||
|
|
38fd8380f7 | ||
|
|
67840f8320 | ||
|
|
9b2e570536 | ||
|
|
0f966217e5 | ||
|
|
379c8b2780 | ||
|
|
8e0c82b89a | ||
|
|
8ba9a694ee | ||
|
|
36ba079bef | ||
|
|
b95ec190af | ||
|
|
55f67b0d18 | ||
|
|
4261e59f51 | ||
|
|
af55ba01f8 | ||
|
|
d3b7f5f421 | ||
|
|
4ecc9ea89d | ||
|
|
8d10ba52b2 | ||
|
|
aa8b03f31d | ||
|
|
57a40b3688 | ||
|
|
522e5f0644 | ||
|
|
e8390d3500 | ||
|
|
4300030d7a | ||
|
|
00acf0b0c7 | ||
|
|
a94f3f4c4b | ||
|
|
efb721320a | ||
|
|
40fb189c29 | ||
|
|
4e87c2b7f1 | ||
|
|
771cf8a328 | ||
|
|
0dce883241 | ||
|
|
ae35824f21 | ||
|
|
f4dd151037 | ||
|
|
7ce8ed55e1 | ||
|
|
7e0146ece4 | ||
|
|
0e4c53c612 | ||
|
|
879495d38f | ||
|
|
5c1ca757bb | ||
|
|
3e4fb0b9d9 | ||
|
|
731ea53c80 | ||
|
|
2e42ef793f | ||
|
|
81d63c6382 | ||
|
|
28c1afaa9d | ||
|
|
cba26506b6 | ||
|
|
65f667fd2e | ||
|
|
7978ffd1e4 | ||
|
|
ea4f3260e4 | ||
|
|
023a2369ae | ||
|
|
c0f88e04a0 | ||
|
|
6275283202 | ||
|
|
1d43251391 | ||
|
|
1fb268dea6 | ||
|
|
8bc0a04e86 | ||
|
|
5cd70067e2 | ||
|
|
5b74a2cc27 | ||
|
|
49ed7b07b1 | ||
|
|
c6519f29f0 | ||
|
|
a42a89a5ba | ||
|
|
c31bf85a23 | ||
|
|
fbd3c679ac | ||
|
|
34f648ede0 | ||
|
|
f625fe1f8b | ||
|
|
7c25d34d22 | ||
|
|
c5a83f46ef | ||
|
|
1702c85bec | ||
|
|
90d00f1b2b | ||
|
|
d27ef14ec7 | ||
|
|
2d1c6d5d93 | ||
|
|
9fe15da3cd | ||
|
|
0f12c51646 | ||
|
|
79c61a2d86 | ||
|
|
69c6d06ed8 | ||
|
|
73dc109dba | ||
|
|
9ec9d057cc | ||
|
|
cd7b885169 | ||
|
|
f632c423ef | ||
|
|
f36b11fe49 | ||
|
|
ea5df60d72 | ||
|
|
4372001981 | ||
|
|
61e2b87019 | ||
|
|
0143d63ba8 | ||
|
|
899a3192b6 | ||
|
|
3b2300641a | ||
|
|
b5f3d3ce12 | ||
|
|
2c1901522a | ||
|
|
90ab219d88 | ||
|
|
9e57b994ca | ||
|
|
e767c4b548 | ||
|
|
e85fa03c5a | ||
|
|
47d06c8924 | ||
|
|
bef959c755 | ||
|
|
c21ba144e7 | ||
|
|
d737a05e55 | ||
|
|
2374d19da5 | ||
|
|
1f3953795c | ||
|
|
a6dada399d | ||
|
|
5c59841863 | ||
|
|
2c64635daa | ||
|
|
ec9580a1d4 | ||
|
|
44cb00e468 | ||
|
|
44ca5f95d9 | ||
|
|
a51a300827 | ||
|
|
2bd65027ca | ||
|
|
11635fbd7d | ||
|
|
a268b18e07 | ||
|
|
7ea0756b05 | ||
|
|
96889deab9 | ||
|
|
9f443ed26b | ||
|
|
9320100abc | ||
|
|
2edb93d184 | ||
|
|
41c594ca37 | ||
|
|
c3c4495c7a | ||
|
|
34bbb3ad32 | ||
|
|
df08ea8eb4 | ||
|
|
78fd5af1a6 | ||
|
|
0a7c354dc1 | ||
|
|
b79764ea69 | ||
|
|
b1ff9d9fd4 | ||
|
|
22f86072ca | ||
|
|
050f650ae8 | ||
|
|
8596fcacd2 | ||
|
|
7f097b064b | ||
|
|
aab95575a6 | ||
|
|
67fff956a2 | ||
|
|
4b94649f7b | ||
|
|
3809b6094c | ||
|
|
722646863c | ||
|
|
e10a90140d | ||
|
|
4c48beb226 | ||
|
|
4db9fc11d2 | ||
|
|
3b8a33e9c5 | ||
|
|
199309fcf7 | ||
|
|
6473aa316c | ||
|
|
2802fd9398 | ||
|
|
a999fc22ee | ||
|
|
24260130ce | ||
|
|
a54ad2b912 | ||
|
|
b602edccc4 | ||
|
|
21262e38c7 | ||
|
|
e332f9ffa4 | ||
|
|
0c3bed55be | ||
|
|
97894fb37b | ||
|
|
645a47d054 | ||
|
|
8ac5e8f083 | ||
|
|
92f9dfe9d6 | ||
|
|
f2cf9d4d0b | ||
|
|
e2622d79c0 | ||
|
|
c0bb49b5f6 | ||
|
|
05f656c01f | ||
|
|
4c22855a23 | ||
|
|
cdd2142dd5 | ||
|
|
0e377c6850 | ||
|
|
e64f25c255 | ||
|
|
c85673c626 | ||
|
|
3de3c13a0f | ||
|
|
9b5a3ffa6c | ||
|
|
1701b76a31 | ||
|
|
57a33b25c1 | ||
|
|
c371a9a6cc | ||
|
|
4a1f176054 | ||
|
|
16d0aa82c1 | ||
|
|
69b27d160e | ||
|
|
84a569c0aa | ||
|
|
471b126818 | ||
|
|
4d043fc9ac | ||
|
|
f816568735 | ||
|
|
0e88b21de6 | ||
|
|
1cbf80dab6 | ||
|
|
ee71c7c447 | ||
|
|
3de41223dd | ||
|
|
967a099231 | ||
|
|
feb5d36394 | ||
|
|
db1f7c4506 | ||
|
|
59972b121d | ||
|
|
c8f3d6bc0b | ||
|
|
2a1de3b610 | ||
|
|
ef21a6f6aa | ||
|
|
12f20d799e | ||
|
|
47c0997227 | ||
|
|
e9fb6c27e3 | ||
|
|
c2862ff427 | ||
|
|
5053142363 | ||
|
|
67778caee8 | ||
|
|
3eb4233844 | ||
|
|
174738c33e | ||
|
|
45d096e219 | ||
|
|
39bb2ce063 | ||
|
|
9e2bc3a17f | ||
|
|
fddd6a700f | ||
|
|
85cdcc252d | ||
|
|
fc4dd80208 | ||
|
|
9640560541 | ||
|
|
6726500ad3 | ||
|
|
d6a12afbe7 | ||
|
|
49f93b6388 | ||
|
|
741777b5b5 | ||
|
|
9eeb456e82 | ||
|
|
7660ed1876 | ||
|
|
52c71b8816 | ||
|
|
f9a26a22fc | ||
|
|
d67662d13c | ||
|
|
28ccdb7328 | ||
|
|
cc3259ba93 | ||
|
|
b51be58f63 | ||
|
|
7449951850 | ||
|
|
262c1bfcd4 | ||
|
|
eb2bbf8433 | ||
|
|
1152a93b72 | ||
|
|
315f2487db | ||
|
|
ccfb72cc50 | ||
|
|
069c9fd759 | ||
|
|
9eec430f1c | ||
|
|
f8fe9a2be1 | ||
|
|
d049cd2e01 | ||
|
|
1edc624d82 | ||
|
|
6ea6314792 | ||
|
|
093999e71f | ||
|
|
a22b1e32a4 | ||
|
|
9c9d9440f9 | ||
|
|
c65af9ef5a | ||
|
|
70acb3e415 | ||
|
|
bf3bc6f8e3 | ||
|
|
cff4876131 | ||
|
|
a0e2d2e3c3 | ||
|
|
a6508ac3df | ||
|
|
d9f709599b | ||
|
|
e4456dba2f | ||
|
|
7fa333f632 | ||
|
|
a0247ea6bd | ||
|
|
a8cb7fa862 | ||
|
|
7ce18f38cd | ||
|
|
5fbdb99aec | ||
|
|
1836c786fe | ||
|
|
cac6cb1b78 | ||
|
|
bfeb82efa3 | ||
|
|
73a14d7266 | ||
|
|
bee31c48d3 | ||
|
|
29417d1f9b | ||
|
|
57b9c729b8 | ||
|
|
7c111f7379 | ||
|
|
3953de47ee | ||
|
|
1a7442a483 | ||
|
|
16ebf54e69 | ||
|
|
14dd2611ee | ||
|
|
34b1b4ab8b | ||
|
|
2f2466f472 | ||
|
|
53d99671bd | ||
|
|
6d36c1e2b0 | ||
|
|
6dfa326922 | ||
|
|
b26f4bc33a | ||
|
|
8fa438cbda | ||
|
|
815a93ce89 | ||
|
|
23ef666db1 | ||
|
|
d7d740abe9 | ||
|
|
9585a58d10 | ||
|
|
364a7dcaf4 | ||
|
|
1360d69ffb | ||
|
|
4281df19ce | ||
|
|
ee2b2feb98 | ||
|
|
9ad98c87c3 | ||
|
|
0dfc6e0eb8 | ||
|
|
c7c6deab86 | ||
|
|
4ad8576541 | ||
|
|
a5cbb624c1 | ||
|
|
720bdf60f5 | ||
|
|
1aab192706 | ||
|
|
dd76b41014 | ||
|
|
1b1ba5612f | ||
|
|
ff8d4762f4 | ||
|
|
53266ec9ff | ||
|
|
2293f1fed0 | ||
|
|
5b89c4e3bb | ||
|
|
d0ca3b9e0c | ||
|
|
9a737da83c | ||
|
|
860b742a02 | ||
|
|
cb170ac024 | ||
|
|
fe5bfa5994 | ||
|
|
dbfb3a910a | ||
|
|
526df446c6 | ||
|
|
bd04ffaf77 | ||
|
|
d9f7611c4b | ||
|
|
3b7d78c7a7 | ||
|
|
a0d76a7080 | ||
|
|
46f727cb68 | ||
|
|
c3d7808868 | ||
|
|
bbc7573261 | ||
|
|
eed68f457d | ||
|
|
62dbf371ae | ||
|
|
f2c44e3540 | ||
|
|
a9b0d50baf | ||
|
|
fc156b50c0 | ||
|
|
835e662fb5 | ||
|
|
1b8d86a104 | ||
|
|
1251446383 | ||
|
|
d2bbd0cdb7 | ||
|
|
6c416b319c | ||
|
|
77fa5987b8 | ||
|
|
fadec970c9 | ||
|
|
1ea2b7272a | ||
|
|
7d7d00f288 | ||
|
|
b481cfbd01 | ||
|
|
46cf1970ac | ||
|
|
5491b46511 | ||
|
|
ae1387b523 | ||
|
|
b0dd0c8821 | ||
|
|
103da63393 | ||
|
|
e5c58a8a8b | ||
|
|
64faedfd57 | ||
|
|
9db70400d8 | ||
|
|
804117833e |
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/bin/bash -e
|
#!/bin/bash -e
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -18,20 +18,13 @@ setupcuda: &setupcuda
|
|||||||
working_directory: ~/
|
working_directory: ~/
|
||||||
command: |
|
command: |
|
||||||
# download and install nvidia drivers, cuda, etc
|
# download and install nvidia drivers, cuda, etc
|
||||||
wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.2.2/local_installers/cuda_11.2.2_460.32.03_linux.run
|
wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run
|
||||||
sudo sh ~/nvidia-downloads/cuda_11.2.2_460.32.03_linux.run --silent
|
sudo sh ~/nvidia-downloads/cuda_11.3.1_465.19.01_linux.run --silent
|
||||||
echo "Done installing CUDA."
|
echo "Done installing CUDA."
|
||||||
pyenv versions
|
pyenv versions
|
||||||
nvidia-smi
|
nvidia-smi
|
||||||
pyenv global 3.9.1
|
pyenv global 3.9.1
|
||||||
|
|
||||||
gpu: &gpu
|
|
||||||
environment:
|
|
||||||
CUDA_VERSION: "10.2"
|
|
||||||
machine:
|
|
||||||
image: default
|
|
||||||
resource_class: gpu.medium # tesla m60
|
|
||||||
|
|
||||||
binary_common: &binary_common
|
binary_common: &binary_common
|
||||||
parameters:
|
parameters:
|
||||||
# Edit these defaults to do a release`
|
# Edit these defaults to do a release`
|
||||||
@@ -54,42 +47,41 @@ binary_common: &binary_common
|
|||||||
description: "Wheel only: what docker image to use"
|
description: "Wheel only: what docker image to use"
|
||||||
type: string
|
type: string
|
||||||
default: "pytorch/manylinux-cuda101"
|
default: "pytorch/manylinux-cuda101"
|
||||||
|
conda_docker_image:
|
||||||
|
description: "what docker image to use for docker"
|
||||||
|
type: string
|
||||||
|
default: "pytorch/conda-cuda"
|
||||||
environment:
|
environment:
|
||||||
PYTHON_VERSION: << parameters.python_version >>
|
PYTHON_VERSION: << parameters.python_version >>
|
||||||
BUILD_VERSION: << parameters.build_version >>
|
BUILD_VERSION: << parameters.build_version >>
|
||||||
PYTORCH_VERSION: << parameters.pytorch_version >>
|
PYTORCH_VERSION: << parameters.pytorch_version >>
|
||||||
CU_VERSION: << parameters.cu_version >>
|
CU_VERSION: << parameters.cu_version >>
|
||||||
|
TESTRUN_DOCKER_IMAGE: << parameters.conda_docker_image >>
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
main:
|
main:
|
||||||
<<: *gpu
|
environment:
|
||||||
|
CUDA_VERSION: "11.3"
|
||||||
|
resource_class: gpu.nvidia.small.multi
|
||||||
machine:
|
machine:
|
||||||
image: ubuntu-2004:202101-01
|
image: ubuntu-2004:202101-01
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- <<: *setupcuda
|
- <<: *setupcuda
|
||||||
- run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
|
- run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
|
||||||
- run: pip3 install --progress-bar off torch torchvision
|
- run: pip3 install --progress-bar off torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
|
||||||
# - run: conda create -p ~/conda_env python=3.7 numpy
|
# - run: conda create -p ~/conda_env python=3.7 numpy
|
||||||
# - run: conda activate ~/conda_env
|
# - run: conda activate ~/conda_env
|
||||||
# - run: conda install -c pytorch pytorch torchvision
|
# - run: conda install -c pytorch pytorch torchvision
|
||||||
|
|
||||||
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
|
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
|
||||||
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
|
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
|
||||||
- run:
|
|
||||||
name: get cub
|
|
||||||
command: |
|
|
||||||
cd ..
|
|
||||||
wget --no-verbose https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
|
||||||
tar xzf 1.10.0.tar.gz
|
|
||||||
# This expands to a directory called cub-1.10.0
|
|
||||||
- run:
|
- run:
|
||||||
name: build
|
name: build
|
||||||
command: |
|
command: |
|
||||||
export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.2/lib64
|
export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64
|
||||||
export CUB_HOME=$(realpath ../cub-1.10.0)
|
|
||||||
python3 setup.py build_ext --inplace
|
python3 setup.py build_ext --inplace
|
||||||
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.2/lib64 python -m unittest discover -v -s tests
|
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests -t .
|
||||||
- run: python3 setup.py bdist_wheel
|
- run: python3 setup.py bdist_wheel
|
||||||
|
|
||||||
binary_linux_wheel:
|
binary_linux_wheel:
|
||||||
@@ -113,7 +105,7 @@ jobs:
|
|||||||
binary_linux_conda:
|
binary_linux_conda:
|
||||||
<<: *binary_common
|
<<: *binary_common
|
||||||
docker:
|
docker:
|
||||||
- image: "pytorch/conda-cuda"
|
- image: "<< parameters.conda_docker_image >>"
|
||||||
auth:
|
auth:
|
||||||
username: $DOCKERHUB_USERNAME
|
username: $DOCKERHUB_USERNAME
|
||||||
password: $DOCKERHUB_TOKEN
|
password: $DOCKERHUB_TOKEN
|
||||||
@@ -136,62 +128,21 @@ jobs:
|
|||||||
binary_linux_conda_cuda:
|
binary_linux_conda_cuda:
|
||||||
<<: *binary_common
|
<<: *binary_common
|
||||||
machine:
|
machine:
|
||||||
image: ubuntu-1604:201903-01
|
image: ubuntu-1604-cuda-10.2:202012-01
|
||||||
resource_class: gpu.medium
|
resource_class: gpu.nvidia.small.multi
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
|
||||||
name: Setup environment
|
|
||||||
command: |
|
|
||||||
set -e
|
|
||||||
|
|
||||||
curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
|
|
||||||
curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
|
|
||||||
|
|
||||||
sudo apt-get update
|
|
||||||
|
|
||||||
sudo apt-get install \
|
|
||||||
apt-transport-https \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
gnupg-agent \
|
|
||||||
software-properties-common
|
|
||||||
|
|
||||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
|
||||||
|
|
||||||
sudo add-apt-repository \
|
|
||||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
|
||||||
$(lsb_release -cs) \
|
|
||||||
stable"
|
|
||||||
|
|
||||||
sudo apt-get update
|
|
||||||
export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
|
|
||||||
sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
|
|
||||||
|
|
||||||
# Add the package repositories
|
|
||||||
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
|
|
||||||
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
|
|
||||||
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
|
|
||||||
|
|
||||||
export NVIDIA_CONTAINER_VERSION="1.0.3-1"
|
|
||||||
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
|
|
||||||
sudo systemctl restart docker
|
|
||||||
|
|
||||||
DRIVER_FN="NVIDIA-Linux-x86_64-460.84.run"
|
|
||||||
wget "https://us.download.nvidia.com/XFree86/Linux-x86_64/460.84/$DRIVER_FN"
|
|
||||||
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
|
|
||||||
nvidia-smi
|
|
||||||
|
|
||||||
- run:
|
- run:
|
||||||
name: Pull docker image
|
name: Pull docker image
|
||||||
command: |
|
command: |
|
||||||
|
nvidia-smi
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
{ docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
|
{ docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
|
||||||
|
|
||||||
DOCKER_IMAGE=pytorch/conda-cuda
|
echo Pulling docker image $TESTRUN_DOCKER_IMAGE
|
||||||
echo Pulling docker image $DOCKER_IMAGE
|
docker pull $TESTRUN_DOCKER_IMAGE
|
||||||
docker pull $DOCKER_IMAGE
|
|
||||||
- run:
|
- run:
|
||||||
name: Build and run tests
|
name: Build and run tests
|
||||||
no_output_timeout: 20m
|
no_output_timeout: 20m
|
||||||
@@ -200,11 +151,10 @@ jobs:
|
|||||||
|
|
||||||
cd ${HOME}/project/
|
cd ${HOME}/project/
|
||||||
|
|
||||||
DOCKER_IMAGE=pytorch/conda-cuda
|
|
||||||
export JUST_TESTRUN=1
|
export JUST_TESTRUN=1
|
||||||
VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
|
VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
|
||||||
|
|
||||||
docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
|
docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} ./packaging/build_conda.sh
|
||||||
|
|
||||||
binary_macos_wheel:
|
binary_macos_wheel:
|
||||||
<<: *binary_common
|
<<: *binary_common
|
||||||
@@ -228,50 +178,27 @@ workflows:
|
|||||||
version: 2
|
version: 2
|
||||||
build_and_test:
|
build_and_test:
|
||||||
jobs:
|
jobs:
|
||||||
- main:
|
# - main:
|
||||||
context: DOCKERHUB_TOKEN
|
# context: DOCKERHUB_TOKEN
|
||||||
{{workflows()}}
|
{{workflows()}}
|
||||||
- binary_linux_conda_cuda:
|
|
||||||
name: testrun_conda_cuda_py36_cu101_pyt14
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
python_version: "3.6"
|
|
||||||
pytorch_version: "1.4"
|
|
||||||
cu_version: "cu101"
|
|
||||||
- binary_linux_conda_cuda:
|
- binary_linux_conda_cuda:
|
||||||
name: testrun_conda_cuda_py37_cu102_pyt190
|
name: testrun_conda_cuda_py37_cu102_pyt190
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
python_version: "3.7"
|
python_version: "3.7"
|
||||||
pytorch_version: '1.9.0'
|
pytorch_version: '1.9.0'
|
||||||
cu_version: "cu102"
|
cu_version: "cu102"
|
||||||
- binary_linux_conda_cuda:
|
|
||||||
name: testrun_conda_cuda_py37_cu110_pyt170
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
python_version: "3.7"
|
|
||||||
pytorch_version: '1.7.0'
|
|
||||||
cu_version: "cu110"
|
|
||||||
- binary_linux_conda_cuda:
|
|
||||||
name: testrun_conda_cuda_py39_cu111_pyt181
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
python_version: "3.9"
|
|
||||||
pytorch_version: '1.8.1'
|
|
||||||
cu_version: "cu111"
|
|
||||||
- binary_macos_wheel:
|
|
||||||
cu_version: cpu
|
|
||||||
name: macos_wheel_py36_cpu
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: '1.9.0'
|
|
||||||
- binary_macos_wheel:
|
- binary_macos_wheel:
|
||||||
cu_version: cpu
|
cu_version: cpu
|
||||||
name: macos_wheel_py37_cpu
|
name: macos_wheel_py37_cpu
|
||||||
python_version: '3.7'
|
python_version: '3.7'
|
||||||
pytorch_version: '1.9.0'
|
pytorch_version: '1.12.0'
|
||||||
- binary_macos_wheel:
|
- binary_macos_wheel:
|
||||||
cu_version: cpu
|
cu_version: cpu
|
||||||
name: macos_wheel_py38_cpu
|
name: macos_wheel_py38_cpu
|
||||||
python_version: '3.8'
|
python_version: '3.8'
|
||||||
pytorch_version: '1.9.0'
|
pytorch_version: '1.12.0'
|
||||||
- binary_macos_wheel:
|
- binary_macos_wheel:
|
||||||
cu_version: cpu
|
cu_version: cpu
|
||||||
name: macos_wheel_py39_cpu
|
name: macos_wheel_py39_cpu
|
||||||
python_version: '3.9'
|
python_version: '3.9'
|
||||||
pytorch_version: '1.9.0'
|
pytorch_version: '1.12.0'
|
||||||
|
|||||||
@@ -18,20 +18,13 @@ setupcuda: &setupcuda
|
|||||||
working_directory: ~/
|
working_directory: ~/
|
||||||
command: |
|
command: |
|
||||||
# download and install nvidia drivers, cuda, etc
|
# download and install nvidia drivers, cuda, etc
|
||||||
wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.2.2/local_installers/cuda_11.2.2_460.32.03_linux.run
|
wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run
|
||||||
sudo sh ~/nvidia-downloads/cuda_11.2.2_460.32.03_linux.run --silent
|
sudo sh ~/nvidia-downloads/cuda_11.3.1_465.19.01_linux.run --silent
|
||||||
echo "Done installing CUDA."
|
echo "Done installing CUDA."
|
||||||
pyenv versions
|
pyenv versions
|
||||||
nvidia-smi
|
nvidia-smi
|
||||||
pyenv global 3.9.1
|
pyenv global 3.9.1
|
||||||
|
|
||||||
gpu: &gpu
|
|
||||||
environment:
|
|
||||||
CUDA_VERSION: "10.2"
|
|
||||||
machine:
|
|
||||||
image: default
|
|
||||||
resource_class: gpu.medium # tesla m60
|
|
||||||
|
|
||||||
binary_common: &binary_common
|
binary_common: &binary_common
|
||||||
parameters:
|
parameters:
|
||||||
# Edit these defaults to do a release`
|
# Edit these defaults to do a release`
|
||||||
@@ -54,42 +47,41 @@ binary_common: &binary_common
|
|||||||
description: "Wheel only: what docker image to use"
|
description: "Wheel only: what docker image to use"
|
||||||
type: string
|
type: string
|
||||||
default: "pytorch/manylinux-cuda101"
|
default: "pytorch/manylinux-cuda101"
|
||||||
|
conda_docker_image:
|
||||||
|
description: "what docker image to use for docker"
|
||||||
|
type: string
|
||||||
|
default: "pytorch/conda-cuda"
|
||||||
environment:
|
environment:
|
||||||
PYTHON_VERSION: << parameters.python_version >>
|
PYTHON_VERSION: << parameters.python_version >>
|
||||||
BUILD_VERSION: << parameters.build_version >>
|
BUILD_VERSION: << parameters.build_version >>
|
||||||
PYTORCH_VERSION: << parameters.pytorch_version >>
|
PYTORCH_VERSION: << parameters.pytorch_version >>
|
||||||
CU_VERSION: << parameters.cu_version >>
|
CU_VERSION: << parameters.cu_version >>
|
||||||
|
TESTRUN_DOCKER_IMAGE: << parameters.conda_docker_image >>
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
main:
|
main:
|
||||||
<<: *gpu
|
environment:
|
||||||
|
CUDA_VERSION: "11.3"
|
||||||
|
resource_class: gpu.nvidia.small.multi
|
||||||
machine:
|
machine:
|
||||||
image: ubuntu-2004:202101-01
|
image: ubuntu-2004:202101-01
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- <<: *setupcuda
|
- <<: *setupcuda
|
||||||
- run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
|
- run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
|
||||||
- run: pip3 install --progress-bar off torch torchvision
|
- run: pip3 install --progress-bar off torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
|
||||||
# - run: conda create -p ~/conda_env python=3.7 numpy
|
# - run: conda create -p ~/conda_env python=3.7 numpy
|
||||||
# - run: conda activate ~/conda_env
|
# - run: conda activate ~/conda_env
|
||||||
# - run: conda install -c pytorch pytorch torchvision
|
# - run: conda install -c pytorch pytorch torchvision
|
||||||
|
|
||||||
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
|
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
|
||||||
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
|
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
|
||||||
- run:
|
|
||||||
name: get cub
|
|
||||||
command: |
|
|
||||||
cd ..
|
|
||||||
wget --no-verbose https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
|
||||||
tar xzf 1.10.0.tar.gz
|
|
||||||
# This expands to a directory called cub-1.10.0
|
|
||||||
- run:
|
- run:
|
||||||
name: build
|
name: build
|
||||||
command: |
|
command: |
|
||||||
export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.2/lib64
|
export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64
|
||||||
export CUB_HOME=$(realpath ../cub-1.10.0)
|
|
||||||
python3 setup.py build_ext --inplace
|
python3 setup.py build_ext --inplace
|
||||||
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.2/lib64 python -m unittest discover -v -s tests
|
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests -t .
|
||||||
- run: python3 setup.py bdist_wheel
|
- run: python3 setup.py bdist_wheel
|
||||||
|
|
||||||
binary_linux_wheel:
|
binary_linux_wheel:
|
||||||
@@ -113,7 +105,7 @@ jobs:
|
|||||||
binary_linux_conda:
|
binary_linux_conda:
|
||||||
<<: *binary_common
|
<<: *binary_common
|
||||||
docker:
|
docker:
|
||||||
- image: "pytorch/conda-cuda"
|
- image: "<< parameters.conda_docker_image >>"
|
||||||
auth:
|
auth:
|
||||||
username: $DOCKERHUB_USERNAME
|
username: $DOCKERHUB_USERNAME
|
||||||
password: $DOCKERHUB_TOKEN
|
password: $DOCKERHUB_TOKEN
|
||||||
@@ -136,62 +128,21 @@ jobs:
|
|||||||
binary_linux_conda_cuda:
|
binary_linux_conda_cuda:
|
||||||
<<: *binary_common
|
<<: *binary_common
|
||||||
machine:
|
machine:
|
||||||
image: ubuntu-1604:201903-01
|
image: ubuntu-1604-cuda-10.2:202012-01
|
||||||
resource_class: gpu.medium
|
resource_class: gpu.nvidia.small.multi
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
|
||||||
name: Setup environment
|
|
||||||
command: |
|
|
||||||
set -e
|
|
||||||
|
|
||||||
curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
|
|
||||||
curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
|
|
||||||
|
|
||||||
sudo apt-get update
|
|
||||||
|
|
||||||
sudo apt-get install \
|
|
||||||
apt-transport-https \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
gnupg-agent \
|
|
||||||
software-properties-common
|
|
||||||
|
|
||||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
|
||||||
|
|
||||||
sudo add-apt-repository \
|
|
||||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
|
||||||
$(lsb_release -cs) \
|
|
||||||
stable"
|
|
||||||
|
|
||||||
sudo apt-get update
|
|
||||||
export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
|
|
||||||
sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
|
|
||||||
|
|
||||||
# Add the package repositories
|
|
||||||
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
|
|
||||||
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
|
|
||||||
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
|
|
||||||
|
|
||||||
export NVIDIA_CONTAINER_VERSION="1.0.3-1"
|
|
||||||
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
|
|
||||||
sudo systemctl restart docker
|
|
||||||
|
|
||||||
DRIVER_FN="NVIDIA-Linux-x86_64-460.84.run"
|
|
||||||
wget "https://us.download.nvidia.com/XFree86/Linux-x86_64/460.84/$DRIVER_FN"
|
|
||||||
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
|
|
||||||
nvidia-smi
|
|
||||||
|
|
||||||
- run:
|
- run:
|
||||||
name: Pull docker image
|
name: Pull docker image
|
||||||
command: |
|
command: |
|
||||||
|
nvidia-smi
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
{ docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
|
{ docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
|
||||||
|
|
||||||
DOCKER_IMAGE=pytorch/conda-cuda
|
echo Pulling docker image $TESTRUN_DOCKER_IMAGE
|
||||||
echo Pulling docker image $DOCKER_IMAGE
|
docker pull $TESTRUN_DOCKER_IMAGE
|
||||||
docker pull $DOCKER_IMAGE
|
|
||||||
- run:
|
- run:
|
||||||
name: Build and run tests
|
name: Build and run tests
|
||||||
no_output_timeout: 20m
|
no_output_timeout: 20m
|
||||||
@@ -200,11 +151,10 @@ jobs:
|
|||||||
|
|
||||||
cd ${HOME}/project/
|
cd ${HOME}/project/
|
||||||
|
|
||||||
DOCKER_IMAGE=pytorch/conda-cuda
|
|
||||||
export JUST_TESTRUN=1
|
export JUST_TESTRUN=1
|
||||||
VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
|
VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
|
||||||
|
|
||||||
docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
|
docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} ./packaging/build_conda.sh
|
||||||
|
|
||||||
binary_macos_wheel:
|
binary_macos_wheel:
|
||||||
<<: *binary_common
|
<<: *binary_common
|
||||||
@@ -228,260 +178,8 @@ workflows:
|
|||||||
version: 2
|
version: 2
|
||||||
build_and_test:
|
build_and_test:
|
||||||
jobs:
|
jobs:
|
||||||
- main:
|
# - main:
|
||||||
context: DOCKERHUB_TOKEN
|
# context: DOCKERHUB_TOKEN
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py36_cu92_pyt14
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: '1.4'
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py36_cu101_pyt14
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: '1.4'
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py36_cu92_pyt150
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.5.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py36_cu101_pyt150
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.5.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py36_cu102_pyt150
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.5.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py36_cu92_pyt151
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.5.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py36_cu101_pyt151
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.5.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py36_cu102_pyt151
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.5.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py36_cu92_pyt160
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.6.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py36_cu101_pyt160
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.6.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py36_cu102_pyt160
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.6.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py36_cu101_pyt170
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.7.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py36_cu102_pyt170
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.7.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu110
|
|
||||||
name: linux_conda_py36_cu110_pyt170
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.7.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py36_cu101_pyt171
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.7.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py36_cu102_pyt171
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.7.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu110
|
|
||||||
name: linux_conda_py36_cu110_pyt171
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.7.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py36_cu101_pyt180
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.8.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py36_cu102_pyt180
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.8.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu111
|
|
||||||
name: linux_conda_py36_cu111_pyt180
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.8.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py36_cu101_pyt181
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.8.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py36_cu102_pyt181
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.8.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu111
|
|
||||||
name: linux_conda_py36_cu111_pyt181
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.8.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py36_cu102_pyt190
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.9.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu111
|
|
||||||
name: linux_conda_py36_cu111_pyt190
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: 1.9.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py37_cu92_pyt14
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: '1.4'
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py37_cu101_pyt14
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: '1.4'
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py37_cu92_pyt150
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.5.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py37_cu101_pyt150
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.5.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py37_cu102_pyt150
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.5.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py37_cu92_pyt151
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.5.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py37_cu101_pyt151
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.5.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py37_cu102_pyt151
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.5.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py37_cu92_pyt160
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.6.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py37_cu101_pyt160
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.6.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py37_cu102_pyt160
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.6.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py37_cu101_pyt170
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.7.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py37_cu102_pyt170
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.7.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu110
|
|
||||||
name: linux_conda_py37_cu110_pyt170
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.7.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py37_cu101_pyt171
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.7.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu102
|
|
||||||
name: linux_conda_py37_cu102_pyt171
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.7.1
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu110
|
|
||||||
name: linux_conda_py37_cu110_pyt171
|
|
||||||
python_version: '3.7'
|
|
||||||
pytorch_version: 1.7.1
|
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu101
|
||||||
@@ -532,106 +230,119 @@ workflows:
|
|||||||
pytorch_version: 1.9.0
|
pytorch_version: 1.9.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu92
|
cu_version: cu102
|
||||||
name: linux_conda_py38_cu92_pyt14
|
name: linux_conda_py37_cu102_pyt191
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: '1.4'
|
pytorch_version: 1.9.1
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu111
|
||||||
name: linux_conda_py38_cu101_pyt14
|
name: linux_conda_py37_cu111_pyt191
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: '1.4'
|
pytorch_version: 1.9.1
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu92
|
|
||||||
name: linux_conda_py38_cu92_pyt150
|
|
||||||
python_version: '3.8'
|
|
||||||
pytorch_version: 1.5.0
|
|
||||||
- binary_linux_conda:
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
cu_version: cu101
|
|
||||||
name: linux_conda_py38_cu101_pyt150
|
|
||||||
python_version: '3.8'
|
|
||||||
pytorch_version: 1.5.0
|
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu102
|
cu_version: cu102
|
||||||
name: linux_conda_py38_cu102_pyt150
|
name: linux_conda_py37_cu102_pyt1100
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.5.0
|
pytorch_version: 1.10.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu92
|
cu_version: cu111
|
||||||
name: linux_conda_py38_cu92_pyt151
|
name: linux_conda_py37_cu111_pyt1100
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.5.1
|
pytorch_version: 1.10.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu113
|
||||||
name: linux_conda_py38_cu101_pyt151
|
name: linux_conda_py37_cu113_pyt1100
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.5.1
|
pytorch_version: 1.10.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu102
|
cu_version: cu102
|
||||||
name: linux_conda_py38_cu102_pyt151
|
name: linux_conda_py37_cu102_pyt1101
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.5.1
|
pytorch_version: 1.10.1
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu92
|
cu_version: cu111
|
||||||
name: linux_conda_py38_cu92_pyt160
|
name: linux_conda_py37_cu111_pyt1101
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.6.0
|
pytorch_version: 1.10.1
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu113
|
||||||
name: linux_conda_py38_cu101_pyt160
|
name: linux_conda_py37_cu113_pyt1101
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.6.0
|
pytorch_version: 1.10.1
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu102
|
cu_version: cu102
|
||||||
name: linux_conda_py38_cu102_pyt160
|
name: linux_conda_py37_cu102_pyt1102
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.6.0
|
pytorch_version: 1.10.2
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu111
|
||||||
name: linux_conda_py38_cu101_pyt170
|
name: linux_conda_py37_cu111_pyt1102
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.7.0
|
pytorch_version: 1.10.2
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py37_cu113_pyt1102
|
||||||
|
python_version: '3.7'
|
||||||
|
pytorch_version: 1.10.2
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu102
|
cu_version: cu102
|
||||||
name: linux_conda_py38_cu102_pyt170
|
name: linux_conda_py37_cu102_pyt1110
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.7.0
|
pytorch_version: 1.11.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu110
|
cu_version: cu111
|
||||||
name: linux_conda_py38_cu110_pyt170
|
name: linux_conda_py37_cu111_pyt1110
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.7.0
|
pytorch_version: 1.11.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu113
|
||||||
name: linux_conda_py38_cu101_pyt171
|
name: linux_conda_py37_cu113_pyt1110
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.7.1
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda115
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu115
|
||||||
|
name: linux_conda_py37_cu115_pyt1110
|
||||||
|
python_version: '3.7'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu102
|
cu_version: cu102
|
||||||
name: linux_conda_py38_cu102_pyt171
|
name: linux_conda_py37_cu102_pyt1120
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.7.1
|
pytorch_version: 1.12.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu110
|
cu_version: cu113
|
||||||
name: linux_conda_py38_cu110_pyt171
|
name: linux_conda_py37_cu113_pyt1120
|
||||||
python_version: '3.8'
|
python_version: '3.7'
|
||||||
pytorch_version: 1.7.1
|
pytorch_version: 1.12.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda116
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu116
|
||||||
|
name: linux_conda_py37_cu116_pyt1120
|
||||||
|
python_version: '3.7'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu101
|
||||||
@@ -682,22 +393,119 @@ workflows:
|
|||||||
pytorch_version: 1.9.0
|
pytorch_version: 1.9.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu102
|
||||||
name: linux_conda_py39_cu101_pyt171
|
name: linux_conda_py38_cu102_pyt191
|
||||||
python_version: '3.9'
|
python_version: '3.8'
|
||||||
pytorch_version: 1.7.1
|
pytorch_version: 1.9.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py38_cu111_pyt191
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.9.1
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu102
|
cu_version: cu102
|
||||||
name: linux_conda_py39_cu102_pyt171
|
name: linux_conda_py38_cu102_pyt1100
|
||||||
python_version: '3.9'
|
python_version: '3.8'
|
||||||
pytorch_version: 1.7.1
|
pytorch_version: 1.10.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu110
|
cu_version: cu111
|
||||||
name: linux_conda_py39_cu110_pyt171
|
name: linux_conda_py38_cu111_pyt1100
|
||||||
python_version: '3.9'
|
python_version: '3.8'
|
||||||
pytorch_version: 1.7.1
|
pytorch_version: 1.10.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py38_cu113_pyt1100
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.10.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py38_cu102_pyt1101
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.10.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py38_cu111_pyt1101
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.10.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py38_cu113_pyt1101
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.10.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py38_cu102_pyt1102
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.10.2
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py38_cu111_pyt1102
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.10.2
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py38_cu113_pyt1102
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.10.2
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py38_cu102_pyt1110
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py38_cu111_pyt1110
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py38_cu113_pyt1110
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda115
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu115
|
||||||
|
name: linux_conda_py38_cu115_pyt1110
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py38_cu102_pyt1120
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py38_cu113_pyt1120
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda116
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu116
|
||||||
|
name: linux_conda_py38_cu116_pyt1120
|
||||||
|
python_version: '3.8'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
- binary_linux_conda:
|
- binary_linux_conda:
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
cu_version: cu101
|
cu_version: cu101
|
||||||
@@ -746,47 +554,185 @@ workflows:
|
|||||||
name: linux_conda_py39_cu111_pyt190
|
name: linux_conda_py39_cu111_pyt190
|
||||||
python_version: '3.9'
|
python_version: '3.9'
|
||||||
pytorch_version: 1.9.0
|
pytorch_version: 1.9.0
|
||||||
- binary_linux_conda_cuda:
|
- binary_linux_conda:
|
||||||
name: testrun_conda_cuda_py36_cu101_pyt14
|
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
python_version: "3.6"
|
cu_version: cu102
|
||||||
pytorch_version: "1.4"
|
name: linux_conda_py39_cu102_pyt191
|
||||||
cu_version: "cu101"
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.9.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py39_cu111_pyt191
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.9.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py39_cu102_pyt1100
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py39_cu111_pyt1100
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py39_cu113_pyt1100
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py39_cu102_pyt1101
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py39_cu111_pyt1101
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py39_cu113_pyt1101
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.1
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py39_cu102_pyt1102
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.2
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py39_cu111_pyt1102
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.2
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py39_cu113_pyt1102
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.10.2
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py39_cu102_pyt1110
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py39_cu111_pyt1110
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py39_cu113_pyt1110
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda115
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu115
|
||||||
|
name: linux_conda_py39_cu115_pyt1110
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py39_cu102_pyt1120
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py39_cu113_pyt1120
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda116
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu116
|
||||||
|
name: linux_conda_py39_cu116_pyt1120
|
||||||
|
python_version: '3.9'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py310_cu102_pyt1110
|
||||||
|
python_version: '3.10'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu111
|
||||||
|
name: linux_conda_py310_cu111_pyt1110
|
||||||
|
python_version: '3.10'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py310_cu113_pyt1110
|
||||||
|
python_version: '3.10'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda115
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu115
|
||||||
|
name: linux_conda_py310_cu115_pyt1110
|
||||||
|
python_version: '3.10'
|
||||||
|
pytorch_version: 1.11.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu102
|
||||||
|
name: linux_conda_py310_cu102_pyt1120
|
||||||
|
python_version: '3.10'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda113
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu113
|
||||||
|
name: linux_conda_py310_cu113_pyt1120
|
||||||
|
python_version: '3.10'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
|
- binary_linux_conda:
|
||||||
|
conda_docker_image: pytorch/conda-builder:cuda116
|
||||||
|
context: DOCKERHUB_TOKEN
|
||||||
|
cu_version: cu116
|
||||||
|
name: linux_conda_py310_cu116_pyt1120
|
||||||
|
python_version: '3.10'
|
||||||
|
pytorch_version: 1.12.0
|
||||||
- binary_linux_conda_cuda:
|
- binary_linux_conda_cuda:
|
||||||
name: testrun_conda_cuda_py37_cu102_pyt190
|
name: testrun_conda_cuda_py37_cu102_pyt190
|
||||||
context: DOCKERHUB_TOKEN
|
context: DOCKERHUB_TOKEN
|
||||||
python_version: "3.7"
|
python_version: "3.7"
|
||||||
pytorch_version: '1.9.0'
|
pytorch_version: '1.9.0'
|
||||||
cu_version: "cu102"
|
cu_version: "cu102"
|
||||||
- binary_linux_conda_cuda:
|
|
||||||
name: testrun_conda_cuda_py37_cu110_pyt170
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
python_version: "3.7"
|
|
||||||
pytorch_version: '1.7.0'
|
|
||||||
cu_version: "cu110"
|
|
||||||
- binary_linux_conda_cuda:
|
|
||||||
name: testrun_conda_cuda_py39_cu111_pyt181
|
|
||||||
context: DOCKERHUB_TOKEN
|
|
||||||
python_version: "3.9"
|
|
||||||
pytorch_version: '1.8.1'
|
|
||||||
cu_version: "cu111"
|
|
||||||
- binary_macos_wheel:
|
|
||||||
cu_version: cpu
|
|
||||||
name: macos_wheel_py36_cpu
|
|
||||||
python_version: '3.6'
|
|
||||||
pytorch_version: '1.9.0'
|
|
||||||
- binary_macos_wheel:
|
- binary_macos_wheel:
|
||||||
cu_version: cpu
|
cu_version: cpu
|
||||||
name: macos_wheel_py37_cpu
|
name: macos_wheel_py37_cpu
|
||||||
python_version: '3.7'
|
python_version: '3.7'
|
||||||
pytorch_version: '1.9.0'
|
pytorch_version: '1.12.0'
|
||||||
- binary_macos_wheel:
|
- binary_macos_wheel:
|
||||||
cu_version: cpu
|
cu_version: cpu
|
||||||
name: macos_wheel_py38_cpu
|
name: macos_wheel_py38_cpu
|
||||||
python_version: '3.8'
|
python_version: '3.8'
|
||||||
pytorch_version: '1.9.0'
|
pytorch_version: '1.12.0'
|
||||||
- binary_macos_wheel:
|
- binary_macos_wheel:
|
||||||
cu_version: cpu
|
cu_version: cpu
|
||||||
name: macos_wheel_py39_cpu
|
name: macos_wheel_py39_cpu
|
||||||
python_version: '3.9'
|
python_version: '3.9'
|
||||||
pytorch_version: '1.9.0'
|
pytorch_version: '1.12.0'
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
@@ -13,35 +13,58 @@ import os.path
|
|||||||
|
|
||||||
import jinja2
|
import jinja2
|
||||||
import yaml
|
import yaml
|
||||||
|
from packaging import version
|
||||||
|
|
||||||
|
|
||||||
# The CUDA versions which have pytorch conda packages available for linux for each
|
# The CUDA versions which have pytorch conda packages available for linux for each
|
||||||
# version of pytorch.
|
# version of pytorch.
|
||||||
# Pytorch 1.4 also supports cuda 10.0 but we no longer build for cuda 10.0 at all.
|
# Pytorch 1.4 also supports cuda 10.0 but we no longer build for cuda 10.0 at all.
|
||||||
CONDA_CUDA_VERSIONS = {
|
CONDA_CUDA_VERSIONS = {
|
||||||
"1.4": ["cu92", "cu101"],
|
|
||||||
"1.5.0": ["cu92", "cu101", "cu102"],
|
|
||||||
"1.5.1": ["cu92", "cu101", "cu102"],
|
|
||||||
"1.6.0": ["cu92", "cu101", "cu102"],
|
|
||||||
"1.7.0": ["cu101", "cu102", "cu110"],
|
|
||||||
"1.7.1": ["cu101", "cu102", "cu110"],
|
|
||||||
"1.8.0": ["cu101", "cu102", "cu111"],
|
"1.8.0": ["cu101", "cu102", "cu111"],
|
||||||
"1.8.1": ["cu101", "cu102", "cu111"],
|
"1.8.1": ["cu101", "cu102", "cu111"],
|
||||||
"1.9.0": ["cu102", "cu111"],
|
"1.9.0": ["cu102", "cu111"],
|
||||||
|
"1.9.1": ["cu102", "cu111"],
|
||||||
|
"1.10.0": ["cu102", "cu111", "cu113"],
|
||||||
|
"1.10.1": ["cu102", "cu111", "cu113"],
|
||||||
|
"1.10.2": ["cu102", "cu111", "cu113"],
|
||||||
|
"1.11.0": ["cu102", "cu111", "cu113", "cu115"],
|
||||||
|
"1.12.0": ["cu102", "cu113", "cu116"],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def conda_docker_image_for_cuda(cuda_version):
|
||||||
|
if cuda_version in ("cu101", "cu102", "cu111"):
|
||||||
|
return None
|
||||||
|
if cuda_version == "cu113":
|
||||||
|
return "pytorch/conda-builder:cuda113"
|
||||||
|
if cuda_version == "cu115":
|
||||||
|
return "pytorch/conda-builder:cuda115"
|
||||||
|
if cuda_version == "cu116":
|
||||||
|
return "pytorch/conda-builder:cuda116"
|
||||||
|
raise ValueError("Unknown cuda version")
|
||||||
|
|
||||||
|
|
||||||
def pytorch_versions_for_python(python_version):
|
def pytorch_versions_for_python(python_version):
|
||||||
if python_version in ["3.6", "3.7", "3.8"]:
|
if python_version in ["3.7", "3.8"]:
|
||||||
return list(CONDA_CUDA_VERSIONS)
|
return list(CONDA_CUDA_VERSIONS)
|
||||||
pytorch_without_py39 = ["1.4", "1.5.0", "1.5.1", "1.6.0", "1.7.0"]
|
if python_version == "3.9":
|
||||||
return [i for i in CONDA_CUDA_VERSIONS if i not in pytorch_without_py39]
|
return [
|
||||||
|
i
|
||||||
|
for i in CONDA_CUDA_VERSIONS
|
||||||
|
if version.Version(i) > version.Version("1.7.0")
|
||||||
|
]
|
||||||
|
if python_version == "3.10":
|
||||||
|
return [
|
||||||
|
i
|
||||||
|
for i in CONDA_CUDA_VERSIONS
|
||||||
|
if version.Version(i) >= version.Version("1.11.0")
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
|
def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
|
||||||
w = []
|
w = []
|
||||||
for btype in ["conda"]:
|
for btype in ["conda"]:
|
||||||
for python_version in ["3.6", "3.7", "3.8", "3.9"]:
|
for python_version in ["3.7", "3.8", "3.9", "3.10"]:
|
||||||
for pytorch_version in pytorch_versions_for_python(python_version):
|
for pytorch_version in pytorch_versions_for_python(python_version):
|
||||||
for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
|
for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
|
||||||
w += workflow_pair(
|
w += workflow_pair(
|
||||||
@@ -115,6 +138,10 @@ def generate_base_workflow(
|
|||||||
"context": "DOCKERHUB_TOKEN",
|
"context": "DOCKERHUB_TOKEN",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
conda_docker_image = conda_docker_image_for_cuda(cu_version)
|
||||||
|
if conda_docker_image is not None:
|
||||||
|
d["conda_docker_image"] = conda_docker_image
|
||||||
|
|
||||||
if filter_branch is not None:
|
if filter_branch is not None:
|
||||||
d["filters"] = {"branches": {"only": filter_branch}}
|
d["filters"] = {"branches": {"only": filter_branch}}
|
||||||
|
|
||||||
|
|||||||
2
.github/CONTRIBUTING.md
vendored
2
.github/CONTRIBUTING.md
vendored
@@ -46,7 +46,7 @@ outlined on that page and do not file a public issue.
|
|||||||
## Coding Style
|
## Coding Style
|
||||||
We follow these [python](http://google.github.io/styleguide/pyguide.html) and [C++](https://google.github.io/styleguide/cppguide.html) style guides.
|
We follow these [python](http://google.github.io/styleguide/pyguide.html) and [C++](https://google.github.io/styleguide/cppguide.html) style guides.
|
||||||
|
|
||||||
For the linter to work, you will need to install `black`, `flake`, `isort` and `clang-format`, and
|
For the linter to work, you will need to install `black`, `flake`, `usort` and `clang-format`, and
|
||||||
they need to be fairly up to date.
|
they need to be fairly up to date.
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|||||||
31
INSTALL.md
31
INSTALL.md
@@ -9,7 +9,7 @@ The core library is written in PyTorch. Several components have underlying imple
|
|||||||
|
|
||||||
- Linux or macOS or Windows
|
- Linux or macOS or Windows
|
||||||
- Python 3.6, 3.7, 3.8 or 3.9
|
- Python 3.6, 3.7, 3.8 or 3.9
|
||||||
- PyTorch 1.4, 1.5.0, 1.5.1, 1.6.0, 1.7.0, 1.7.1, 1.8.0, 1.8.1 or 1.9.0.
|
- PyTorch 1.8.0, 1.8.1, 1.9.0, 1.9.1, 1.10.0, 1.10.1, 1.10.2, 1.11.0 or 1.12.0.
|
||||||
- torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
|
- torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
|
||||||
- gcc & g++ ≥ 4.9
|
- gcc & g++ ≥ 4.9
|
||||||
- [fvcore](https://github.com/facebookresearch/fvcore)
|
- [fvcore](https://github.com/facebookresearch/fvcore)
|
||||||
@@ -19,9 +19,9 @@ The core library is written in PyTorch. Several components have underlying imple
|
|||||||
|
|
||||||
The runtime dependencies can be installed by running:
|
The runtime dependencies can be installed by running:
|
||||||
```
|
```
|
||||||
conda create -n pytorch3d python=3.8
|
conda create -n pytorch3d python=3.9
|
||||||
conda activate pytorch3d
|
conda activate pytorch3d
|
||||||
conda install -c pytorch pytorch=1.7.1 torchvision cudatoolkit=10.2
|
conda install -c pytorch pytorch=1.9.1 torchvision cudatoolkit=10.2
|
||||||
conda install -c fvcore -c iopath -c conda-forge fvcore iopath
|
conda install -c fvcore -c iopath -c conda-forge fvcore iopath
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ export CUB_HOME=$PWD/cub-1.10.0
|
|||||||
For developing on top of PyTorch3D or contributing, you will need to run the linter and tests. If you want to run any of the notebook tutorials as `docs/tutorials` or the examples in `docs/examples` you will also need matplotlib and OpenCV.
|
For developing on top of PyTorch3D or contributing, you will need to run the linter and tests. If you want to run any of the notebook tutorials as `docs/tutorials` or the examples in `docs/examples` you will also need matplotlib and OpenCV.
|
||||||
- scikit-image
|
- scikit-image
|
||||||
- black
|
- black
|
||||||
- isort
|
- usort
|
||||||
- flake8
|
- flake8
|
||||||
- matplotlib
|
- matplotlib
|
||||||
- tdqm
|
- tdqm
|
||||||
@@ -59,7 +59,7 @@ conda install jupyter
|
|||||||
pip install scikit-image matplotlib imageio plotly opencv-python
|
pip install scikit-image matplotlib imageio plotly opencv-python
|
||||||
|
|
||||||
# Tests/Linting
|
# Tests/Linting
|
||||||
pip install black 'isort<5' flake8 flake8-bugbear flake8-comprehensions
|
pip install black usort flake8 flake8-bugbear flake8-comprehensions
|
||||||
```
|
```
|
||||||
|
|
||||||
## Installing prebuilt binaries for PyTorch3D
|
## Installing prebuilt binaries for PyTorch3D
|
||||||
@@ -78,30 +78,31 @@ Or, to install a nightly (non-official, alpha) build:
|
|||||||
conda install pytorch3d -c pytorch3d-nightly
|
conda install pytorch3d -c pytorch3d-nightly
|
||||||
```
|
```
|
||||||
### 2. Install from PyPI, on Mac only.
|
### 2. Install from PyPI, on Mac only.
|
||||||
This works with pytorch 1.9.0 only. The build is CPU only.
|
This works with pytorch 1.12.0 only. The build is CPU only.
|
||||||
```
|
```
|
||||||
pip install pytorch3d
|
pip install pytorch3d
|
||||||
```
|
```
|
||||||
|
|
||||||
### 3. Install wheels for Linux
|
### 3. Install wheels for Linux
|
||||||
We have prebuilt wheels with CUDA for Linux for PyTorch 1.9.0, for each of the CUDA versions that they support,
|
We have prebuilt wheels with CUDA for Linux for PyTorch 1.11.0, for each of the supported CUDA versions,
|
||||||
for Python 3.7, 3.8 and 3.9.
|
for Python 3.7, 3.8 and 3.9. This is for ease of use on Google Colab.
|
||||||
These are installed in a special way.
|
These are installed in a special way.
|
||||||
For example, to install for Python 3.8, PyTorch 1.9.0 and CUDA 10.2
|
For example, to install for Python 3.8, PyTorch 1.11.0 and CUDA 11.3
|
||||||
```
|
```
|
||||||
pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu102_pyt190/download.html
|
pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html
|
||||||
```
|
```
|
||||||
|
|
||||||
In general, from inside IPython, or in Google Colab or a jupyter notebook, you can install with
|
In general, from inside IPython, or in Google Colab or a jupyter notebook, you can install with
|
||||||
```
|
```
|
||||||
import sys
|
import sys
|
||||||
import torch
|
import torch
|
||||||
|
pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
|
||||||
version_str="".join([
|
version_str="".join([
|
||||||
f"py3{sys.version_info.minor}_cu",
|
f"py3{sys.version_info.minor}_cu",
|
||||||
torch.version.cuda.replace(".",""),
|
torch.version.cuda.replace(".",""),
|
||||||
f"_pyt{torch.__version__[0:5:2]}"
|
f"_pyt{pyt_version_str}"
|
||||||
])
|
])
|
||||||
!pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
||||||
```
|
```
|
||||||
|
|
||||||
## Building / installing from source.
|
## Building / installing from source.
|
||||||
@@ -146,10 +147,10 @@ After any necessary patching, you can go to "x64 Native Tools Command Prompt for
|
|||||||
cd pytorch3d
|
cd pytorch3d
|
||||||
python3 setup.py install
|
python3 setup.py install
|
||||||
```
|
```
|
||||||
After installing, verify whether all unit tests have passed
|
|
||||||
|
After installing, you can run **unit tests**
|
||||||
```
|
```
|
||||||
cd tests
|
python3 -m unittest discover -v -s tests -t .
|
||||||
python3 -m unittest discover -p *.py
|
|
||||||
```
|
```
|
||||||
|
|
||||||
# FAQ
|
# FAQ
|
||||||
|
|||||||
4
LICENSE
4
LICENSE
@@ -2,7 +2,7 @@ BSD License
|
|||||||
|
|
||||||
For PyTorch3D software
|
For PyTorch3D software
|
||||||
|
|
||||||
Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without modification,
|
Redistribution and use in source and binary forms, with or without modification,
|
||||||
are permitted provided that the following conditions are met:
|
are permitted provided that the following conditions are met:
|
||||||
@@ -14,7 +14,7 @@ are permitted provided that the following conditions are met:
|
|||||||
this list of conditions and the following disclaimer in the documentation
|
this list of conditions and the following disclaimer in the documentation
|
||||||
and/or other materials provided with the distribution.
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
* Neither the name Facebook nor the names of its contributors may be used to
|
* Neither the name Meta nor the names of its contributors may be used to
|
||||||
endorse or promote products derived from this software without specific
|
endorse or promote products derived from this software without specific
|
||||||
prior written permission.
|
prior written permission.
|
||||||
|
|
||||||
|
|||||||
71
LICENSE-3RD-PARTY
Normal file
71
LICENSE-3RD-PARTY
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
SRN license ( https://github.com/vsitzmann/scene-representation-networks/ ):
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2019 Vincent Sitzmann
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
||||||
|
|
||||||
|
IDR license ( github.com/lioryariv/idr ):
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2020 Lior Yariv
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
||||||
|
|
||||||
|
NeRF https://github.com/bmild/nerf/
|
||||||
|
|
||||||
|
Copyright (c) 2020 bmild
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
45
README.md
45
README.md
@@ -1,4 +1,4 @@
|
|||||||
<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/pytorch3dlogo.png" width="900"/>
|
<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/pytorch3dlogo.png" width="900"/>
|
||||||
|
|
||||||
[](https://circleci.com/gh/facebookresearch/pytorch3d)
|
[](https://circleci.com/gh/facebookresearch/pytorch3d)
|
||||||
[](https://anaconda.org/pytorch3d/pytorch3d)
|
[](https://anaconda.org/pytorch3d/pytorch3d)
|
||||||
@@ -35,25 +35,25 @@ PyTorch3D is released under the [BSD License](LICENSE).
|
|||||||
|
|
||||||
Get started with PyTorch3D by trying one of the tutorial notebooks.
|
Get started with PyTorch3D by trying one of the tutorial notebooks.
|
||||||
|
|
||||||
|<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/dolphin_deform.gif" width="310"/>|<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/bundle_adjust.gif" width="310"/>|
|
|<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/dolphin_deform.gif" width="310"/>|<img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/bundle_adjust.gif" width="310"/>|
|
||||||
|:-----------------------------------------------------------------------------------------------------------:|:--------------------------------------------------:|
|
|:-----------------------------------------------------------------------------------------------------------:|:--------------------------------------------------:|
|
||||||
| [Deform a sphere mesh to dolphin](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/deform_source_mesh_to_target_mesh.ipynb)| [Bundle adjustment](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/bundle_adjustment.ipynb) |
|
| [Deform a sphere mesh to dolphin](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/deform_source_mesh_to_target_mesh.ipynb)| [Bundle adjustment](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/bundle_adjustment.ipynb) |
|
||||||
|
|
||||||
| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/render_textured_mesh.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/camera_position_teapot.gif" width="310" height="310"/>
|
| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/render_textured_mesh.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/camera_position_teapot.gif" width="310" height="310"/>
|
||||||
|:------------------------------------------------------------:|:--------------------------------------------------:|
|
|:------------------------------------------------------------:|:--------------------------------------------------:|
|
||||||
| [Render textured meshes](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/render_textured_meshes.ipynb)| [Camera position optimization](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/camera_position_optimization_with_differentiable_rendering.ipynb)|
|
| [Render textured meshes](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_textured_meshes.ipynb)| [Camera position optimization](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/camera_position_optimization_with_differentiable_rendering.ipynb)|
|
||||||
|
|
||||||
| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/pointcloud_render.png" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/cow_deform.gif" width="310" height="310"/>
|
| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/pointcloud_render.png" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/cow_deform.gif" width="310" height="310"/>
|
||||||
|:------------------------------------------------------------:|:--------------------------------------------------:|
|
|:------------------------------------------------------------:|:--------------------------------------------------:|
|
||||||
| [Render textured pointclouds](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/render_colored_points.ipynb)| [Fit a mesh with texture](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/fit_textured_mesh.ipynb)|
|
| [Render textured pointclouds](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_colored_points.ipynb)| [Fit a mesh with texture](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_textured_mesh.ipynb)|
|
||||||
|
|
||||||
| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/densepose_render.png" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/shapenet_render.png" width="310" height="310"/>
|
| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/densepose_render.png" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/shapenet_render.png" width="310" height="310"/>
|
||||||
|:------------------------------------------------------------:|:--------------------------------------------------:|
|
|:------------------------------------------------------------:|:--------------------------------------------------:|
|
||||||
| [Render DensePose data](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/render_densepose.ipynb)| [Load & Render ShapeNet data](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/dataloaders_ShapeNetCore_R2N2.ipynb)|
|
| [Render DensePose data](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_densepose.ipynb)| [Load & Render ShapeNet data](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/dataloaders_ShapeNetCore_R2N2.ipynb)|
|
||||||
|
|
||||||
| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/fit_textured_volume.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/.github/fit_nerf.gif" width="310" height="310"/>
|
| <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/fit_textured_volume.gif" width="310"/> | <img src="https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/.github/fit_nerf.gif" width="310" height="310"/>
|
||||||
|:------------------------------------------------------------:|:--------------------------------------------------:|
|
|:------------------------------------------------------------:|:--------------------------------------------------:|
|
||||||
| [Fit Textured Volume](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/fit_textured_volume.ipynb)| [Fit A Simple Neural Radiance Field](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/fit_simple_neural_radiance_field.ipynb)|
|
| [Fit Textured Volume](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_textured_volume.ipynb)| [Fit A Simple Neural Radiance Field](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/fit_simple_neural_radiance_field.ipynb)|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -64,9 +64,9 @@ Learn more about the API by reading the PyTorch3D [documentation](https://pytorc
|
|||||||
|
|
||||||
We also have deep dive notes on several API components:
|
We also have deep dive notes on several API components:
|
||||||
|
|
||||||
- [Heterogeneous Batching](https://github.com/facebookresearch/pytorch3d/tree/master/docs/notes/batching.md)
|
- [Heterogeneous Batching](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/batching.md)
|
||||||
- [Mesh IO](https://github.com/facebookresearch/pytorch3d/tree/master/docs/notes/meshes_io.md)
|
- [Mesh IO](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/meshes_io.md)
|
||||||
- [Differentiable Rendering](https://github.com/facebookresearch/pytorch3d/tree/master/docs/notes/renderer_getting_started.md)
|
- [Differentiable Rendering](https://github.com/facebookresearch/pytorch3d/tree/main/docs/notes/renderer_getting_started.md)
|
||||||
|
|
||||||
### Overview Video
|
### Overview Video
|
||||||
|
|
||||||
@@ -78,6 +78,13 @@ We have created a short (~14 min) video tutorial providing an overview of the Py
|
|||||||
|
|
||||||
We welcome new contributions to PyTorch3D and we will be actively maintaining this library! Please refer to [CONTRIBUTING.md](./.github/CONTRIBUTING.md) for full instructions on how to run the code, tests and linter, and submit your pull requests.
|
We welcome new contributions to PyTorch3D and we will be actively maintaining this library! Please refer to [CONTRIBUTING.md](./.github/CONTRIBUTING.md) for full instructions on how to run the code, tests and linter, and submit your pull requests.
|
||||||
|
|
||||||
|
## Development and Compatibility
|
||||||
|
|
||||||
|
- `main` branch: actively developed, without any guarantee, Anything can be broken at any time
|
||||||
|
- REMARK: this includes nightly builds which are built from `main`
|
||||||
|
- HINT: the commit history can help locate regressions or changes
|
||||||
|
- backward-compatibility between releases: no guarantee. Best efforts to communicate breaking changes and facilitate migration of code or data (incl. models).
|
||||||
|
|
||||||
## Contributors
|
## Contributors
|
||||||
|
|
||||||
PyTorch3D is written and maintained by the Facebook AI Research Computer Vision Team.
|
PyTorch3D is written and maintained by the Facebook AI Research Computer Vision Team.
|
||||||
@@ -90,7 +97,7 @@ In alphabetical order:
|
|||||||
* Georgia Gkioxari
|
* Georgia Gkioxari
|
||||||
* Taylor Gordon
|
* Taylor Gordon
|
||||||
* Justin Johnson
|
* Justin Johnson
|
||||||
* Patrick Labtut
|
* Patrick Labatut
|
||||||
* Christoph Lassner
|
* Christoph Lassner
|
||||||
* Wan-Yen Lo
|
* Wan-Yen Lo
|
||||||
* David Novotny
|
* David Novotny
|
||||||
@@ -129,7 +136,13 @@ If you are using the pulsar backend for sphere-rendering (the `PulsarPointRender
|
|||||||
|
|
||||||
Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).
|
Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).
|
||||||
|
|
||||||
**[Feb 9th 2021]:** PyTorch3D [v0.4.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.4.0) released with support for implicit functions, volume rendering and a [reimplementation of NeRF](https://github.com/facebookresearch/pytorch3d/tree/master/projects/nerf).
|
**[Dec 16th 2021]:** PyTorch3D [v0.6.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.1) released
|
||||||
|
|
||||||
|
**[Oct 6th 2021]:** PyTorch3D [v0.6.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.6.0) released
|
||||||
|
|
||||||
|
**[Aug 5th 2021]:** PyTorch3D [v0.5.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.5.0) released
|
||||||
|
|
||||||
|
**[Feb 9th 2021]:** PyTorch3D [v0.4.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.4.0) released with support for implicit functions, volume rendering and a [reimplementation of NeRF](https://github.com/facebookresearch/pytorch3d/tree/main/projects/nerf).
|
||||||
|
|
||||||
**[November 2nd 2020]:** PyTorch3D [v0.3.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.3.0) released, integrating the pulsar backend.
|
**[November 2nd 2020]:** PyTorch3D [v0.3.0](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.3.0) released, integrating the pulsar backend.
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/bin/bash -e
|
#!/bin/bash -e
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
@@ -7,20 +7,17 @@
|
|||||||
|
|
||||||
# Run this script at project root by "./dev/linter.sh" before you commit
|
# Run this script at project root by "./dev/linter.sh" before you commit
|
||||||
|
|
||||||
{
|
|
||||||
V=$(black --version|cut '-d ' -f3)
|
|
||||||
code='import distutils.version; assert "19.3" < distutils.version.LooseVersion("'$V'")'
|
|
||||||
python -c "${code}" 2> /dev/null
|
|
||||||
} || {
|
|
||||||
echo "Linter requires black 19.3b0 or higher!"
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||||
DIR=$(dirname "${DIR}")
|
DIR=$(dirname "${DIR}")
|
||||||
|
|
||||||
echo "Running isort..."
|
if [[ -f "${DIR}/TARGETS" ]]
|
||||||
isort -y -sp "${DIR}"
|
then
|
||||||
|
pyfmt "${DIR}"
|
||||||
|
else
|
||||||
|
# run usort externally only
|
||||||
|
echo "Running usort..."
|
||||||
|
usort "${DIR}"
|
||||||
|
fi
|
||||||
|
|
||||||
echo "Running black..."
|
echo "Running black..."
|
||||||
black "${DIR}"
|
black "${DIR}"
|
||||||
@@ -33,7 +30,7 @@ clangformat=$(command -v clang-format-8 || echo clang-format)
|
|||||||
find "${DIR}" -regex ".*\.\(cpp\|c\|cc\|cu\|cuh\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 "${clangformat}" -i
|
find "${DIR}" -regex ".*\.\(cpp\|c\|cc\|cu\|cuh\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 "${clangformat}" -i
|
||||||
|
|
||||||
# Run arc and pyre internally only.
|
# Run arc and pyre internally only.
|
||||||
if [[ -f "${DIR}/tests/TARGETS" ]]
|
if [[ -f "${DIR}/TARGETS" ]]
|
||||||
then
|
then
|
||||||
(cd "${DIR}"; command -v arc > /dev/null && arc lint) || true
|
(cd "${DIR}"; command -v arc > /dev/null && arc lint) || true
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/bash
|
#!/usr/bin/bash
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
64
dev/test_list.py
Normal file
64
dev/test_list.py
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
|
import ast
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
This module outputs a list of tests for completion.
|
||||||
|
It has no dependencies.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def get_test_files() -> List[Path]:
|
||||||
|
root = Path(__file__).parent.parent
|
||||||
|
return list((root / "tests").glob("**/test*.py"))
|
||||||
|
|
||||||
|
|
||||||
|
def tests_from_file(path: Path, base: str) -> List[str]:
|
||||||
|
"""
|
||||||
|
Returns all the tests in the given file, in format
|
||||||
|
expected as arguments when running the tests.
|
||||||
|
e.g.
|
||||||
|
file_stem
|
||||||
|
file_stem.TestFunctionality
|
||||||
|
file_stem.TestFunctionality.test_f
|
||||||
|
file_stem.TestFunctionality.test_g
|
||||||
|
"""
|
||||||
|
with open(path) as f:
|
||||||
|
node = ast.parse(f.read())
|
||||||
|
out = [base]
|
||||||
|
for cls in node.body:
|
||||||
|
if not isinstance(cls, ast.ClassDef):
|
||||||
|
continue
|
||||||
|
if not cls.name.startswith("Test"):
|
||||||
|
continue
|
||||||
|
class_base = base + "." + cls.name
|
||||||
|
out.append(class_base)
|
||||||
|
for method in cls.body:
|
||||||
|
if not isinstance(method, ast.FunctionDef):
|
||||||
|
continue
|
||||||
|
if not method.name.startswith("test"):
|
||||||
|
continue
|
||||||
|
out.append(class_base + "." + method.name)
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
files = get_test_files()
|
||||||
|
test_root = Path(__file__).parent.parent
|
||||||
|
all_tests = []
|
||||||
|
for f in files:
|
||||||
|
file_base = str(f.relative_to(test_root))[:-3].replace("/", ".")
|
||||||
|
all_tests.extend(tests_from_file(f, file_base))
|
||||||
|
for test in sorted(all_tests):
|
||||||
|
print(test)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -1,4 +1,8 @@
|
|||||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
# Minimal makefile for Sphinx documentation
|
# Minimal makefile for Sphinx documentation
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
@@ -82,11 +82,11 @@ for m in ["cv2", "scipy", "numpy", "pytorch3d._C", "np.eye", "np.zeros"]:
|
|||||||
# -- Project information -----------------------------------------------------
|
# -- Project information -----------------------------------------------------
|
||||||
|
|
||||||
project = "PyTorch3D"
|
project = "PyTorch3D"
|
||||||
copyright = "2019, facebookresearch"
|
copyright = "Meta Platforms, Inc"
|
||||||
author = "facebookresearch"
|
author = "facebookresearch"
|
||||||
|
|
||||||
# The short X.Y version
|
# The short X.Y version
|
||||||
version = "0.2.0"
|
version = ""
|
||||||
|
|
||||||
# The full version, including alpha/beta/rc tags
|
# The full version, including alpha/beta/rc tags
|
||||||
release = version
|
release = version
|
||||||
@@ -159,7 +159,7 @@ html_theme_options = {"collapse_navigation": True}
|
|||||||
def url_resolver(url):
|
def url_resolver(url):
|
||||||
if ".html" not in url:
|
if ".html" not in url:
|
||||||
url = url.replace("../", "")
|
url = url.replace("../", "")
|
||||||
return "https://github.com/facebookresearch/pytorch3d/blob/master/" + url
|
return "https://github.com/facebookresearch/pytorch3d/blob/main/" + url
|
||||||
else:
|
else:
|
||||||
if DEPLOY:
|
if DEPLOY:
|
||||||
return "http://pytorch3d.readthedocs.io/" + url
|
return "http://pytorch3d.readthedocs.io/" + url
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ API Documentation
|
|||||||
|
|
||||||
.. toctree::
|
.. toctree::
|
||||||
|
|
||||||
common
|
|
||||||
structures
|
structures
|
||||||
io
|
io
|
||||||
loss
|
loss
|
||||||
@@ -12,3 +11,5 @@ API Documentation
|
|||||||
transforms
|
transforms
|
||||||
utils
|
utils
|
||||||
datasets
|
datasets
|
||||||
|
common
|
||||||
|
vis
|
||||||
|
|||||||
6
docs/modules/vis.rst
Normal file
6
docs/modules/vis.rst
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
pytorch3d.vis
|
||||||
|
===========================
|
||||||
|
|
||||||
|
.. automodule:: pytorch3d.vis
|
||||||
|
:members:
|
||||||
|
:undoc-members:
|
||||||
BIN
docs/notes/assets/iou3d.gif
Normal file
BIN
docs/notes/assets/iou3d.gif
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 221 KiB |
BIN
docs/notes/assets/iou3d_comp.png
Normal file
BIN
docs/notes/assets/iou3d_comp.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 12 KiB |
@@ -26,7 +26,7 @@ The need for different mesh batch modes is inherent to the way PyTorch operators
|
|||||||
<img src="assets/meshrcnn.png" alt="meshrcnn" width="700" align="middle" />
|
<img src="assets/meshrcnn.png" alt="meshrcnn" width="700" align="middle" />
|
||||||
|
|
||||||
|
|
||||||
[meshes]: https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/structures/meshes.py
|
[meshes]: https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/structures/meshes.py
|
||||||
[graphconv]: https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/ops/graph_conv.py
|
[graphconv]: https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/ops/graph_conv.py
|
||||||
[vert_align]: https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/ops/vert_align.py
|
[vert_align]: https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/ops/vert_align.py
|
||||||
[meshrcnn]: https://github.com/facebookresearch/meshrcnn
|
[meshrcnn]: https://github.com/facebookresearch/meshrcnn
|
||||||
|
|||||||
@@ -13,13 +13,14 @@ This is the system the object/scene lives - the world.
|
|||||||
* **Camera view coordinate system**
|
* **Camera view coordinate system**
|
||||||
This is the system that has its origin on the image plane and the `Z`-axis perpendicular to the image plane. In PyTorch3D, we assume that `+X` points left, and `+Y` points up and `+Z` points out from the image plane. The transformation from world to view happens after applying a rotation (`R`) and translation (`T`).
|
This is the system that has its origin on the image plane and the `Z`-axis perpendicular to the image plane. In PyTorch3D, we assume that `+X` points left, and `+Y` points up and `+Z` points out from the image plane. The transformation from world to view happens after applying a rotation (`R`) and translation (`T`).
|
||||||
* **NDC coordinate system**
|
* **NDC coordinate system**
|
||||||
This is the normalized coordinate system that confines in a volume the rendered part of the object/scene. Also known as view volume. Under the PyTorch3D convention, `(+1, +1, znear)` is the top left near corner, and `(-1, -1, zfar)` is the bottom right far corner of the volume. For non-square volumes, the side of the volume in `XY` with the smallest length ranges from `[-1, 1]` while the larger side from `[-s, s]`, where `s` is the aspect ratio and `s > 1` (larger divided by smaller side).
|
This is the normalized coordinate system that confines in a volume the rendered part of the object/scene. Also known as view volume. For square images, under the PyTorch3D convention, `(+1, +1, znear)` is the top left near corner, and `(-1, -1, zfar)` is the bottom right far corner of the volume. For non-square images, the side of the volume in `XY` with the smallest length ranges from `[-1, 1]` while the larger side from `[-s, s]`, where `s` is the aspect ratio and `s > 1` (larger divided by smaller side).
|
||||||
The transformation from view to NDC happens after applying the camera projection matrix (`P`).
|
The transformation from view to NDC happens after applying the camera projection matrix (`P`).
|
||||||
* **Screen coordinate system**
|
* **Screen coordinate system**
|
||||||
This is another representation of the view volume with the `XY` coordinates defined in pixel space instead of a normalized space.
|
This is another representation of the view volume with the `XY` coordinates defined in pixel space instead of a normalized space. (0,0) is the top left corner of the top left pixel
|
||||||
|
and (W,H) is the bottom right corner of the bottom right pixel.
|
||||||
|
|
||||||
An illustration of the 4 coordinate systems is shown below
|
An illustration of the 4 coordinate systems is shown below
|
||||||

|

|
||||||
|
|
||||||
## Defining Cameras in PyTorch3D
|
## Defining Cameras in PyTorch3D
|
||||||
|
|
||||||
@@ -44,7 +45,7 @@ All cameras inherit from `CamerasBase` which is a base class for all cameras. Py
|
|||||||
* `transform_points` which takes a set of input points in world coordinates and projects to NDC coordinates ranging from [-1, -1, znear] to [+1, +1, zfar].
|
* `transform_points` which takes a set of input points in world coordinates and projects to NDC coordinates ranging from [-1, -1, znear] to [+1, +1, zfar].
|
||||||
* `get_ndc_camera_transform` which defines the conversion to PyTorch3D's NDC space and is called when interfacing with the PyTorch3D renderer. If the camera is defined in NDC space, then the identity transform is returned. If the cameras is defined in screen space, the conversion from screen to NDC is returned. If users define their own camera in screen space, they need to think of the screen to NDC conversion. We provide examples for the `PerspectiveCameras` and `OrthographicCameras`.
|
* `get_ndc_camera_transform` which defines the conversion to PyTorch3D's NDC space and is called when interfacing with the PyTorch3D renderer. If the camera is defined in NDC space, then the identity transform is returned. If the cameras is defined in screen space, the conversion from screen to NDC is returned. If users define their own camera in screen space, they need to think of the screen to NDC conversion. We provide examples for the `PerspectiveCameras` and `OrthographicCameras`.
|
||||||
* `transform_points_ndc` which takes a set of points in world coordinates and projects them to PyTorch3D's NDC space
|
* `transform_points_ndc` which takes a set of points in world coordinates and projects them to PyTorch3D's NDC space
|
||||||
* `transform_points_screen` which takes a set of input points in world coordinates and projects them to the screen coordinates ranging from [0, 0, znear] to [W-1, H-1, zfar]
|
* `transform_points_screen` which takes a set of input points in world coordinates and projects them to the screen coordinates ranging from [0, 0, znear] to [W, H, zfar]
|
||||||
|
|
||||||
Users can easily customize their own cameras. For each new camera, users should implement the `get_projection_transform` routine that returns the mapping `P` from camera view coordinates to NDC coordinates.
|
Users can easily customize their own cameras. For each new camera, users should implement the `get_projection_transform` routine that returns the mapping `P` from camera view coordinates to NDC coordinates.
|
||||||
|
|
||||||
@@ -83,8 +84,8 @@ cameras_ndc = PerspectiveCameras(focal_length=fcl_ndc, principal_point=prp_ndc)
|
|||||||
|
|
||||||
# Screen space camera
|
# Screen space camera
|
||||||
image_size = ((128, 256),) # (h, w)
|
image_size = ((128, 256),) # (h, w)
|
||||||
fcl_screen = (76.2,) # fcl_ndc * (min(image_size) - 1) / 2
|
fcl_screen = (76.8,) # fcl_ndc * min(image_size) / 2
|
||||||
prp_screen = ((114.8, 31.75), ) # (w - 1) / 2 - px_ndc * (min(image_size) - 1) / 2, (h - 1) / 2 - py_ndc * (min(image_size) - 1) / 2
|
prp_screen = ((115.2, 48), ) # w / 2 - px_ndc * min(image_size) / 2, h / 2 - py_ndc * min(image_size) / 2
|
||||||
cameras_screen = PerspectiveCameras(focal_length=fcl_screen, principal_point=prp_screen, in_ndc=False, image_size=image_size)
|
cameras_screen = PerspectiveCameras(focal_length=fcl_screen, principal_point=prp_screen, in_ndc=False, image_size=image_size)
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -92,9 +93,9 @@ The relationship between screen and NDC specifications of a camera's `focal_leng
|
|||||||
The transformation of x and y coordinates between screen and NDC is exactly the same as for px and py.
|
The transformation of x and y coordinates between screen and NDC is exactly the same as for px and py.
|
||||||
|
|
||||||
```
|
```
|
||||||
fx_ndc = fx_screen * 2.0 / (s - 1)
|
fx_ndc = fx_screen * 2.0 / s
|
||||||
fy_ndc = fy_screen * 2.0 / (s - 1)
|
fy_ndc = fy_screen * 2.0 / s
|
||||||
|
|
||||||
px_ndc = - (px_screen - (image_width - 1) / 2.0) * 2.0 / (s - 1)
|
px_ndc = - (px_screen - image_width / 2.0) * 2.0 / s
|
||||||
py_ndc = - (py_screen - (image_height - 1) / 2.0) * 2.0 / (s - 1)
|
py_ndc = - (py_screen - image_height / 2.0) * 2.0 / s
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ sidebar_label: Cubify
|
|||||||
|
|
||||||
# Cubify
|
# Cubify
|
||||||
|
|
||||||
The [cubify operator](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/ops/cubify.py) converts an 3D occupancy grid of shape `BxDxHxW`, where `B` is the batch size, into a mesh instantiated as a [Meshes](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/structures/meshes.py) data structure of `B` elements. The operator replaces every occupied voxel (if its occupancy probability is greater than a user defined threshold) with a cuboid of 12 faces and 8 vertices. Shared vertices are merged, and internal faces are removed resulting in a **watertight** mesh.
|
The [cubify operator](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/ops/cubify.py) converts an 3D occupancy grid of shape `BxDxHxW`, where `B` is the batch size, into a mesh instantiated as a [Meshes](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/structures/meshes.py) data structure of `B` elements. The operator replaces every occupied voxel (if its occupancy probability is greater than a user defined threshold) with a cuboid of 12 faces and 8 vertices. Shared vertices are merged, and internal faces are removed resulting in a **watertight** mesh.
|
||||||
|
|
||||||
The operator provides three alignment modes {*topleft*, *corner*, *center*} which define the span of the mesh vertices with respect to the voxel grid. The alignment modes are described in the figure below for a 2D grid.
|
The operator provides three alignment modes {*topleft*, *corner*, *center*} which define the span of the mesh vertices with respect to the voxel grid. The alignment modes are described in the figure below for a 2D grid.
|
||||||
|
|
||||||
|
|||||||
@@ -9,12 +9,12 @@ sidebar_label: Data loaders
|
|||||||
|
|
||||||
ShapeNet is a dataset of 3D CAD models. ShapeNetCore is a subset of the ShapeNet dataset and can be downloaded from https://www.shapenet.org/. There are two versions ShapeNetCore: v1 (55 categories) and v2 (57 categories).
|
ShapeNet is a dataset of 3D CAD models. ShapeNetCore is a subset of the ShapeNet dataset and can be downloaded from https://www.shapenet.org/. There are two versions ShapeNetCore: v1 (55 categories) and v2 (57 categories).
|
||||||
|
|
||||||
The PyTorch3D [ShapeNetCore data loader](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/datasets/shapenet/shapenet_core.py) inherits from `torch.utils.data.Dataset`. It takes the path where the ShapeNetCore dataset is stored locally and loads models in the dataset. The ShapeNetCore class loads and returns models with their `categories`, `model_ids`, `vertices` and `faces`. The `ShapeNetCore` data loader also has a customized `render` function that renders models by the specified `model_ids (List[int])`, `categories (List[str])` or `indices (List[int])` with PyTorch3D's differentiable renderer.
|
The PyTorch3D [ShapeNetCore data loader](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/datasets/shapenet/shapenet_core.py) inherits from `torch.utils.data.Dataset`. It takes the path where the ShapeNetCore dataset is stored locally and loads models in the dataset. The ShapeNetCore class loads and returns models with their `categories`, `model_ids`, `vertices` and `faces`. The `ShapeNetCore` data loader also has a customized `render` function that renders models by the specified `model_ids (List[int])`, `categories (List[str])` or `indices (List[int])` with PyTorch3D's differentiable renderer.
|
||||||
|
|
||||||
The loaded dataset can be passed to `torch.utils.data.DataLoader` with PyTorch3D's customized collate_fn: `collate_batched_meshes` from the `pytorch3d.dataset.utils` module. The `vertices` and `faces` of the models are used to construct a [Meshes](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/structures/meshes.py) object representing the batched meshes. This `Meshes` representation can be easily used with other ops and rendering in PyTorch3D.
|
The loaded dataset can be passed to `torch.utils.data.DataLoader` with PyTorch3D's customized collate_fn: `collate_batched_meshes` from the `pytorch3d.dataset.utils` module. The `vertices` and `faces` of the models are used to construct a [Meshes](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/structures/meshes.py) object representing the batched meshes. This `Meshes` representation can be easily used with other ops and rendering in PyTorch3D.
|
||||||
|
|
||||||
### R2N2
|
### R2N2
|
||||||
|
|
||||||
The R2N2 dataset contains 13 categories that are a subset of the ShapeNetCore v.1 dataset. The R2N2 dataset also contains its own 24 renderings of each object and voxelized models. The R2N2 Dataset can be downloaded following the instructions [here](http://3d-r2n2.stanford.edu/).
|
The R2N2 dataset contains 13 categories that are a subset of the ShapeNetCore v.1 dataset. The R2N2 dataset also contains its own 24 renderings of each object and voxelized models. The R2N2 Dataset can be downloaded following the instructions [here](http://3d-r2n2.stanford.edu/).
|
||||||
|
|
||||||
The PyTorch3D [R2N2 data loader](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/datasets/r2n2/r2n2.py) is initialized with the paths to the ShapeNet dataset, the R2N2 dataset and the splits file for R2N2. Just like `ShapeNetCore`, it can be passed to `torch.utils.data.DataLoader` with a customized collate_fn: `collate_batched_R2N2` from the `pytorch3d.dataset.r2n2.utils` module. It returns all the data that `ShapeNetCore` returns, and in addition, it returns the R2N2 renderings (24 views for each model) along with the camera calibration matrices and a voxel representation for each model. Similar to `ShapeNetCore`, it has a customized `render` function that supports rendering specified models with the PyTorch3D differentiable renderer. In addition, it supports rendering models with the same orientations as R2N2's original renderings.
|
The PyTorch3D [R2N2 data loader](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/datasets/r2n2/r2n2.py) is initialized with the paths to the ShapeNet dataset, the R2N2 dataset and the splits file for R2N2. Just like `ShapeNetCore`, it can be passed to `torch.utils.data.DataLoader` with a customized collate_fn: `collate_batched_R2N2` from the `pytorch3d.dataset.r2n2.utils` module. It returns all the data that `ShapeNetCore` returns, and in addition, it returns the R2N2 renderings (24 views for each model) along with the camera calibration matrices and a voxel representation for each model. Similar to `ShapeNetCore`, it has a customized `render` function that supports rendering specified models with the PyTorch3D differentiable renderer. In addition, it supports rendering models with the same orientations as R2N2's original renderings.
|
||||||
|
|||||||
93
docs/notes/iou3d.md
Normal file
93
docs/notes/iou3d.md
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
---
|
||||||
|
hide_title: true
|
||||||
|
sidebar_label: IoU3D
|
||||||
|
---
|
||||||
|
|
||||||
|
# Intersection Over Union of Oriented 3D Boxes: A New Algorithm
|
||||||
|
|
||||||
|
Author: Georgia Gkioxari
|
||||||
|
|
||||||
|
Implementation: Georgia Gkioxari and Nikhila Ravi
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Intersection over union (IoU) of boxes is widely used as an evaluation metric in object detection ([1][pascalvoc], [2][coco]).
|
||||||
|
In 2D, IoU is commonly applied to axis-aligned boxes, namely boxes with edges parallel to the image axis.
|
||||||
|
In 3D, boxes are usually not axis aligned and can be oriented in any way in the world.
|
||||||
|
We introduce a new algorithm which computes the *exact* IoU of two *oriented 3D boxes*.
|
||||||
|
|
||||||
|
Our algorithm is based on the simple observation that the intersection of two oriented 3D boxes, `box1` and `box2`, is a convex polyhedron (convex n-gon in 2D) with `n > 2` comprised of connected *planar units*.
|
||||||
|
In 3D, these planar units are 3D triangular faces.
|
||||||
|
In 2D, they are 2D edges.
|
||||||
|
Each planar unit belongs strictly to either `box1` or `box2`.
|
||||||
|
Our algorithm finds these units by iterating through the sides of each box.
|
||||||
|
|
||||||
|
1. For each 3D triangular face `e` in `box1` we check wether `e` is *inside* `box2`.
|
||||||
|
2. If `e` is not *inside*, then we discard it.
|
||||||
|
3. If `e` is *inside* or *partially inside*, then the part of `e` *inside* `box2` is added to the units that comprise the final intersection shape.
|
||||||
|
4. We repeat for `box2`.
|
||||||
|
|
||||||
|
Below, we show a visualization of our algorithm for the case of 2D oriented boxes.
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="assets/iou3d.gif" alt="drawing" width="400"/>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
Note that when a box's unit `e` is *partially inside* a `box` then `e` breaks into smaller units. In 2D, `e` is an edge and breaks into smaller edges. In 3D, `e` is a 3D triangular face and is clipped to more and smaller faces by the plane of the `box` it intersects with.
|
||||||
|
This is the sole fundamental difference between the algorithms for 2D and 3D.
|
||||||
|
|
||||||
|
## Comparison With Other Algorithms
|
||||||
|
|
||||||
|
Current algorithms for 3D box IoU rely on crude approximations or make box assumptions, for example they restrict the orientation of the 3D boxes.
|
||||||
|
[Objectron][objectron] provides a nice discussion on the limitations of prior works.
|
||||||
|
[Objectron][objectron] introduces a great algorithm for exact IoU computation of oriented 3D boxes.
|
||||||
|
Objectron's algorithm computes the intersection points of two boxes using the [Sutherland-Hodgman algorithm][clipalgo].
|
||||||
|
The intersection shape is formed by the convex hull from the intersection points, using the [Qhull library][qhull].
|
||||||
|
|
||||||
|
Our algorithm has several advantages over Objectron's:
|
||||||
|
|
||||||
|
* Our algorithm also computes the points of intersection, similar to Objectron, but in addition stores the *planar units* the points belong to. This eliminates the need for convex hull computation which is `O(nlogn)` and relies on a third party library which often crashes with nondescript error messages.
|
||||||
|
* Objectron's implementation assumes that boxes are a rotation away from axis aligned. Our algorithm and implementation make no such assumption and work for any 3D boxes.
|
||||||
|
* Our implementation supports batching, unlike Objectron which assumes single element inputs for `box1` and `box2`.
|
||||||
|
* Our implementation is easily parallelizable and in fact we provide a custom C++/CUDA implementation which is **450 times faster than Objectron**.
|
||||||
|
|
||||||
|
Below we compare the performance for Objectron (in C++) and our algorithm, in C++ and CUDA. We benchmark for a common use case in object detection where `boxes1` hold M predictions and `boxes2` hold N ground truth 3D boxes in an image and compute the `MxN` IoU matrix. We report the time in ms for `M=N=16`.
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="assets/iou3d_comp.png" alt="drawing" width="400"/>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
## Usage and Code
|
||||||
|
|
||||||
|
```python
|
||||||
|
from pytorch3d.ops import box3d_overlap
|
||||||
|
# Assume inputs: boxes1 (M, 8, 3) and boxes2 (N, 8, 3)
|
||||||
|
intersection_vol, iou_3d = box3d_overal(boxes1, boxes2)
|
||||||
|
```
|
||||||
|
|
||||||
|
For more details, read [iou_box3d.py](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/ops/iou_box3d.py).
|
||||||
|
|
||||||
|
Note that our implementation is not differentiable as of now. We plan to add gradient support soon.
|
||||||
|
|
||||||
|
We also include have extensive [tests](https://github.com/facebookresearch/pytorch3d/blob/main/tests/test_iou_box3d.py) comparing our implementation with Objectron and MeshLab.
|
||||||
|
|
||||||
|
|
||||||
|
## Cite
|
||||||
|
|
||||||
|
If you use our 3D IoU algorithm, please cite PyTorch3D
|
||||||
|
|
||||||
|
```bibtex
|
||||||
|
@article{ravi2020pytorch3d,
|
||||||
|
author = {Nikhila Ravi and Jeremy Reizenstein and David Novotny and Taylor Gordon
|
||||||
|
and Wan-Yen Lo and Justin Johnson and Georgia Gkioxari},
|
||||||
|
title = {Accelerating 3D Deep Learning with PyTorch3D},
|
||||||
|
journal = {arXiv:2007.08501},
|
||||||
|
year = {2020},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
[pascalvoc]: http://host.robots.ox.ac.uk/pascal/VOC/
|
||||||
|
[coco]: https://cocodataset.org/
|
||||||
|
[objectron]: https://arxiv.org/abs/2012.09988
|
||||||
|
[qhull]: http://www.qhull.org/
|
||||||
|
[clipalgo]: https://en.wikipedia.org/wiki/Sutherland%E2%80%93Hodgman_algorithm
|
||||||
@@ -21,7 +21,7 @@ Our implementation decouples the rasterization and shading steps of rendering. T
|
|||||||
|
|
||||||
## <u>Get started</u>
|
## <u>Get started</u>
|
||||||
|
|
||||||
To learn about more the implementation and start using the renderer refer to [getting started with renderer](renderer_getting_started.md), which also contains the [architecture overview](assets/architecture_overview.png) and [coordinate transformation conventions](assets/transformations_overview.png).
|
To learn about more the implementation and start using the renderer refer to [getting started with renderer](renderer_getting_started.md), which also contains the [architecture overview](assets/architecture_renderer.jpg) and [coordinate transformation conventions](assets/transforms_overview.jpg).
|
||||||
|
|
||||||
## <u>Tech Report</u>
|
## <u>Tech Report</u>
|
||||||
|
|
||||||
|
|||||||
@@ -74,7 +74,7 @@ Since v0.3, [pulsar](https://arxiv.org/abs/2004.07484) can be used as a backend
|
|||||||
|
|
||||||
<img align="center" src="assets/pulsar_bm.png" width="300">
|
<img align="center" src="assets/pulsar_bm.png" width="300">
|
||||||
|
|
||||||
Pulsar's processing steps are tightly integrated CUDA kernels and do not work with custom `rasterizer` and `compositor` components. We provide two ways to use Pulsar: (1) there is a unified interface to match the PyTorch3D calling convention seamlessly. This is, for example, illustrated in the [point cloud tutorial](https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/render_colored_points.ipynb). (2) There is a direct interface available to the pulsar backend, which exposes the full functionality of the backend (including opacity, which is not yet available in PyTorch3D). Examples showing its use as well as the matching PyTorch3D interface code are available in [this folder](https://github.com/facebookresearch/pytorch3d/tree/master/docs/examples).
|
Pulsar's processing steps are tightly integrated CUDA kernels and do not work with custom `rasterizer` and `compositor` components. We provide two ways to use Pulsar: (1) there is a unified interface to match the PyTorch3D calling convention seamlessly. This is, for example, illustrated in the [point cloud tutorial](https://github.com/facebookresearch/pytorch3d/blob/main/docs/tutorials/render_colored_points.ipynb). (2) There is a direct interface available to the pulsar backend, which exposes the full functionality of the backend (including opacity, which is not yet available in PyTorch3D). Examples showing its use as well as the matching PyTorch3D interface code are available in [this folder](https://github.com/facebookresearch/pytorch3d/tree/master/docs/examples).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -84,7 +84,7 @@ For mesh texturing we offer several options (in `pytorch3d/renderer/mesh/texturi
|
|||||||
|
|
||||||
1. **Vertex Textures**: D dimensional textures for each vertex (for example an RGB color) which can be interpolated across the face. This can be represented as an `(N, V, D)` tensor. This is a fairly simple representation though and cannot model complex textures if the mesh faces are large.
|
1. **Vertex Textures**: D dimensional textures for each vertex (for example an RGB color) which can be interpolated across the face. This can be represented as an `(N, V, D)` tensor. This is a fairly simple representation though and cannot model complex textures if the mesh faces are large.
|
||||||
2. **UV Textures**: vertex UV coordinates and **one** texture map for the whole mesh. For a point on a face with given barycentric coordinates, the face color can be computed by interpolating the vertex uv coordinates and then sampling from the texture map. This representation requires two tensors (UVs: `(N, V, 2), Texture map: `(N, H, W, 3)`), and is limited to only support one texture map per mesh.
|
2. **UV Textures**: vertex UV coordinates and **one** texture map for the whole mesh. For a point on a face with given barycentric coordinates, the face color can be computed by interpolating the vertex uv coordinates and then sampling from the texture map. This representation requires two tensors (UVs: `(N, V, 2), Texture map: `(N, H, W, 3)`), and is limited to only support one texture map per mesh.
|
||||||
3. **Face Textures**: In more complex cases such as ShapeNet meshes, there are multiple texture maps per mesh and some faces have texture while other do not. For these cases, a more flexible representation is a texture atlas, where each face is represented as an `(RxR)` texture map where R is the texture resolution. For a given point on the face, the texture value can be sampled from the per face texture map using the barycentric coordinates of the point. This representation requires one tensor of shape `(N, F, R, R, 3)`. This texturing method is inspired by the SoftRasterizer implementation. For more details refer to the [`make_material_atlas`](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/io/mtl_io.py#L123) and [`sample_textures`](https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/renderer/mesh/textures.py#L452) functions. **NOTE:**: The `TexturesAtlas` texture sampling is only differentiable with respect to the texture atlas but not differentiable with respect to the barycentric coordinates.
|
3. **Face Textures**: In more complex cases such as ShapeNet meshes, there are multiple texture maps per mesh and some faces have texture while other do not. For these cases, a more flexible representation is a texture atlas, where each face is represented as an `(RxR)` texture map where R is the texture resolution. For a given point on the face, the texture value can be sampled from the per face texture map using the barycentric coordinates of the point. This representation requires one tensor of shape `(N, F, R, R, 3)`. This texturing method is inspired by the SoftRasterizer implementation. For more details refer to the [`make_material_atlas`](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/io/mtl_io.py#L123) and [`sample_textures`](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/renderer/mesh/textures.py#L452) functions. **NOTE:**: The `TexturesAtlas` texture sampling is only differentiable with respect to the texture atlas but not differentiable with respect to the barycentric coordinates.
|
||||||
|
|
||||||
|
|
||||||
<img src="assets/texturing.jpg" width="1000">
|
<img src="assets/texturing.jpg" width="1000">
|
||||||
|
|||||||
@@ -5,12 +5,12 @@ sidebar_label: Plotly Visualization
|
|||||||
|
|
||||||
# Overview
|
# Overview
|
||||||
|
|
||||||
PyTorch3D provides a modular differentiable renderer, but for instances where we want interactive plots or are not concerned with the differentiability of the rendering process, we provide [functions to render meshes and pointclouds in plotly](../../pytorch3d/vis/plotly_vis.py). These plotly figures allow you to rotate and zoom the rendered images and support plotting batched data as multiple traces in a singular plot or divided into individual subplots.
|
PyTorch3D provides a modular differentiable renderer, but for instances where we want interactive plots or are not concerned with the differentiability of the rendering process, we provide [functions to render meshes and pointclouds in plotly](https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/vis/plotly_vis.py). These plotly figures allow you to rotate and zoom the rendered images and support plotting batched data as multiple traces in a singular plot or divided into individual subplots.
|
||||||
|
|
||||||
|
|
||||||
# Examples
|
# Examples
|
||||||
|
|
||||||
These rendering functions accept plotly x,y, and z axis arguments as `kwargs`, allowing us to customize the plots. Here are two plots with colored axes, a [Pointclouds plot](assets/plotly_pointclouds.png), a [batched Meshes plot in subplots](assets/plotly_meshes_batch.png), and a [batched Meshes plot with multiple traces](assets/plotly_meshes_trace.png). Refer to the [render textured meshes](../tutorials/render_textured_meshes.ipynb) and [render colored pointclouds](../tutorials/render_colored_points) tutorials for code examples.
|
These rendering functions accept plotly x,y, and z axis arguments as `kwargs`, allowing us to customize the plots. Here are two plots with colored axes, a [Pointclouds plot](assets/plotly_pointclouds.png), a [batched Meshes plot in subplots](assets/plotly_meshes_batch.png), and a [batched Meshes plot with multiple traces](assets/plotly_meshes_trace.png). Refer to the [render textured meshes](https://pytorch3d.org/tutorials/render_textured_meshes) and [render colored pointclouds](https://pytorch3d.org/tutorials/render_colored_points) tutorials for code examples.
|
||||||
|
|
||||||
# Saving plots to images
|
# Saving plots to images
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -36,10 +36,10 @@
|
|||||||
"where $d(g_i, g_j)$ is a suitable metric that compares the extrinsics of cameras $g_i$ and $g_j$. \n",
|
"where $d(g_i, g_j)$ is a suitable metric that compares the extrinsics of cameras $g_i$ and $g_j$. \n",
|
||||||
"\n",
|
"\n",
|
||||||
"Visually, the problem can be described as follows. The picture below depicts the situation at the beginning of our optimization. The ground truth cameras are plotted in purple while the randomly initialized estimated cameras are plotted in orange:\n",
|
"Visually, the problem can be described as follows. The picture below depicts the situation at the beginning of our optimization. The ground truth cameras are plotted in purple while the randomly initialized estimated cameras are plotted in orange:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Our optimization seeks to align the estimated (orange) cameras with the ground truth (purple) cameras, by minimizing the discrepancies between pairs of relative cameras. Thus, the solution to the problem should look as follows:\n",
|
"Our optimization seeks to align the estimated (orange) cameras with the ground truth (purple) cameras, by minimizing the discrepancies between pairs of relative cameras. Thus, the solution to the problem should look as follows:\n",
|
||||||
"\n",
|
"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the `SfMPerspectiveCameras` class initialized with the corresponding rotation and translation matrices `R_absolute` and `T_absolute` that define the extrinsic parameters $g = (R, T); R \\in SO(3); T \\in \\mathbb{R}^3$. In order to ensure that `R_absolute` is a valid rotation matrix, we represent it using an exponential map (implemented with `so3_exp_map`) of the axis-angle representation of the rotation `log_R_absolute`.\n",
|
"In practice, the camera extrinsics $g_{ij}$ and $g_i$ are represented using objects from the `SfMPerspectiveCameras` class initialized with the corresponding rotation and translation matrices `R_absolute` and `T_absolute` that define the extrinsic parameters $g = (R, T); R \\in SO(3); T \\in \\mathbb{R}^3$. In order to ensure that `R_absolute` is a valid rotation matrix, we represent it using an exponential map (implemented with `so3_exp_map`) of the axis-angle representation of the rotation `log_R_absolute`.\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -89,14 +89,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
@@ -167,11 +169,11 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/camera_visualization.py\n",
|
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/camera_visualization.py\n",
|
||||||
"from camera_visualization import plot_camera_scene\n",
|
"from camera_visualization import plot_camera_scene\n",
|
||||||
"\n",
|
"\n",
|
||||||
"!mkdir data\n",
|
"!mkdir data\n",
|
||||||
"!wget -P data https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/data/camera_graph.pth"
|
"!wget -P data https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/data/camera_graph.pth"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -76,14 +76,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -51,14 +51,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
@@ -112,7 +114,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n",
|
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/plot_image_grid.py\n",
|
||||||
"from plot_image_grid import image_grid"
|
"from plot_image_grid import image_grid"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -90,14 +90,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -56,14 +56,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
@@ -98,7 +100,7 @@
|
|||||||
"from pytorch3d.transforms import so3_exp_map\n",
|
"from pytorch3d.transforms import so3_exp_map\n",
|
||||||
"from pytorch3d.renderer import (\n",
|
"from pytorch3d.renderer import (\n",
|
||||||
" FoVPerspectiveCameras, \n",
|
" FoVPerspectiveCameras, \n",
|
||||||
" NDCGridRaysampler,\n",
|
" NDCMultinomialRaysampler,\n",
|
||||||
" MonteCarloRaysampler,\n",
|
" MonteCarloRaysampler,\n",
|
||||||
" EmissionAbsorptionRaymarcher,\n",
|
" EmissionAbsorptionRaymarcher,\n",
|
||||||
" ImplicitRenderer,\n",
|
" ImplicitRenderer,\n",
|
||||||
@@ -126,8 +128,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n",
|
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/plot_image_grid.py\n",
|
||||||
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py\n",
|
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/generate_cow_renders.py\n",
|
||||||
"from plot_image_grid import image_grid\n",
|
"from plot_image_grid import image_grid\n",
|
||||||
"from generate_cow_renders import generate_cow_renders"
|
"from generate_cow_renders import generate_cow_renders"
|
||||||
]
|
]
|
||||||
@@ -184,7 +186,7 @@
|
|||||||
"The renderer is composed of a *raymarcher* and a *raysampler*.\n",
|
"The renderer is composed of a *raymarcher* and a *raysampler*.\n",
|
||||||
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use two different raysamplers:\n",
|
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use two different raysamplers:\n",
|
||||||
" - `MonteCarloRaysampler` is used to generate rays from a random subset of pixels of the image plane. The random subsampling of pixels is carried out during **training** to decrease the memory consumption of the implicit model.\n",
|
" - `MonteCarloRaysampler` is used to generate rays from a random subset of pixels of the image plane. The random subsampling of pixels is carried out during **training** to decrease the memory consumption of the implicit model.\n",
|
||||||
" - `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, `NDCGridRaysampler` consumes a large amount of memory and, hence, is only used for visualizing the results of the training at **test** time.\n",
|
" - `NDCMultinomialRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user). In combination with the implicit model of the scene, `NDCMultinomialRaysampler` consumes a large amount of memory and, hence, is only used for visualizing the results of the training at **test** time.\n",
|
||||||
"- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm."
|
"- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -209,10 +211,10 @@
|
|||||||
"\n",
|
"\n",
|
||||||
"# 1) Instantiate the raysamplers.\n",
|
"# 1) Instantiate the raysamplers.\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Here, NDCGridRaysampler generates a rectangular image\n",
|
"# Here, NDCMultinomialRaysampler generates a rectangular image\n",
|
||||||
"# grid of rays whose coordinates follow the PyTorch3D\n",
|
"# grid of rays whose coordinates follow the PyTorch3D\n",
|
||||||
"# coordinate conventions.\n",
|
"# coordinate conventions.\n",
|
||||||
"raysampler_grid = NDCGridRaysampler(\n",
|
"raysampler_grid = NDCMultinomialRaysampler(\n",
|
||||||
" image_height=render_size,\n",
|
" image_height=render_size,\n",
|
||||||
" image_width=render_size,\n",
|
" image_width=render_size,\n",
|
||||||
" n_pts_per_ray=128,\n",
|
" n_pts_per_ray=128,\n",
|
||||||
@@ -813,7 +815,7 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## 5. Visualizing the optimized neural radiance field\n",
|
"## 6. Visualizing the optimized neural radiance field\n",
|
||||||
"\n",
|
"\n",
|
||||||
"Finally, we visualize the neural radiance field by rendering from multiple viewpoints that rotate around the volume's y-axis."
|
"Finally, we visualize the neural radiance field by rendering from multiple viewpoints that rotate around the volume's y-axis."
|
||||||
]
|
]
|
||||||
@@ -842,7 +844,7 @@
|
|||||||
" fov=target_cameras.fov[0],\n",
|
" fov=target_cameras.fov[0],\n",
|
||||||
" device=device,\n",
|
" device=device,\n",
|
||||||
" )\n",
|
" )\n",
|
||||||
" # Note that we again render with `NDCGridRaySampler`\n",
|
" # Note that we again render with `NDCMultinomialRaysampler`\n",
|
||||||
" # and the batched_forward function of neural_radiance_field.\n",
|
" # and the batched_forward function of neural_radiance_field.\n",
|
||||||
" frames.append(\n",
|
" frames.append(\n",
|
||||||
" renderer_grid(\n",
|
" renderer_grid(\n",
|
||||||
@@ -863,9 +865,9 @@
|
|||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"source": [
|
"source": [
|
||||||
"## 6. Conclusion\n",
|
"## 7. Conclusion\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's implicit function renderer composed of either a `MonteCarloRaysampler` or `NDCGridRaysampler`, and an `EmissionAbsorptionRaymarcher`."
|
"In this tutorial, we have shown how to optimize an implicit representation of a scene such that the renders of the scene from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's implicit function renderer composed of either a `MonteCarloRaysampler` or `NDCMultinomialRaysampler`, and an `EmissionAbsorptionRaymarcher`."
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -68,14 +68,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
@@ -116,7 +118,7 @@
|
|||||||
"from pytorch3d.structures import Meshes\n",
|
"from pytorch3d.structures import Meshes\n",
|
||||||
"from pytorch3d.renderer import (\n",
|
"from pytorch3d.renderer import (\n",
|
||||||
" look_at_view_transform,\n",
|
" look_at_view_transform,\n",
|
||||||
" OpenGLPerspectiveCameras, \n",
|
" FoVPerspectiveCameras, \n",
|
||||||
" PointLights, \n",
|
" PointLights, \n",
|
||||||
" DirectionalLights, \n",
|
" DirectionalLights, \n",
|
||||||
" Materials, \n",
|
" Materials, \n",
|
||||||
@@ -155,7 +157,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n",
|
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/plot_image_grid.py\n",
|
||||||
"from plot_image_grid import image_grid"
|
"from plot_image_grid import image_grid"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -302,11 +304,11 @@
|
|||||||
"# broadcasting. So we can view the camera from the a distance of dist=2.7, and \n",
|
"# broadcasting. So we can view the camera from the a distance of dist=2.7, and \n",
|
||||||
"# then specify elevation and azimuth angles for each viewpoint as tensors. \n",
|
"# then specify elevation and azimuth angles for each viewpoint as tensors. \n",
|
||||||
"R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)\n",
|
"R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)\n",
|
||||||
"cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)\n",
|
"cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# We arbitrarily choose one particular view that will be used to visualize \n",
|
"# We arbitrarily choose one particular view that will be used to visualize \n",
|
||||||
"# results\n",
|
"# results\n",
|
||||||
"camera = OpenGLPerspectiveCameras(device=device, R=R[None, 1, ...], \n",
|
"camera = FoVPerspectiveCameras(device=device, R=R[None, 1, ...], \n",
|
||||||
" T=T[None, 1, ...]) \n",
|
" T=T[None, 1, ...]) \n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Define the settings for rasterization and shading. Here we set the output \n",
|
"# Define the settings for rasterization and shading. Here we set the output \n",
|
||||||
@@ -349,7 +351,7 @@
|
|||||||
"# Our multi-view cow dataset will be represented by these 2 lists of tensors,\n",
|
"# Our multi-view cow dataset will be represented by these 2 lists of tensors,\n",
|
||||||
"# each of length num_views.\n",
|
"# each of length num_views.\n",
|
||||||
"target_rgb = [target_images[i, ..., :3] for i in range(num_views)]\n",
|
"target_rgb = [target_images[i, ..., :3] for i in range(num_views)]\n",
|
||||||
"target_cameras = [OpenGLPerspectiveCameras(device=device, R=R[None, i, ...], \n",
|
"target_cameras = [FoVPerspectiveCameras(device=device, R=R[None, i, ...], \n",
|
||||||
" T=T[None, i, ...]) for i in range(num_views)]"
|
" T=T[None, i, ...]) for i in range(num_views)]"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -706,6 +708,7 @@
|
|||||||
" image_size=128, \n",
|
" image_size=128, \n",
|
||||||
" blur_radius=np.log(1. / 1e-4 - 1.)*sigma, \n",
|
" blur_radius=np.log(1. / 1e-4 - 1.)*sigma, \n",
|
||||||
" faces_per_pixel=50, \n",
|
" faces_per_pixel=50, \n",
|
||||||
|
" perspective_correct=False, \n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# Differentiable soft renderer using per vertex RGB colors for texture\n",
|
"# Differentiable soft renderer using per vertex RGB colors for texture\n",
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -47,14 +47,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
@@ -87,7 +89,7 @@
|
|||||||
"from pytorch3d.renderer import (\n",
|
"from pytorch3d.renderer import (\n",
|
||||||
" FoVPerspectiveCameras, \n",
|
" FoVPerspectiveCameras, \n",
|
||||||
" VolumeRenderer,\n",
|
" VolumeRenderer,\n",
|
||||||
" NDCGridRaysampler,\n",
|
" NDCMultinomialRaysampler,\n",
|
||||||
" EmissionAbsorptionRaymarcher\n",
|
" EmissionAbsorptionRaymarcher\n",
|
||||||
")\n",
|
")\n",
|
||||||
"from pytorch3d.transforms import so3_exp_map\n",
|
"from pytorch3d.transforms import so3_exp_map\n",
|
||||||
@@ -106,8 +108,8 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n",
|
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/plot_image_grid.py\n",
|
||||||
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/generate_cow_renders.py\n",
|
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/generate_cow_renders.py\n",
|
||||||
"from plot_image_grid import image_grid\n",
|
"from plot_image_grid import image_grid\n",
|
||||||
"from generate_cow_renders import generate_cow_renders"
|
"from generate_cow_renders import generate_cow_renders"
|
||||||
]
|
]
|
||||||
@@ -162,7 +164,7 @@
|
|||||||
"The following initializes a volumetric renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the volumetric model of the scene (the model is described & instantiated in a later cell).\n",
|
"The following initializes a volumetric renderer that emits a ray from each pixel of a target image and samples a set of uniformly-spaced points along the ray. At each ray-point, the corresponding density and color value is obtained by querying the corresponding location in the volumetric model of the scene (the model is described & instantiated in a later cell).\n",
|
||||||
"\n",
|
"\n",
|
||||||
"The renderer is composed of a *raymarcher* and a *raysampler*.\n",
|
"The renderer is composed of a *raymarcher* and a *raysampler*.\n",
|
||||||
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the `NDCGridRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).\n",
|
"- The *raysampler* is responsible for emitting rays from image pixels and sampling the points along them. Here, we use the `NDCMultinomialRaysampler` which follows the standard PyTorch3D coordinate grid convention (+X from right to left; +Y from bottom to top; +Z away from the user).\n",
|
||||||
"- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm."
|
"- The *raymarcher* takes the densities and colors sampled along each ray and renders each ray into a color and an opacity value of the ray's source pixel. Here we use the `EmissionAbsorptionRaymarcher` which implements the standard Emission-Absorption raymarching algorithm."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
@@ -184,14 +186,14 @@
|
|||||||
"volume_extent_world = 3.0\n",
|
"volume_extent_world = 3.0\n",
|
||||||
"\n",
|
"\n",
|
||||||
"# 1) Instantiate the raysampler.\n",
|
"# 1) Instantiate the raysampler.\n",
|
||||||
"# Here, NDCGridRaysampler generates a rectangular image\n",
|
"# Here, NDCMultinomialRaysampler generates a rectangular image\n",
|
||||||
"# grid of rays whose coordinates follow the PyTorch3D\n",
|
"# grid of rays whose coordinates follow the PyTorch3D\n",
|
||||||
"# coordinate conventions.\n",
|
"# coordinate conventions.\n",
|
||||||
"# Since we use a volume of size 128^3, we sample n_pts_per_ray=150,\n",
|
"# Since we use a volume of size 128^3, we sample n_pts_per_ray=150,\n",
|
||||||
"# which roughly corresponds to a one ray-point per voxel.\n",
|
"# which roughly corresponds to a one ray-point per voxel.\n",
|
||||||
"# We further set the min_depth=0.1 since there is no surface within\n",
|
"# We further set the min_depth=0.1 since there is no surface within\n",
|
||||||
"# 0.1 units of any camera plane.\n",
|
"# 0.1 units of any camera plane.\n",
|
||||||
"raysampler = NDCGridRaysampler(\n",
|
"raysampler = NDCMultinomialRaysampler(\n",
|
||||||
" image_width=render_size,\n",
|
" image_width=render_size,\n",
|
||||||
" image_height=render_size,\n",
|
" image_height=render_size,\n",
|
||||||
" n_pts_per_ray=150,\n",
|
" n_pts_per_ray=150,\n",
|
||||||
@@ -460,7 +462,7 @@
|
|||||||
"source": [
|
"source": [
|
||||||
"## 6. Conclusion\n",
|
"## 6. Conclusion\n",
|
||||||
"\n",
|
"\n",
|
||||||
"In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's volumetric renderer composed of an `NDCGridRaysampler` and an `EmissionAbsorptionRaymarcher`."
|
"In this tutorial, we have shown how to optimize a 3D volumetric representation of a scene such that the renders of the volume from known viewpoints match the observed images for each viewpoint. The rendering was carried out using the PyTorch3D's volumetric renderer composed of an `NDCMultinomialRaysampler` and an `EmissionAbsorptionRaymarcher`."
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -50,14 +50,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -57,14 +57,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
"# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -73,14 +73,16 @@
|
|||||||
"except ModuleNotFoundError:\n",
|
"except ModuleNotFoundError:\n",
|
||||||
" need_pytorch3d=True\n",
|
" need_pytorch3d=True\n",
|
||||||
"if need_pytorch3d:\n",
|
"if need_pytorch3d:\n",
|
||||||
" if torch.__version__.startswith(\"1.9\") and sys.platform.startswith(\"linux\"):\n",
|
" if torch.__version__.startswith(\"1.11.\") and sys.platform.startswith(\"linux\"):\n",
|
||||||
" # We try to install PyTorch3D via a released wheel.\n",
|
" # We try to install PyTorch3D via a released wheel.\n",
|
||||||
|
" pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
|
||||||
" version_str=\"\".join([\n",
|
" version_str=\"\".join([\n",
|
||||||
" f\"py3{sys.version_info.minor}_cu\",\n",
|
" f\"py3{sys.version_info.minor}_cu\",\n",
|
||||||
" torch.version.cuda.replace(\".\",\"\"),\n",
|
" torch.version.cuda.replace(\".\",\"\"),\n",
|
||||||
" f\"_pyt{torch.__version__[0:5:2]}\"\n",
|
" f\"_pyt{pyt_version_str}\"\n",
|
||||||
" ])\n",
|
" ])\n",
|
||||||
" !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
" !pip install fvcore iopath\n",
|
||||||
|
" !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
|
||||||
" else:\n",
|
" else:\n",
|
||||||
" # We try to install PyTorch3D from source.\n",
|
" # We try to install PyTorch3D from source.\n",
|
||||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||||
@@ -154,7 +156,7 @@
|
|||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n",
|
"!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/plot_image_grid.py\n",
|
||||||
"from plot_image_grid import image_grid"
|
"from plot_image_grid import image_grid"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
@@ -12,13 +12,13 @@ from pytorch3d.io import load_objs_as_meshes
|
|||||||
from pytorch3d.renderer import (
|
from pytorch3d.renderer import (
|
||||||
BlendParams,
|
BlendParams,
|
||||||
FoVPerspectiveCameras,
|
FoVPerspectiveCameras,
|
||||||
|
look_at_view_transform,
|
||||||
MeshRasterizer,
|
MeshRasterizer,
|
||||||
MeshRenderer,
|
MeshRenderer,
|
||||||
PointLights,
|
PointLights,
|
||||||
RasterizationSettings,
|
RasterizationSettings,
|
||||||
SoftPhongShader,
|
SoftPhongShader,
|
||||||
SoftSilhouetteShader,
|
SoftSilhouetteShader,
|
||||||
look_at_view_transform,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,8 +1,7 @@
|
|||||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
@REM All rights reserved.
|
@REM All rights reserved.
|
||||||
@REM
|
@REM
|
||||||
@REM This source code is licensed under the BSD-style license found in the
|
@REM This source code is licensed under the BSD-style license found in the
|
||||||
@REM LICENSE file in the root directory of this source tree.
|
@REM LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
:: Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
|
||||||
start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
|
start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -20,10 +20,11 @@ commands.
|
|||||||
```
|
```
|
||||||
import sys
|
import sys
|
||||||
import torch
|
import torch
|
||||||
|
pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
|
||||||
version_str="".join([
|
version_str="".join([
|
||||||
f"py3{sys.version_info.minor}_cu",
|
f"py3{sys.version_info.minor}_cu",
|
||||||
torch.version.cuda.replace(".",""),
|
torch.version.cuda.replace(".",""),
|
||||||
f"_pyt{torch.__version__[0:5:2]}"
|
f"_pyt{pyt_version_str}"
|
||||||
])
|
])
|
||||||
!pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/bash
|
#!/usr/bin/bash
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,8 +1,11 @@
|
|||||||
#!/usr/bin/bash
|
#!/usr/bin/bash
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
# LICENSE file in the root directory of this source tree.
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
sudo docker run --rm -v "$PWD/../../:/inside" pytorch/conda-cuda bash inside/packaging/linux_wheels/inside.sh
|
sudo docker run --rm -v "$PWD/../../:/inside" pytorch/conda-cuda bash inside/packaging/linux_wheels/inside.sh
|
||||||
|
sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu113 pytorch/conda-builder:cuda113 bash inside/packaging/linux_wheels/inside.sh
|
||||||
|
sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu115 pytorch/conda-builder:cuda115 bash inside/packaging/linux_wheels/inside.sh
|
||||||
|
sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu116 pytorch/conda-builder:cuda116 bash inside/packaging/linux_wheels/inside.sh
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
@@ -26,18 +26,13 @@ echo "CUB_HOME is now $CUB_HOME"
|
|||||||
# As a rule, we want to build for any combination of dependencies which is supported by
|
# As a rule, we want to build for any combination of dependencies which is supported by
|
||||||
# PyTorch3D and not older than the current Google Colab set up.
|
# PyTorch3D and not older than the current Google Colab set up.
|
||||||
|
|
||||||
PYTHON_VERSIONS="3.7 3.8 3.9"
|
PYTHON_VERSIONS="3.7 3.8 3.9 3.10"
|
||||||
# the keys are pytorch versions
|
# the keys are pytorch versions
|
||||||
declare -A CONDA_CUDA_VERSIONS=(
|
declare -A CONDA_CUDA_VERSIONS=(
|
||||||
# ["1.4.0"]="cu101"
|
["1.10.1"]="cu111 cu113"
|
||||||
# ["1.5.0"]="cu101 cu102"
|
["1.10.2"]="cu111 cu113"
|
||||||
# ["1.5.1"]="cu101 cu102"
|
["1.10.0"]="cu111 cu113"
|
||||||
# ["1.6.0"]="cu101 cu102"
|
["1.11.0"]="cu111 cu113 cu115"
|
||||||
# ["1.7.0"]="cu101 cu102 cu110"
|
|
||||||
# ["1.7.1"]="cu101 cu102 cu110"
|
|
||||||
# ["1.8.0"]="cu101 cu102 cu111"
|
|
||||||
# ["1.8.1"]="cu101 cu102 cu111"
|
|
||||||
["1.9.0"]="cu102 cu111"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -46,22 +41,59 @@ for python_version in $PYTHON_VERSIONS
|
|||||||
do
|
do
|
||||||
for pytorch_version in "${!CONDA_CUDA_VERSIONS[@]}"
|
for pytorch_version in "${!CONDA_CUDA_VERSIONS[@]}"
|
||||||
do
|
do
|
||||||
if [[ "3.6 3.7 3.8" != *$python_version* ]] && [[ "1.4.0 1.5.0 1.5.1 1.6.0 1.7.0" == *$pytorch_version* ]]
|
if [[ "3.7 3.8" != *$python_version* ]] && [[ "1.7.0" == *$pytorch_version* ]]
|
||||||
then
|
then
|
||||||
#python 3.9 and later not supported by pytorch 1.7.0 and before
|
#python 3.9 and later not supported by pytorch 1.7.0 and before
|
||||||
continue
|
continue
|
||||||
fi
|
fi
|
||||||
|
if [[ "3.7 3.8 3.9" != *$python_version* ]] && [[ "1.7.0 1.7.1 1.8.0 1.8.1 1.9.0 1.9.1 1.10.0 1.10.1 1.10.2" == *$pytorch_version* ]]
|
||||||
if [[ "3.9" == "$python_version" ]]
|
then
|
||||||
|
#python 3.10 and later not supported by pytorch 1.10.2 and before
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
extra_channel="-c conda-forge"
|
||||||
|
if [[ "1.11.0" == "$pytorch_version" ]]
|
||||||
then
|
then
|
||||||
extra_channel="-c conda-forge"
|
|
||||||
else
|
|
||||||
extra_channel=""
|
extra_channel=""
|
||||||
fi
|
fi
|
||||||
|
|
||||||
for cu_version in ${CONDA_CUDA_VERSIONS[$pytorch_version]}
|
for cu_version in ${CONDA_CUDA_VERSIONS[$pytorch_version]}
|
||||||
do
|
do
|
||||||
|
if [[ "cu113 cu115 cu116" == *$cu_version* ]]
|
||||||
|
# ^^^ CUDA versions listed here have to be built
|
||||||
|
# in their own containers.
|
||||||
|
then
|
||||||
|
if [[ $SELECTED_CUDA != "$cu_version" ]]
|
||||||
|
then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
elif [[ $SELECTED_CUDA != "" ]]
|
||||||
|
then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
case "$cu_version" in
|
case "$cu_version" in
|
||||||
|
cu116)
|
||||||
|
export CUDA_HOME=/usr/local/cuda-11.6/
|
||||||
|
export CUDA_TAG=11.6
|
||||||
|
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||||
|
;;
|
||||||
|
cu115)
|
||||||
|
export CUDA_HOME=/usr/local/cuda-11.5/
|
||||||
|
export CUDA_TAG=11.5
|
||||||
|
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||||
|
;;
|
||||||
|
cu113)
|
||||||
|
export CUDA_HOME=/usr/local/cuda-11.3/
|
||||||
|
export CUDA_TAG=11.3
|
||||||
|
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||||
|
;;
|
||||||
|
cu112)
|
||||||
|
export CUDA_HOME=/usr/local/cuda-11.2/
|
||||||
|
export CUDA_TAG=11.2
|
||||||
|
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||||
|
;;
|
||||||
cu111)
|
cu111)
|
||||||
export CUDA_HOME=/usr/local/cuda-11.1/
|
export CUDA_HOME=/usr/local/cuda-11.1/
|
||||||
export CUDA_TAG=11.1
|
export CUDA_TAG=11.1
|
||||||
@@ -97,6 +129,7 @@ do
|
|||||||
|
|
||||||
conda create -y -n "$tag" "python=$python_version"
|
conda create -y -n "$tag" "python=$python_version"
|
||||||
conda activate "$tag"
|
conda activate "$tag"
|
||||||
|
# shellcheck disable=SC2086
|
||||||
conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "cudatoolkit=$CUDA_TAG" torchvision
|
conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "cudatoolkit=$CUDA_TAG" torchvision
|
||||||
pip install fvcore iopath
|
pip install fvcore iopath
|
||||||
echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"
|
echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"
|
||||||
|
|||||||
@@ -1,10 +1,9 @@
|
|||||||
# Copyright (c) Facebook, Inc. and its affiliates.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
# All rights reserved.
|
# All rights reserved.
|
||||||
#
|
#
|
||||||
# This source code is licensed under the BSD-style license found in the
|
# This source code is licensed under the BSD-style license found in the
|
||||||
# LICENSE file in the root directory of this source tree.
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
import os
|
|
||||||
import subprocess
|
import subprocess
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import List
|
from typing import List
|
||||||
@@ -15,13 +14,12 @@ dest = "s3://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/"
|
|||||||
output = Path("output")
|
output = Path("output")
|
||||||
|
|
||||||
|
|
||||||
def fs3cmd(args, allow_failure: bool = False) -> List[str]:
|
def aws_s3_cmd(args) -> List[str]:
|
||||||
"""
|
"""
|
||||||
This function returns the args for subprocess to mimic the bash command
|
This function returns the full args for subprocess to do a command
|
||||||
fs3cmd available in the fairusers_aws module on the FAIR cluster.
|
with aws.
|
||||||
"""
|
"""
|
||||||
os.environ["FAIR_CLUSTER_NAME"] = os.environ["FAIR_ENV_CLUSTER"].lower()
|
cmd_args = ["aws", "s3", "--profile", "saml"] + args
|
||||||
cmd_args = ["/public/apps/fairusers_aws/bin/fs3cmd"] + args
|
|
||||||
return cmd_args
|
return cmd_args
|
||||||
|
|
||||||
|
|
||||||
@@ -31,7 +29,7 @@ def fs3_exists(path) -> bool:
|
|||||||
In fact, will also return True if there is a file which has the given
|
In fact, will also return True if there is a file which has the given
|
||||||
path as a prefix, but we are careful about this.
|
path as a prefix, but we are careful about this.
|
||||||
"""
|
"""
|
||||||
out = subprocess.check_output(fs3cmd(["ls", path]))
|
out = subprocess.check_output(aws_s3_cmd(["ls", path]))
|
||||||
return len(out) != 0
|
return len(out) != 0
|
||||||
|
|
||||||
|
|
||||||
@@ -41,7 +39,7 @@ def get_html_wrappers() -> None:
|
|||||||
assert not output_wrapper.exists()
|
assert not output_wrapper.exists()
|
||||||
dest_wrapper = dest + directory.name + "/download.html"
|
dest_wrapper = dest + directory.name + "/download.html"
|
||||||
if fs3_exists(dest_wrapper):
|
if fs3_exists(dest_wrapper):
|
||||||
subprocess.check_call(fs3cmd(["get", dest_wrapper, str(output_wrapper)]))
|
subprocess.check_call(aws_s3_cmd(["cp", dest_wrapper, str(output_wrapper)]))
|
||||||
|
|
||||||
|
|
||||||
def write_html_wrappers() -> None:
|
def write_html_wrappers() -> None:
|
||||||
@@ -70,7 +68,7 @@ def to_aws() -> None:
|
|||||||
for file in directory.iterdir():
|
for file in directory.iterdir():
|
||||||
print(file)
|
print(file)
|
||||||
subprocess.check_call(
|
subprocess.check_call(
|
||||||
fs3cmd(["put", str(file), dest + str(file.relative_to(output))])
|
aws_s3_cmd(["cp", str(file), dest + str(file.relative_to(output))])
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -79,3 +77,11 @@ if __name__ == "__main__":
|
|||||||
# get_html_wrappers()
|
# get_html_wrappers()
|
||||||
write_html_wrappers()
|
write_html_wrappers()
|
||||||
to_aws()
|
to_aws()
|
||||||
|
|
||||||
|
|
||||||
|
# see all files with
|
||||||
|
# aws s3 --profile saml ls --recursive s3://dl.fbaipublicfiles.com/pytorch3d/
|
||||||
|
|
||||||
|
# empty current with
|
||||||
|
# aws s3 --profile saml rm --recursive
|
||||||
|
# s3://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/
|
||||||
|
|||||||
@@ -1,9 +1,13 @@
|
|||||||
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
# shellcheck shell=bash
|
# shellcheck shell=bash
|
||||||
# A set of useful bash functions for common functionality we need to do in
|
# A set of useful bash functions for common functionality we need to do in
|
||||||
# many build scripts
|
# many build scripts
|
||||||
|
|
||||||
|
|
||||||
# Setup CUDA environment variables, based on CU_VERSION
|
# Setup CUDA environment variables, based on CU_VERSION
|
||||||
#
|
#
|
||||||
# Inputs:
|
# Inputs:
|
||||||
@@ -51,6 +55,50 @@ setup_cuda() {
|
|||||||
|
|
||||||
# Now work out the CUDA settings
|
# Now work out the CUDA settings
|
||||||
case "$CU_VERSION" in
|
case "$CU_VERSION" in
|
||||||
|
cu116)
|
||||||
|
if [[ "$OSTYPE" == "msys" ]]; then
|
||||||
|
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.6"
|
||||||
|
else
|
||||||
|
export CUDA_HOME=/usr/local/cuda-11.6/
|
||||||
|
fi
|
||||||
|
export FORCE_CUDA=1
|
||||||
|
# Hard-coding gencode flags is temporary situation until
|
||||||
|
# https://github.com/pytorch/pytorch/pull/23408 lands
|
||||||
|
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||||
|
;;
|
||||||
|
cu115)
|
||||||
|
if [[ "$OSTYPE" == "msys" ]]; then
|
||||||
|
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.5"
|
||||||
|
else
|
||||||
|
export CUDA_HOME=/usr/local/cuda-11.5/
|
||||||
|
fi
|
||||||
|
export FORCE_CUDA=1
|
||||||
|
# Hard-coding gencode flags is temporary situation until
|
||||||
|
# https://github.com/pytorch/pytorch/pull/23408 lands
|
||||||
|
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||||
|
;;
|
||||||
|
cu113)
|
||||||
|
if [[ "$OSTYPE" == "msys" ]]; then
|
||||||
|
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.3"
|
||||||
|
else
|
||||||
|
export CUDA_HOME=/usr/local/cuda-11.3/
|
||||||
|
fi
|
||||||
|
export FORCE_CUDA=1
|
||||||
|
# Hard-coding gencode flags is temporary situation until
|
||||||
|
# https://github.com/pytorch/pytorch/pull/23408 lands
|
||||||
|
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||||
|
;;
|
||||||
|
cu112)
|
||||||
|
if [[ "$OSTYPE" == "msys" ]]; then
|
||||||
|
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.2"
|
||||||
|
else
|
||||||
|
export CUDA_HOME=/usr/local/cuda-11.2/
|
||||||
|
fi
|
||||||
|
export FORCE_CUDA=1
|
||||||
|
# Hard-coding gencode flags is temporary situation until
|
||||||
|
# https://github.com/pytorch/pytorch/pull/23408 lands
|
||||||
|
export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
|
||||||
|
;;
|
||||||
cu111)
|
cu111)
|
||||||
if [[ "$OSTYPE" == "msys" ]]; then
|
if [[ "$OSTYPE" == "msys" ]]; then
|
||||||
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.1"
|
export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.1"
|
||||||
@@ -267,9 +315,20 @@ setup_conda_cudatoolkit_constraint() {
|
|||||||
export CONDA_CUDATOOLKIT_CONSTRAINT=""
|
export CONDA_CUDATOOLKIT_CONSTRAINT=""
|
||||||
else
|
else
|
||||||
case "$CU_VERSION" in
|
case "$CU_VERSION" in
|
||||||
|
cu116)
|
||||||
|
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.6,<11.7 # [not osx]"
|
||||||
|
;;
|
||||||
|
cu115)
|
||||||
|
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.5,<11.6 # [not osx]"
|
||||||
|
;;
|
||||||
|
cu113)
|
||||||
|
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.3,<11.4 # [not osx]"
|
||||||
|
;;
|
||||||
|
cu112)
|
||||||
|
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.2,<11.3 # [not osx]"
|
||||||
|
;;
|
||||||
cu111)
|
cu111)
|
||||||
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.1,<11.2 # [not osx]"
|
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.1,<11.2 # [not osx]"
|
||||||
#export CONDA_CUB_CONSTRAINT="- nvidiacub"
|
|
||||||
;;
|
;;
|
||||||
cu110)
|
cu110)
|
||||||
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.0,<11.1 # [not osx]"
|
export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.0,<11.1 # [not osx]"
|
||||||
|
|||||||
@@ -45,9 +45,12 @@ test:
|
|||||||
- docs
|
- docs
|
||||||
requires:
|
requires:
|
||||||
- imageio
|
- imageio
|
||||||
|
- hydra-core
|
||||||
|
- accelerate
|
||||||
|
- lpips
|
||||||
commands:
|
commands:
|
||||||
#pytest .
|
#pytest .
|
||||||
python -m unittest discover -v -s tests
|
python -m unittest discover -v -s tests -t .
|
||||||
|
|
||||||
|
|
||||||
about:
|
about:
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
@REM All rights reserved.
|
@REM All rights reserved.
|
||||||
@REM
|
@REM
|
||||||
@REM This source code is licensed under the BSD-style license found in the
|
@REM This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
@REM All rights reserved.
|
@REM All rights reserved.
|
||||||
@REM
|
@REM
|
||||||
@REM This source code is licensed under the BSD-style license found in the
|
@REM This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
@REM All rights reserved.
|
@REM All rights reserved.
|
||||||
@REM
|
@REM
|
||||||
@REM This source code is licensed under the BSD-style license found in the
|
@REM This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
@REM All rights reserved.
|
@REM All rights reserved.
|
||||||
@REM
|
@REM
|
||||||
@REM This source code is licensed under the BSD-style license found in the
|
@REM This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
@REM All rights reserved.
|
@REM All rights reserved.
|
||||||
@REM
|
@REM
|
||||||
@REM This source code is licensed under the BSD-style license found in the
|
@REM This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
@REM Copyright (c) Facebook, Inc. and its affiliates.
|
@REM Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
@REM All rights reserved.
|
@REM All rights reserved.
|
||||||
@REM
|
@REM
|
||||||
@REM This source code is licensed under the BSD-style license found in the
|
@REM This source code is licensed under the BSD-style license found in the
|
||||||
|
|||||||
5
projects/__init__.py
Normal file
5
projects/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
280
projects/implicitron_trainer/README.md
Normal file
280
projects/implicitron_trainer/README.md
Normal file
@@ -0,0 +1,280 @@
|
|||||||
|
# Introduction
|
||||||
|
|
||||||
|
Implicitron is a PyTorch3D-based framework for new-view synthesis via modeling neural-network-based representations.
|
||||||
|
|
||||||
|
# License
|
||||||
|
|
||||||
|
Implicitron is distributed as part of PyTorch3D under the [BSD license](https://github.com/facebookresearch/pytorch3d/blob/main/LICENSE).
|
||||||
|
It includes code from the [NeRF](https://github.com/bmild/nerf), [SRN](http://github.com/vsitzmann/scene-representation-networks) and [IDR](http://github.com/lioryariv/idr) repos.
|
||||||
|
See [LICENSE-3RD-PARTY](https://github.com/facebookresearch/pytorch3d/blob/main/LICENSE-3RD-PARTY) for their licenses.
|
||||||
|
|
||||||
|
|
||||||
|
# Installation
|
||||||
|
|
||||||
|
There are three ways to set up Implicitron, depending on the flexibility level required.
|
||||||
|
If you only want to train or evaluate models as they are implemented changing only the parameters, you can just install the package.
|
||||||
|
Implicitron also provides a flexible API that supports user-defined plug-ins;
|
||||||
|
if you want to re-implement some of the components without changing the high-level pipeline, you need to create a custom launcher script.
|
||||||
|
The most flexible option, though, is cloning PyTorch3D repo and building it from sources, which allows changing the code in arbitrary ways.
|
||||||
|
Below, we describe all three options in more detail.
|
||||||
|
|
||||||
|
|
||||||
|
## [Option 1] Running an executable from the package
|
||||||
|
|
||||||
|
This option allows you to use the code as is without changing the implementations.
|
||||||
|
Only configuration can be changed (see [Configuration system](#configuration-system)).
|
||||||
|
|
||||||
|
For this setup, install the dependencies and PyTorch3D from conda following [the guide](https://github.com/facebookresearch/pytorch3d/blob/master/INSTALL.md#1-install-with-cuda-support-from-anaconda-cloud-on-linux-only). Then, install implicitron-specific dependencies:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pip install "hydra-core>=1.1" visdom lpips matplotlib accelerate
|
||||||
|
```
|
||||||
|
|
||||||
|
Runner executable is available as `pytorch3d_implicitron_runner` shell command.
|
||||||
|
See [Running](#running) section below for examples of training and evaluation commands.
|
||||||
|
|
||||||
|
## [Option 2] Supporting custom implementations
|
||||||
|
|
||||||
|
To plug in custom implementations, for example, of renderer or implicit-function protocols, you need to create your own runner script and import the plug-in implementations there.
|
||||||
|
First, install PyTorch3D and Implicitron dependencies as described in the previous section.
|
||||||
|
Then, implement the custom script; copying `pytorch3d/projects/implicitron_trainer/experiment.py` is a good place to start.
|
||||||
|
See [Custom plugins](#custom-plugins) for more information on how to import implementations and enable them in the configs.
|
||||||
|
|
||||||
|
|
||||||
|
## [Option 3] Cloning PyTorch3D repo
|
||||||
|
|
||||||
|
This is the most flexible way to set up Implicitron as it allows changing the code directly.
|
||||||
|
It allows modifying the high-level rendering pipeline or implementing yet-unsupported loss functions.
|
||||||
|
Please follow the instructions to [install PyTorch3D from a local clone](https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md#2-install-from-a-local-clone).
|
||||||
|
Then, install Implicitron-specific dependencies:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
pip install "hydra-core>=1.1" visdom lpips matplotlib accelerate
|
||||||
|
```
|
||||||
|
|
||||||
|
You are still encouraged to implement custom plugins as above where possible as it makes reusing the code easier.
|
||||||
|
The executable is located in `pytorch3d/projects/implicitron_trainer`.
|
||||||
|
|
||||||
|
|
||||||
|
# Running
|
||||||
|
|
||||||
|
This section assumes that you use the executable provided by the installed package.
|
||||||
|
If you have a custom `experiment.py` script (as in the Option 2 above), replace the executable with the path to your script.
|
||||||
|
|
||||||
|
## Training
|
||||||
|
|
||||||
|
To run training, pass a yaml config file, followed by a list of overridden arguments.
|
||||||
|
For example, to train NeRF on the first skateboard sequence from CO3D dataset, you can run:
|
||||||
|
```shell
|
||||||
|
dataset_args=data_source_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||||
|
pytorch3d_implicitron_runner --config-path ./configs/ --config-name repro_singleseq_nerf $dataset_args.dataset_root=<DATASET_ROOT> $dataset_args.category='skateboard' $dataset_args.test_restrict_sequence_id=0 test_when_finished=True exp_dir=<CHECKPOINT_DIR>
|
||||||
|
```
|
||||||
|
|
||||||
|
Here, `--config-path` points to the config path relative to `pytorch3d_implicitron_runner` location;
|
||||||
|
`--config-name` picks the config (in this case, `repro_singleseq_nerf.yaml`);
|
||||||
|
`test_when_finished` will launch evaluation script once training is finished.
|
||||||
|
Replace `<DATASET_ROOT>` with the location where the dataset in Implicitron format is stored
|
||||||
|
and `<CHECKPOINT_DIR>` with a directory where checkpoints will be dumped during training.
|
||||||
|
Other configuration parameters can be overridden in the same way.
|
||||||
|
See [Configuration system](#configuration-system) section for more information on this.
|
||||||
|
|
||||||
|
|
||||||
|
## Evaluation
|
||||||
|
|
||||||
|
To run evaluation on the latest checkpoint after (or during) training, simply add `eval_only=True` to your training command.
|
||||||
|
|
||||||
|
E.g. for executing the evaluation on the NeRF skateboard sequence, you can run:
|
||||||
|
```shell
|
||||||
|
dataset_args=data_source_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||||
|
pytorch3d_implicitron_runner --config-path ./configs/ --config-name repro_singleseq_nerf $dataset_args.dataset_root=<CO3D_DATASET_ROOT> $dataset_args.category='skateboard' $dataset_args.test_restrict_sequence_id=0 exp_dir=<CHECKPOINT_DIR> eval_only=True
|
||||||
|
```
|
||||||
|
Evaluation prints the metrics to `stdout` and dumps them to a json file in `exp_dir`.
|
||||||
|
|
||||||
|
## Visualisation
|
||||||
|
|
||||||
|
The script produces a video of renders by a trained model assuming a pre-defined camera trajectory.
|
||||||
|
In order for it to work, `ffmpeg` needs to be installed:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
conda install ffmpeg
|
||||||
|
```
|
||||||
|
|
||||||
|
Here is an example of calling the script:
|
||||||
|
```shell
|
||||||
|
projects/implicitron_trainer/visualize_reconstruction.py exp_dir=<CHECKPOINT_DIR> visdom_show_preds=True n_eval_cameras=40 render_size="[64,64]" video_size="[256,256]"
|
||||||
|
```
|
||||||
|
|
||||||
|
The argument `n_eval_cameras` sets the number of rendering viewpoints sampled on a trajectory, which defaults to a circular fly-around;
|
||||||
|
`render_size` sets the size of a render passed to the model, which can be resized to `video_size` before writing.
|
||||||
|
|
||||||
|
Rendered videos of images, masks, and depth maps will be saved to `<CHECKPOINT_DIR>/vis`.
|
||||||
|
|
||||||
|
|
||||||
|
# Configuration system
|
||||||
|
|
||||||
|
We use hydra and OmegaConf to parse the configs.
|
||||||
|
The config schema and default values are defined by the dataclasses implementing the modules.
|
||||||
|
More specifically, if a class derives from `Configurable`, its fields can be set in config yaml files or overridden in CLI.
|
||||||
|
For example, `GenericModel` has a field `render_image_width` with the default value 400.
|
||||||
|
If it is specified in the yaml config file or in CLI command, the new value will be used.
|
||||||
|
|
||||||
|
Configurables can form hierarchies.
|
||||||
|
For example, `GenericModel` has a field `raysampler: RaySampler`, which is also Configurable.
|
||||||
|
In the config, inner parameters can be propagated using `_args` postfix, e.g. to change `raysampler.n_pts_per_ray_training` (the number of sampled points per ray), the node `raysampler_args.n_pts_per_ray_training` should be specified.
|
||||||
|
|
||||||
|
The root of the hierarchy is defined by `ExperimentConfig` dataclass.
|
||||||
|
It has top-level fields like `eval_only` which was used above for running evaluation by adding a CLI override.
|
||||||
|
Additionally, it has non-leaf nodes like `generic_model_args`, which dispatches the config parameters to `GenericModel`. Thus, changing the model parameters may be achieved in two ways: either by editing the config file, e.g.
|
||||||
|
```yaml
|
||||||
|
generic_model_args:
|
||||||
|
render_image_width: 800
|
||||||
|
raysampler_args:
|
||||||
|
n_pts_per_ray_training: 128
|
||||||
|
```
|
||||||
|
|
||||||
|
or, equivalently, by adding the following to `pytorch3d_implicitron_runner` arguments:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
generic_model_args.render_image_width=800 generic_model_args.raysampler_args.n_pts_per_ray_training=128
|
||||||
|
```
|
||||||
|
|
||||||
|
See the documentation in `pytorch3d/implicitron/tools/config.py` for more details.
|
||||||
|
|
||||||
|
## Replaceable implementations
|
||||||
|
|
||||||
|
Sometimes changing the model parameters does not provide enough flexibility, and you want to provide a new implementation for a building block.
|
||||||
|
The configuration system also supports it!
|
||||||
|
Abstract classes like `BaseRenderer` derive from `ReplaceableBase` instead of `Configurable`.
|
||||||
|
This means that other Configurables can refer to them using the base type, while the specific implementation is chosen in the config using `_class_type`-postfixed node.
|
||||||
|
In that case, `_args` node name has to include the implementation type.
|
||||||
|
More specifically, to change renderer settings, the config will look like this:
|
||||||
|
```yaml
|
||||||
|
generic_model_args:
|
||||||
|
renderer_class_type: LSTMRenderer
|
||||||
|
renderer_LSTMRenderer_args:
|
||||||
|
num_raymarch_steps: 10
|
||||||
|
hidden_size: 16
|
||||||
|
```
|
||||||
|
|
||||||
|
See the documentation in `pytorch3d/implicitron/tools/config.py` for more details on the configuration system.
|
||||||
|
|
||||||
|
## Custom plugins
|
||||||
|
|
||||||
|
If you have an idea for another implementation of a replaceable component, it can be plugged in without changing the core code.
|
||||||
|
For that, you need to set up Implicitron through option 2 or 3 above.
|
||||||
|
Let's say you want to implement a renderer that accumulates opacities similar to an X-ray machine.
|
||||||
|
First, create a module `x_ray_renderer.py` with a class deriving from `BaseRenderer`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from pytorch3d.implicitron.tools.config import registry
|
||||||
|
|
||||||
|
@registry.register
|
||||||
|
class XRayRenderer(BaseRenderer, torch.nn.Module):
|
||||||
|
n_pts_per_ray: int = 64
|
||||||
|
|
||||||
|
# if there are other base classes, make sure to call `super().__init__()` explicitly
|
||||||
|
def __post_init__(self):
|
||||||
|
super().__init__()
|
||||||
|
# custom initialization
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
ray_bundle,
|
||||||
|
implicit_functions=[],
|
||||||
|
evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
|
||||||
|
**kwargs,
|
||||||
|
) -> RendererOutput:
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
Please note `@registry.register` decorator that registers the plug-in as an implementation of `Renderer`.
|
||||||
|
IMPORTANT: In order for it to run, the class (or its enclosing module) has to be imported in your launch script. Additionally, this has to be done before parsing the root configuration class `ExperimentConfig`.
|
||||||
|
Simply add `from . import x_ray_renderer` at the beginning of `experiment.py`.
|
||||||
|
|
||||||
|
After that, you should be able to change the config with:
|
||||||
|
```yaml
|
||||||
|
generic_model_args:
|
||||||
|
renderer_class_type: XRayRenderer
|
||||||
|
renderer_XRayRenderer_args:
|
||||||
|
n_pts_per_ray: 128
|
||||||
|
```
|
||||||
|
|
||||||
|
to replace the implementation and potentially override the parameters.
|
||||||
|
|
||||||
|
# Code and config structure
|
||||||
|
|
||||||
|
As per above, the config structure is parsed automatically from the module hierarchy.
|
||||||
|
In particular, model parameters are contained in `generic_model_args` node, and dataset parameters in `data_source_args` node.
|
||||||
|
|
||||||
|
Here is the class structure (single-line edges show aggregation, while double lines show available implementations):
|
||||||
|
```
|
||||||
|
generic_model_args: GenericModel
|
||||||
|
└-- sequence_autodecoder_args: Autodecoder
|
||||||
|
└-- raysampler_args: RaySampler
|
||||||
|
└-- renderer_*_args: BaseRenderer
|
||||||
|
╘== MultiPassEmissionAbsorptionRenderer
|
||||||
|
╘== LSTMRenderer
|
||||||
|
╘== SignedDistanceFunctionRenderer
|
||||||
|
└-- ray_tracer_args: RayTracing
|
||||||
|
└-- ray_normal_coloring_network_args: RayNormalColoringNetwork
|
||||||
|
└-- implicit_function_*_args: ImplicitFunctionBase
|
||||||
|
╘== NeuralRadianceFieldImplicitFunction
|
||||||
|
╘== SRNImplicitFunction
|
||||||
|
└-- raymarch_function_args: SRNRaymarchFunction
|
||||||
|
└-- pixel_generator_args: SRNPixelGenerator
|
||||||
|
╘== SRNHyperNetImplicitFunction
|
||||||
|
└-- hypernet_args: SRNRaymarchHyperNet
|
||||||
|
└-- pixel_generator_args: SRNPixelGenerator
|
||||||
|
╘== IdrFeatureField
|
||||||
|
└-- image_feature_extractor_*_args: FeatureExtractorBase
|
||||||
|
╘== ResNetFeatureExtractor
|
||||||
|
└-- view_sampler_args: ViewSampler
|
||||||
|
└-- feature_aggregator_*_args: FeatureAggregatorBase
|
||||||
|
╘== IdentityFeatureAggregator
|
||||||
|
╘== AngleWeightedIdentityFeatureAggregator
|
||||||
|
╘== AngleWeightedReductionFeatureAggregator
|
||||||
|
╘== ReductionFeatureAggregator
|
||||||
|
solver_args: init_optimizer
|
||||||
|
data_source_args: ImplicitronDataSource
|
||||||
|
└-- dataset_map_provider_*_args
|
||||||
|
└-- data_loader_map_provider_*_args
|
||||||
|
```
|
||||||
|
|
||||||
|
Please look at the annotations of the respective classes or functions for the lists of hyperparameters.
|
||||||
|
|
||||||
|
# Reproducing CO3D experiments
|
||||||
|
|
||||||
|
Common Objects in 3D (CO3D) is a large-scale dataset of videos of rigid objects grouped into 50 common categories.
|
||||||
|
Implicitron provides implementations and config files to reproduce the results from [the paper](https://arxiv.org/abs/2109.00512).
|
||||||
|
Please follow [the link](https://github.com/facebookresearch/co3d#automatic-batch-download) for the instructions to download the dataset.
|
||||||
|
In training and evaluation scripts, use the download location as `<DATASET_ROOT>`.
|
||||||
|
It is also possible to define environment variable `CO3D_DATASET_ROOT` instead of specifying it.
|
||||||
|
To reproduce the experiments from the paper, use the following configs. For single-sequence experiments:
|
||||||
|
|
||||||
|
| Method | config file |
|
||||||
|
|-----------------|-------------------------------------|
|
||||||
|
| NeRF | repro_singleseq_nerf.yaml |
|
||||||
|
| NeRF + WCE | repro_singleseq_nerf_wce.yaml |
|
||||||
|
| NerFormer | repro_singleseq_nerformer.yaml |
|
||||||
|
| IDR | repro_singleseq_idr.yaml |
|
||||||
|
| SRN | repro_singleseq_srn_noharm.yaml |
|
||||||
|
| SRN + γ | repro_singleseq_srn.yaml |
|
||||||
|
| SRN + WCE | repro_singleseq_srn_wce_noharm.yaml |
|
||||||
|
| SRN + WCE + γ   | repro_singleseq_srn_wce.yaml        |
|
||||||
|
|
||||||
|
For multi-sequence experiments (without generalisation to new sequences):
|
||||||
|
|
||||||
|
| Method | config file |
|
||||||
|
|-----------------|--------------------------------------------|
|
||||||
|
| NeRF + AD | repro_multiseq_nerf_ad.yaml |
|
||||||
|
| SRN + AD | repro_multiseq_srn_ad_hypernet_noharm.yaml |
|
||||||
|
| SRN + γ + AD | repro_multiseq_srn_ad_hypernet.yaml |
|
||||||
|
|
||||||
|
For multi-sequence experiments (with generalisation to new sequences):
|
||||||
|
|
||||||
|
| Method | config file |
|
||||||
|
|-----------------|--------------------------------------|
|
||||||
|
| NeRF + WCE | repro_multiseq_nerf_wce.yaml |
|
||||||
|
| NerFormer | repro_multiseq_nerformer.yaml |
|
||||||
|
| SRN + WCE | repro_multiseq_srn_wce_noharm.yaml |
|
||||||
|
| SRN + WCE + γ | repro_multiseq_srn_wce.yaml |
|
||||||
5
projects/implicitron_trainer/__init__.py
Normal file
5
projects/implicitron_trainer/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
75
projects/implicitron_trainer/configs/repro_base.yaml
Normal file
75
projects/implicitron_trainer/configs/repro_base.yaml
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
defaults:
|
||||||
|
- default_config
|
||||||
|
- _self_
|
||||||
|
exp_dir: ./data/exps/base/
|
||||||
|
architecture: generic
|
||||||
|
visualize_interval: 0
|
||||||
|
visdom_port: 8097
|
||||||
|
data_source_args:
|
||||||
|
data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
|
||||||
|
dataset_map_provider_class_type: JsonIndexDatasetMapProvider
|
||||||
|
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||||
|
dataset_length_train: 1000
|
||||||
|
dataset_length_val: 1
|
||||||
|
num_workers: 8
|
||||||
|
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
||||||
|
dataset_root: ${oc.env:CO3D_DATASET_ROOT}
|
||||||
|
n_frames_per_sequence: -1
|
||||||
|
test_on_train: true
|
||||||
|
test_restrict_sequence_id: 0
|
||||||
|
dataset_JsonIndexDataset_args:
|
||||||
|
load_point_clouds: false
|
||||||
|
mask_depths: false
|
||||||
|
mask_images: false
|
||||||
|
generic_model_args:
|
||||||
|
loss_weights:
|
||||||
|
loss_mask_bce: 1.0
|
||||||
|
loss_prev_stage_mask_bce: 1.0
|
||||||
|
loss_autodecoder_norm: 0.01
|
||||||
|
loss_rgb_mse: 1.0
|
||||||
|
loss_prev_stage_rgb_mse: 1.0
|
||||||
|
output_rasterized_mc: false
|
||||||
|
chunk_size_grid: 102400
|
||||||
|
render_image_height: 400
|
||||||
|
render_image_width: 400
|
||||||
|
num_passes: 2
|
||||||
|
implicit_function_NeuralRadianceFieldImplicitFunction_args:
|
||||||
|
n_harmonic_functions_xyz: 10
|
||||||
|
n_harmonic_functions_dir: 4
|
||||||
|
n_hidden_neurons_xyz: 256
|
||||||
|
n_hidden_neurons_dir: 128
|
||||||
|
n_layers_xyz: 8
|
||||||
|
append_xyz:
|
||||||
|
- 5
|
||||||
|
latent_dim: 0
|
||||||
|
raysampler_AdaptiveRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 1024
|
||||||
|
scene_extent: 8.0
|
||||||
|
n_pts_per_ray_training: 64
|
||||||
|
n_pts_per_ray_evaluation: 64
|
||||||
|
stratified_point_sampling_training: true
|
||||||
|
stratified_point_sampling_evaluation: false
|
||||||
|
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
||||||
|
n_pts_per_ray_fine_training: 64
|
||||||
|
n_pts_per_ray_fine_evaluation: 64
|
||||||
|
append_coarse_samples_to_fine: true
|
||||||
|
density_noise_std_train: 1.0
|
||||||
|
view_pooler_args:
|
||||||
|
view_sampler_args:
|
||||||
|
masked_sampling: false
|
||||||
|
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||||
|
stages:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
proj_dim: 16
|
||||||
|
image_rescale: 0.32
|
||||||
|
first_max_pool: false
|
||||||
|
solver_args:
|
||||||
|
breed: adam
|
||||||
|
lr: 0.0005
|
||||||
|
lr_policy: multistep
|
||||||
|
max_epochs: 2000
|
||||||
|
momentum: 0.9
|
||||||
|
weight_decay: 0.0
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
generic_model_args:
|
||||||
|
image_feature_extractor_class_type: ResNetFeatureExtractor
|
||||||
|
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||||
|
add_images: true
|
||||||
|
add_masks: true
|
||||||
|
first_max_pool: true
|
||||||
|
image_rescale: 0.375
|
||||||
|
l2_norm: true
|
||||||
|
name: resnet34
|
||||||
|
normalize_image: true
|
||||||
|
pretrained: true
|
||||||
|
stages:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
proj_dim: 32
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
generic_model_args:
|
||||||
|
image_feature_extractor_class_type: ResNetFeatureExtractor
|
||||||
|
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||||
|
add_images: true
|
||||||
|
add_masks: true
|
||||||
|
first_max_pool: false
|
||||||
|
image_rescale: 0.375
|
||||||
|
l2_norm: true
|
||||||
|
name: resnet34
|
||||||
|
normalize_image: true
|
||||||
|
pretrained: true
|
||||||
|
stages:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
proj_dim: 16
|
||||||
@@ -0,0 +1,18 @@
|
|||||||
|
generic_model_args:
|
||||||
|
image_feature_extractor_class_type: ResNetFeatureExtractor
|
||||||
|
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||||
|
stages:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 3
|
||||||
|
first_max_pool: false
|
||||||
|
proj_dim: -1
|
||||||
|
l2_norm: false
|
||||||
|
image_rescale: 0.375
|
||||||
|
name: resnet34
|
||||||
|
normalize_image: true
|
||||||
|
pretrained: true
|
||||||
|
view_pooler_args:
|
||||||
|
feature_aggregator_AngleWeightedReductionFeatureAggregator_args:
|
||||||
|
reduction_functions:
|
||||||
|
- AVG
|
||||||
@@ -0,0 +1,35 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_base.yaml
|
||||||
|
- _self_
|
||||||
|
data_source_args:
|
||||||
|
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||||
|
batch_size: 10
|
||||||
|
dataset_length_train: 1000
|
||||||
|
dataset_length_val: 1
|
||||||
|
num_workers: 8
|
||||||
|
train_conditioning_type: SAME
|
||||||
|
val_conditioning_type: SAME
|
||||||
|
test_conditioning_type: SAME
|
||||||
|
images_per_seq_options:
|
||||||
|
- 2
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
- 5
|
||||||
|
- 6
|
||||||
|
- 7
|
||||||
|
- 8
|
||||||
|
- 9
|
||||||
|
- 10
|
||||||
|
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
||||||
|
assert_single_seq: false
|
||||||
|
task_str: multisequence
|
||||||
|
n_frames_per_sequence: -1
|
||||||
|
test_on_train: true
|
||||||
|
test_restrict_sequence_id: 0
|
||||||
|
solver_args:
|
||||||
|
max_epochs: 3000
|
||||||
|
milestones:
|
||||||
|
- 1000
|
||||||
|
camera_difficulty_bin_breaks:
|
||||||
|
- 0.666667
|
||||||
|
- 0.833334
|
||||||
@@ -0,0 +1,65 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_base.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
loss_weights:
|
||||||
|
loss_mask_bce: 100.0
|
||||||
|
loss_kl: 0.0
|
||||||
|
loss_rgb_mse: 1.0
|
||||||
|
loss_eikonal: 0.1
|
||||||
|
chunk_size_grid: 65536
|
||||||
|
num_passes: 1
|
||||||
|
output_rasterized_mc: true
|
||||||
|
sampling_mode_training: mask_sample
|
||||||
|
global_encoder_class_type: SequenceAutodecoder
|
||||||
|
global_encoder_SequenceAutodecoder_args:
|
||||||
|
autodecoder_args:
|
||||||
|
n_instances: 20000
|
||||||
|
init_scale: 1.0
|
||||||
|
encoding_dim: 256
|
||||||
|
implicit_function_IdrFeatureField_args:
|
||||||
|
n_harmonic_functions_xyz: 6
|
||||||
|
bias: 0.6
|
||||||
|
d_in: 3
|
||||||
|
d_out: 1
|
||||||
|
dims:
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
geometric_init: true
|
||||||
|
pooled_feature_dim: 0
|
||||||
|
skip_in:
|
||||||
|
- 6
|
||||||
|
weight_norm: true
|
||||||
|
renderer_SignedDistanceFunctionRenderer_args:
|
||||||
|
ray_tracer_args:
|
||||||
|
line_search_step: 0.5
|
||||||
|
line_step_iters: 3
|
||||||
|
n_secant_steps: 8
|
||||||
|
n_steps: 100
|
||||||
|
object_bounding_sphere: 8.0
|
||||||
|
sdf_threshold: 5.0e-05
|
||||||
|
ray_normal_coloring_network_args:
|
||||||
|
d_in: 9
|
||||||
|
d_out: 3
|
||||||
|
dims:
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
mode: idr
|
||||||
|
n_harmonic_functions_dir: 4
|
||||||
|
pooled_feature_dim: 0
|
||||||
|
weight_norm: true
|
||||||
|
raysampler_AdaptiveRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 1024
|
||||||
|
n_pts_per_ray_training: 0
|
||||||
|
n_pts_per_ray_evaluation: 0
|
||||||
|
scene_extent: 8.0
|
||||||
|
renderer_class_type: SignedDistanceFunctionRenderer
|
||||||
|
implicit_function_class_type: IdrFeatureField
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_base.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
chunk_size_grid: 16000
|
||||||
|
view_pooler_enabled: false
|
||||||
|
global_encoder_class_type: SequenceAutodecoder
|
||||||
|
global_encoder_SequenceAutodecoder_args:
|
||||||
|
autodecoder_args:
|
||||||
|
n_instances: 20000
|
||||||
|
encoding_dim: 256
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_base.yaml
|
||||||
|
- repro_feat_extractor_unnormed.yaml
|
||||||
|
- _self_
|
||||||
|
clip_grad: 1.0
|
||||||
|
generic_model_args:
|
||||||
|
chunk_size_grid: 16000
|
||||||
|
view_pooler_enabled: true
|
||||||
|
raysampler_AdaptiveRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 850
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_base.yaml
|
||||||
|
- repro_feat_extractor_transformer.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
chunk_size_grid: 16000
|
||||||
|
raysampler_AdaptiveRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 800
|
||||||
|
n_pts_per_ray_training: 32
|
||||||
|
n_pts_per_ray_evaluation: 32
|
||||||
|
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
||||||
|
n_pts_per_ray_fine_training: 16
|
||||||
|
n_pts_per_ray_fine_evaluation: 16
|
||||||
|
implicit_function_class_type: NeRFormerImplicitFunction
|
||||||
|
view_pooler_enabled: true
|
||||||
|
view_pooler_args:
|
||||||
|
feature_aggregator_class_type: IdentityFeatureAggregator
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_nerformer.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
view_pooler_args:
|
||||||
|
feature_aggregator_class_type: AngleWeightedIdentityFeatureAggregator
|
||||||
@@ -0,0 +1,34 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_base.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
chunk_size_grid: 16000
|
||||||
|
view_pooler_enabled: false
|
||||||
|
n_train_target_views: -1
|
||||||
|
num_passes: 1
|
||||||
|
loss_weights:
|
||||||
|
loss_rgb_mse: 200.0
|
||||||
|
loss_prev_stage_rgb_mse: 0.0
|
||||||
|
loss_mask_bce: 1.0
|
||||||
|
loss_prev_stage_mask_bce: 0.0
|
||||||
|
loss_autodecoder_norm: 0.001
|
||||||
|
depth_neg_penalty: 10000.0
|
||||||
|
global_encoder_class_type: SequenceAutodecoder
|
||||||
|
global_encoder_SequenceAutodecoder_args:
|
||||||
|
autodecoder_args:
|
||||||
|
encoding_dim: 256
|
||||||
|
n_instances: 20000
|
||||||
|
raysampler_class_type: NearFarRaySampler
|
||||||
|
raysampler_NearFarRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 2048
|
||||||
|
min_depth: 0.05
|
||||||
|
max_depth: 0.05
|
||||||
|
n_pts_per_ray_training: 1
|
||||||
|
n_pts_per_ray_evaluation: 1
|
||||||
|
stratified_point_sampling_training: false
|
||||||
|
stratified_point_sampling_evaluation: false
|
||||||
|
renderer_class_type: LSTMRenderer
|
||||||
|
implicit_function_class_type: SRNHyperNetImplicitFunction
|
||||||
|
solver_args:
|
||||||
|
breed: adam
|
||||||
|
lr: 5.0e-05
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_srn_ad_hypernet.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
num_passes: 1
|
||||||
|
implicit_function_SRNHyperNetImplicitFunction_args:
|
||||||
|
pixel_generator_args:
|
||||||
|
n_harmonic_functions: 0
|
||||||
|
hypernet_args:
|
||||||
|
n_harmonic_functions: 0
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_base.yaml
|
||||||
|
- repro_feat_extractor_normed.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
chunk_size_grid: 32000
|
||||||
|
num_passes: 1
|
||||||
|
n_train_target_views: -1
|
||||||
|
loss_weights:
|
||||||
|
loss_rgb_mse: 200.0
|
||||||
|
loss_prev_stage_rgb_mse: 0.0
|
||||||
|
loss_mask_bce: 1.0
|
||||||
|
loss_prev_stage_mask_bce: 0.0
|
||||||
|
loss_autodecoder_norm: 0.0
|
||||||
|
depth_neg_penalty: 10000.0
|
||||||
|
raysampler_class_type: NearFarRaySampler
|
||||||
|
raysampler_NearFarRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 2048
|
||||||
|
min_depth: 0.05
|
||||||
|
max_depth: 0.05
|
||||||
|
n_pts_per_ray_training: 1
|
||||||
|
n_pts_per_ray_evaluation: 1
|
||||||
|
stratified_point_sampling_training: false
|
||||||
|
stratified_point_sampling_evaluation: false
|
||||||
|
renderer_class_type: LSTMRenderer
|
||||||
|
implicit_function_class_type: SRNImplicitFunction
|
||||||
|
view_pooler_enabled: true
|
||||||
|
solver_args:
|
||||||
|
breed: adam
|
||||||
|
lr: 5.0e-05
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_multiseq_srn_wce.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
num_passes: 1
|
||||||
|
implicit_function_SRNImplicitFunction_args:
|
||||||
|
pixel_generator_args:
|
||||||
|
n_harmonic_functions: 0
|
||||||
|
raymarch_function_args:
|
||||||
|
n_harmonic_functions: 0
|
||||||
@@ -0,0 +1,39 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_base
|
||||||
|
- _self_
|
||||||
|
data_source_args:
|
||||||
|
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||||
|
batch_size: 1
|
||||||
|
dataset_length_train: 1000
|
||||||
|
dataset_length_val: 1
|
||||||
|
num_workers: 8
|
||||||
|
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
||||||
|
assert_single_seq: true
|
||||||
|
n_frames_per_sequence: -1
|
||||||
|
test_restrict_sequence_id: 0
|
||||||
|
test_on_train: false
|
||||||
|
generic_model_args:
|
||||||
|
render_image_height: 800
|
||||||
|
render_image_width: 800
|
||||||
|
log_vars:
|
||||||
|
- loss_rgb_psnr_fg
|
||||||
|
- loss_rgb_psnr
|
||||||
|
- loss_eikonal
|
||||||
|
- loss_prev_stage_rgb_psnr
|
||||||
|
- loss_mask_bce
|
||||||
|
- loss_prev_stage_mask_bce
|
||||||
|
- loss_rgb_mse
|
||||||
|
- loss_prev_stage_rgb_mse
|
||||||
|
- loss_depth_abs
|
||||||
|
- loss_depth_abs_fg
|
||||||
|
- loss_kl
|
||||||
|
- loss_mask_neg_iou
|
||||||
|
- objective
|
||||||
|
- epoch
|
||||||
|
- sec/it
|
||||||
|
solver_args:
|
||||||
|
lr: 0.0005
|
||||||
|
max_epochs: 400
|
||||||
|
milestones:
|
||||||
|
- 200
|
||||||
|
- 300
|
||||||
@@ -0,0 +1,57 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_base
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
loss_weights:
|
||||||
|
loss_mask_bce: 100.0
|
||||||
|
loss_kl: 0.0
|
||||||
|
loss_rgb_mse: 1.0
|
||||||
|
loss_eikonal: 0.1
|
||||||
|
chunk_size_grid: 65536
|
||||||
|
num_passes: 1
|
||||||
|
view_pooler_enabled: false
|
||||||
|
implicit_function_IdrFeatureField_args:
|
||||||
|
n_harmonic_functions_xyz: 6
|
||||||
|
bias: 0.6
|
||||||
|
d_in: 3
|
||||||
|
d_out: 1
|
||||||
|
dims:
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
geometric_init: true
|
||||||
|
pooled_feature_dim: 0
|
||||||
|
skip_in:
|
||||||
|
- 6
|
||||||
|
weight_norm: true
|
||||||
|
renderer_SignedDistanceFunctionRenderer_args:
|
||||||
|
ray_tracer_args:
|
||||||
|
line_search_step: 0.5
|
||||||
|
line_step_iters: 3
|
||||||
|
n_secant_steps: 8
|
||||||
|
n_steps: 100
|
||||||
|
object_bounding_sphere: 8.0
|
||||||
|
sdf_threshold: 5.0e-05
|
||||||
|
ray_normal_coloring_network_args:
|
||||||
|
d_in: 9
|
||||||
|
d_out: 3
|
||||||
|
dims:
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
mode: idr
|
||||||
|
n_harmonic_functions_dir: 4
|
||||||
|
pooled_feature_dim: 0
|
||||||
|
weight_norm: true
|
||||||
|
raysampler_AdaptiveRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 1024
|
||||||
|
n_pts_per_ray_training: 0
|
||||||
|
n_pts_per_ray_evaluation: 0
|
||||||
|
renderer_class_type: SignedDistanceFunctionRenderer
|
||||||
|
implicit_function_class_type: IdrFeatureField
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_base
|
||||||
|
- _self_
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_wce_base.yaml
|
||||||
|
- repro_feat_extractor_unnormed.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
chunk_size_grid: 16000
|
||||||
|
view_pooler_enabled: true
|
||||||
|
raysampler_AdaptiveRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 850
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_wce_base.yaml
|
||||||
|
- repro_feat_extractor_transformer.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
chunk_size_grid: 16000
|
||||||
|
view_pooler_enabled: true
|
||||||
|
implicit_function_class_type: NeRFormerImplicitFunction
|
||||||
|
raysampler_AdaptiveRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 800
|
||||||
|
n_pts_per_ray_training: 32
|
||||||
|
n_pts_per_ray_evaluation: 32
|
||||||
|
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
||||||
|
n_pts_per_ray_fine_training: 16
|
||||||
|
n_pts_per_ray_fine_evaluation: 16
|
||||||
|
view_pooler_args:
|
||||||
|
feature_aggregator_class_type: IdentityFeatureAggregator
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_base.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
num_passes: 1
|
||||||
|
chunk_size_grid: 32000
|
||||||
|
view_pooler_enabled: false
|
||||||
|
loss_weights:
|
||||||
|
loss_rgb_mse: 200.0
|
||||||
|
loss_prev_stage_rgb_mse: 0.0
|
||||||
|
loss_mask_bce: 1.0
|
||||||
|
loss_prev_stage_mask_bce: 0.0
|
||||||
|
loss_autodecoder_norm: 0.0
|
||||||
|
depth_neg_penalty: 10000.0
|
||||||
|
raysampler_class_type: NearFarRaySampler
|
||||||
|
raysampler_NearFarRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 2048
|
||||||
|
min_depth: 0.05
|
||||||
|
max_depth: 0.05
|
||||||
|
n_pts_per_ray_training: 1
|
||||||
|
n_pts_per_ray_evaluation: 1
|
||||||
|
stratified_point_sampling_training: false
|
||||||
|
stratified_point_sampling_evaluation: false
|
||||||
|
renderer_class_type: LSTMRenderer
|
||||||
|
implicit_function_class_type: SRNImplicitFunction
|
||||||
|
solver_args:
|
||||||
|
breed: adam
|
||||||
|
lr: 5.0e-05
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_srn.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
num_passes: 1
|
||||||
|
implicit_function_SRNImplicitFunction_args:
|
||||||
|
pixel_generator_args:
|
||||||
|
n_harmonic_functions: 0
|
||||||
|
raymarch_function_args:
|
||||||
|
n_harmonic_functions: 0
|
||||||
@@ -0,0 +1,29 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_wce_base
|
||||||
|
- repro_feat_extractor_normed.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
num_passes: 1
|
||||||
|
chunk_size_grid: 32000
|
||||||
|
view_pooler_enabled: true
|
||||||
|
loss_weights:
|
||||||
|
loss_rgb_mse: 200.0
|
||||||
|
loss_prev_stage_rgb_mse: 0.0
|
||||||
|
loss_mask_bce: 1.0
|
||||||
|
loss_prev_stage_mask_bce: 0.0
|
||||||
|
loss_autodecoder_norm: 0.0
|
||||||
|
depth_neg_penalty: 10000.0
|
||||||
|
raysampler_class_type: NearFarRaySampler
|
||||||
|
raysampler_NearFarRaySampler_args:
|
||||||
|
n_rays_per_image_sampled_from_mask: 2048
|
||||||
|
min_depth: 0.05
|
||||||
|
max_depth: 0.05
|
||||||
|
n_pts_per_ray_training: 1
|
||||||
|
n_pts_per_ray_evaluation: 1
|
||||||
|
stratified_point_sampling_training: false
|
||||||
|
stratified_point_sampling_evaluation: false
|
||||||
|
renderer_class_type: LSTMRenderer
|
||||||
|
implicit_function_class_type: SRNImplicitFunction
|
||||||
|
solver_args:
|
||||||
|
breed: adam
|
||||||
|
lr: 5.0e-05
|
||||||
@@ -0,0 +1,10 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_srn_wce.yaml
|
||||||
|
- _self_
|
||||||
|
generic_model_args:
|
||||||
|
num_passes: 1
|
||||||
|
implicit_function_SRNImplicitFunction_args:
|
||||||
|
pixel_generator_args:
|
||||||
|
n_harmonic_functions: 0
|
||||||
|
raymarch_function_args:
|
||||||
|
n_harmonic_functions: 0
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
defaults:
|
||||||
|
- repro_singleseq_base
|
||||||
|
- _self_
|
||||||
|
data_source_args:
|
||||||
|
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||||
|
batch_size: 10
|
||||||
|
dataset_length_train: 1000
|
||||||
|
dataset_length_val: 1
|
||||||
|
num_workers: 8
|
||||||
|
train_conditioning_type: SAME
|
||||||
|
val_conditioning_type: SAME
|
||||||
|
test_conditioning_type: SAME
|
||||||
|
images_per_seq_options:
|
||||||
|
- 2
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
- 5
|
||||||
|
- 6
|
||||||
|
- 7
|
||||||
|
- 8
|
||||||
|
- 9
|
||||||
|
- 10
|
||||||
706
projects/implicitron_trainer/experiment.py
Executable file
706
projects/implicitron_trainer/experiment.py
Executable file
@@ -0,0 +1,706 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
|
""""
|
||||||
|
This file is the entry point for launching experiments with Implicitron.
|
||||||
|
|
||||||
|
Main functions
|
||||||
|
---------------
|
||||||
|
- `run_training` is the wrapper for the train, val, test loops
|
||||||
|
and checkpointing
|
||||||
|
- `trainvalidate` is the inner loop which runs the model forward/backward
|
||||||
|
pass, visualizations and metric printing
|
||||||
|
|
||||||
|
Launch Training
|
||||||
|
---------------
|
||||||
|
Experiment config .yaml files are located in the
|
||||||
|
`projects/implicitron_trainer/configs` folder. To launch
|
||||||
|
an experiment, specify the name of the file. Specific config values can
|
||||||
|
also be overridden from the command line, for example:
|
||||||
|
|
||||||
|
```
|
||||||
|
./experiment.py --config-name base_config.yaml override.param.one=42 override.param.two=84
|
||||||
|
```
|
||||||
|
|
||||||
|
To run an experiment on a specific GPU, specify the `gpu_idx` key
|
||||||
|
in the config file / CLI. To run on a different device, specify the
|
||||||
|
device in `run_training`.
|
||||||
|
|
||||||
|
Outputs
|
||||||
|
--------
|
||||||
|
The outputs of the experiment are saved and logged in multiple ways:
|
||||||
|
- Checkpoints:
|
||||||
|
Model, optimizer and stats are stored in the directory
|
||||||
|
named by the `exp_dir` key from the config file / CLI parameters.
|
||||||
|
- Stats
|
||||||
|
Stats are logged and plotted to the file "train_stats.pdf" in the
|
||||||
|
same directory. The stats are also saved as part of the checkpoint file.
|
||||||
|
- Visualizations
|
||||||
|
Prredictions are plotted to a visdom server running at the
|
||||||
|
port specified by the `visdom_server` and `visdom_port` keys in the
|
||||||
|
config file.
|
||||||
|
|
||||||
|
"""
|
||||||
|
import copy
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import random
|
||||||
|
import time
|
||||||
|
import warnings
|
||||||
|
from typing import Any, Dict, Optional, Tuple
|
||||||
|
|
||||||
|
import hydra
|
||||||
|
import lpips
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
import tqdm
|
||||||
|
from accelerate import Accelerator
|
||||||
|
from omegaconf import DictConfig, OmegaConf
|
||||||
|
from packaging import version
|
||||||
|
from pytorch3d.implicitron.dataset import utils as ds_utils
|
||||||
|
from pytorch3d.implicitron.dataset.data_loader_map_provider import DataLoaderMap
|
||||||
|
from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource, Task
|
||||||
|
from pytorch3d.implicitron.dataset.dataset_map_provider import DatasetMap
|
||||||
|
from pytorch3d.implicitron.evaluation import evaluate_new_view_synthesis as evaluate
|
||||||
|
from pytorch3d.implicitron.models.generic_model import EvaluationMode, GenericModel
|
||||||
|
from pytorch3d.implicitron.models.renderer.multipass_ea import (
|
||||||
|
MultiPassEmissionAbsorptionRenderer,
|
||||||
|
)
|
||||||
|
from pytorch3d.implicitron.models.renderer.ray_sampler import AdaptiveRaySampler
|
||||||
|
from pytorch3d.implicitron.tools import model_io, vis_utils
|
||||||
|
from pytorch3d.implicitron.tools.config import (
|
||||||
|
expand_args_fields,
|
||||||
|
remove_unused_components,
|
||||||
|
)
|
||||||
|
from pytorch3d.implicitron.tools.stats import Stats
|
||||||
|
from pytorch3d.renderer.cameras import CamerasBase
|
||||||
|
|
||||||
|
from .impl.experiment_config import ExperimentConfig
|
||||||
|
from .impl.optimization import init_optimizer
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
if version.parse(hydra.__version__) < version.Version("1.1"):
|
||||||
|
raise ValueError(
|
||||||
|
f"Hydra version {hydra.__version__} is too old."
|
||||||
|
" (Implicitron requires version 1.1 or later.)"
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# only makes sense in FAIR cluster
|
||||||
|
import pytorch3d.implicitron.fair_cluster.slurm # noqa: F401
|
||||||
|
except ModuleNotFoundError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
no_accelerate = os.environ.get("PYTORCH3D_NO_ACCELERATE") is not None
|
||||||
|
|
||||||
|
|
||||||
|
def init_model(
|
||||||
|
*,
|
||||||
|
cfg: DictConfig,
|
||||||
|
accelerator: Optional[Accelerator] = None,
|
||||||
|
force_load: bool = False,
|
||||||
|
clear_stats: bool = False,
|
||||||
|
load_model_only: bool = False,
|
||||||
|
) -> Tuple[GenericModel, Stats, Optional[Dict[str, Any]]]:
|
||||||
|
"""
|
||||||
|
Returns an instance of `GenericModel`.
|
||||||
|
|
||||||
|
If `cfg.resume` is set or `force_load` is true,
|
||||||
|
attempts to load the last checkpoint from `cfg.exp_dir`. Failure to do so
|
||||||
|
will return the model with initial weights, unless `force_load` is passed,
|
||||||
|
in which case a FileNotFoundError is raised.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
force_load: If true, force load model from checkpoint even if
|
||||||
|
cfg.resume is false.
|
||||||
|
clear_stats: If true, clear the stats object loaded from checkpoint
|
||||||
|
load_model_only: If true, load only the model weights from checkpoint
|
||||||
|
and do not load the state of the optimizer and stats.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
model: The model with optionally loaded weights from checkpoint
|
||||||
|
stats: The stats structure (optionally loaded from checkpoint)
|
||||||
|
optimizer_state: The optimizer state dict containing
|
||||||
|
`state` and `param_groups` keys (optionally loaded from checkpoint)
|
||||||
|
|
||||||
|
Raise:
|
||||||
|
FileNotFoundError if `force_load` is passed but checkpoint is not found.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Initialize the model
|
||||||
|
if cfg.architecture == "generic":
|
||||||
|
model = GenericModel(**cfg.generic_model_args)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"No such arch {cfg.architecture}.")
|
||||||
|
|
||||||
|
# Determine the network outputs that should be logged
|
||||||
|
if hasattr(model, "log_vars"):
|
||||||
|
log_vars = copy.deepcopy(list(model.log_vars))
|
||||||
|
else:
|
||||||
|
log_vars = ["objective"]
|
||||||
|
|
||||||
|
visdom_env_charts = vis_utils.get_visdom_env(cfg) + "_charts"
|
||||||
|
|
||||||
|
# Init the stats struct
|
||||||
|
stats = Stats(
|
||||||
|
log_vars,
|
||||||
|
visdom_env=visdom_env_charts,
|
||||||
|
verbose=False,
|
||||||
|
visdom_server=cfg.visdom_server,
|
||||||
|
visdom_port=cfg.visdom_port,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Retrieve the last checkpoint
|
||||||
|
if cfg.resume_epoch > 0:
|
||||||
|
model_path = model_io.get_checkpoint(cfg.exp_dir, cfg.resume_epoch)
|
||||||
|
else:
|
||||||
|
model_path = model_io.find_last_checkpoint(cfg.exp_dir)
|
||||||
|
|
||||||
|
optimizer_state = None
|
||||||
|
if model_path is not None:
|
||||||
|
logger.info("found previous model %s" % model_path)
|
||||||
|
if force_load or cfg.resume:
|
||||||
|
logger.info(" -> resuming")
|
||||||
|
|
||||||
|
map_location = None
|
||||||
|
if accelerator is not None and not accelerator.is_local_main_process:
|
||||||
|
map_location = {
|
||||||
|
"cuda:%d" % 0: "cuda:%d" % accelerator.local_process_index
|
||||||
|
}
|
||||||
|
if load_model_only:
|
||||||
|
model_state_dict = torch.load(
|
||||||
|
model_io.get_model_path(model_path), map_location=map_location
|
||||||
|
)
|
||||||
|
stats_load, optimizer_state = None, None
|
||||||
|
else:
|
||||||
|
model_state_dict, stats_load, optimizer_state = model_io.load_model(
|
||||||
|
model_path, map_location=map_location
|
||||||
|
)
|
||||||
|
|
||||||
|
# Determine if stats should be reset
|
||||||
|
if not clear_stats:
|
||||||
|
if stats_load is None:
|
||||||
|
logger.info("\n\n\n\nCORRUPT STATS -> clearing stats\n\n\n\n")
|
||||||
|
last_epoch = model_io.parse_epoch_from_model_path(model_path)
|
||||||
|
logger.info(f"Estimated resume epoch = {last_epoch}")
|
||||||
|
|
||||||
|
# Reset the stats struct
|
||||||
|
for _ in range(last_epoch + 1):
|
||||||
|
stats.new_epoch()
|
||||||
|
assert last_epoch == stats.epoch
|
||||||
|
else:
|
||||||
|
stats = stats_load
|
||||||
|
|
||||||
|
# Update stats properties incase it was reset on load
|
||||||
|
stats.visdom_env = visdom_env_charts
|
||||||
|
stats.visdom_server = cfg.visdom_server
|
||||||
|
stats.visdom_port = cfg.visdom_port
|
||||||
|
stats.plot_file = os.path.join(cfg.exp_dir, "train_stats.pdf")
|
||||||
|
stats.synchronize_logged_vars(log_vars)
|
||||||
|
else:
|
||||||
|
logger.info(" -> clearing stats")
|
||||||
|
|
||||||
|
try:
|
||||||
|
# TODO: fix on creation of the buffers
|
||||||
|
# after the hack above, this will not pass in most cases
|
||||||
|
# ... but this is fine for now
|
||||||
|
model.load_state_dict(model_state_dict, strict=True)
|
||||||
|
except RuntimeError as e:
|
||||||
|
logger.error(e)
|
||||||
|
logger.info("Cant load state dict in strict mode! -> trying non-strict")
|
||||||
|
model.load_state_dict(model_state_dict, strict=False)
|
||||||
|
model.log_vars = log_vars
|
||||||
|
else:
|
||||||
|
logger.info(" -> but not resuming -> starting from scratch")
|
||||||
|
elif force_load:
|
||||||
|
raise FileNotFoundError(f"Cannot find a checkpoint in {cfg.exp_dir}!")
|
||||||
|
|
||||||
|
return model, stats, optimizer_state
|
||||||
|
|
||||||
|
|
||||||
|
def trainvalidate(
|
||||||
|
model,
|
||||||
|
stats,
|
||||||
|
epoch,
|
||||||
|
loader,
|
||||||
|
optimizer,
|
||||||
|
validation: bool,
|
||||||
|
*,
|
||||||
|
accelerator: Optional[Accelerator],
|
||||||
|
device: torch.device,
|
||||||
|
bp_var: str = "objective",
|
||||||
|
metric_print_interval: int = 5,
|
||||||
|
visualize_interval: int = 100,
|
||||||
|
visdom_env_root: str = "trainvalidate",
|
||||||
|
clip_grad: float = 0.0,
|
||||||
|
**kwargs,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
This is the main loop for training and evaluation including:
|
||||||
|
model forward pass, loss computation, backward pass and visualization.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model: The model module optionally loaded from checkpoint
|
||||||
|
stats: The stats struct, also optionally loaded from checkpoint
|
||||||
|
epoch: The index of the current epoch
|
||||||
|
loader: The dataloader to use for the loop
|
||||||
|
optimizer: The optimizer module optionally loaded from checkpoint
|
||||||
|
validation: If true, run the loop with the model in eval mode
|
||||||
|
and skip the backward pass
|
||||||
|
bp_var: The name of the key in the model output `preds` dict which
|
||||||
|
should be used as the loss for the backward pass.
|
||||||
|
metric_print_interval: The batch interval at which the stats should be
|
||||||
|
logged.
|
||||||
|
visualize_interval: The batch interval at which the visualizations
|
||||||
|
should be plotted
|
||||||
|
visdom_env_root: The name of the visdom environment to use for plotting
|
||||||
|
clip_grad: Optionally clip the gradient norms.
|
||||||
|
If set to a value <=0.0, no clipping
|
||||||
|
device: The device on which to run the model.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
None
|
||||||
|
"""
|
||||||
|
|
||||||
|
if validation:
|
||||||
|
model.eval()
|
||||||
|
trainmode = "val"
|
||||||
|
else:
|
||||||
|
model.train()
|
||||||
|
trainmode = "train"
|
||||||
|
|
||||||
|
t_start = time.time()
|
||||||
|
|
||||||
|
# get the visdom env name
|
||||||
|
visdom_env_imgs = visdom_env_root + "_images_" + trainmode
|
||||||
|
viz = vis_utils.get_visdom_connection(
|
||||||
|
server=stats.visdom_server,
|
||||||
|
port=stats.visdom_port,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Iterate through the batches
|
||||||
|
n_batches = len(loader)
|
||||||
|
for it, net_input in enumerate(loader):
|
||||||
|
last_iter = it == n_batches - 1
|
||||||
|
|
||||||
|
# move to gpu where possible (in place)
|
||||||
|
net_input = net_input.to(device)
|
||||||
|
|
||||||
|
# run the forward pass
|
||||||
|
if not validation:
|
||||||
|
optimizer.zero_grad()
|
||||||
|
preds = model(**{**net_input, "evaluation_mode": EvaluationMode.TRAINING})
|
||||||
|
else:
|
||||||
|
with torch.no_grad():
|
||||||
|
preds = model(
|
||||||
|
**{**net_input, "evaluation_mode": EvaluationMode.EVALUATION}
|
||||||
|
)
|
||||||
|
|
||||||
|
# make sure we dont overwrite something
|
||||||
|
assert all(k not in preds for k in net_input.keys())
|
||||||
|
# merge everything into one big dict
|
||||||
|
preds.update(net_input)
|
||||||
|
|
||||||
|
# update the stats logger
|
||||||
|
stats.update(preds, time_start=t_start, stat_set=trainmode)
|
||||||
|
assert stats.it[trainmode] == it, "inconsistent stat iteration number!"
|
||||||
|
|
||||||
|
# print textual status update
|
||||||
|
if it % metric_print_interval == 0 or last_iter:
|
||||||
|
stats.print(stat_set=trainmode, max_it=n_batches)
|
||||||
|
|
||||||
|
# visualize results
|
||||||
|
if (
|
||||||
|
(accelerator is None or accelerator.is_local_main_process)
|
||||||
|
and visualize_interval > 0
|
||||||
|
and it % visualize_interval == 0
|
||||||
|
):
|
||||||
|
prefix = f"e{stats.epoch}_it{stats.it[trainmode]}"
|
||||||
|
|
||||||
|
model.visualize(
|
||||||
|
viz,
|
||||||
|
visdom_env_imgs,
|
||||||
|
preds,
|
||||||
|
prefix,
|
||||||
|
)
|
||||||
|
|
||||||
|
# optimizer step
|
||||||
|
if not validation:
|
||||||
|
loss = preds[bp_var]
|
||||||
|
assert torch.isfinite(loss).all(), "Non-finite loss!"
|
||||||
|
# backprop
|
||||||
|
if accelerator is None:
|
||||||
|
loss.backward()
|
||||||
|
else:
|
||||||
|
accelerator.backward(loss)
|
||||||
|
if clip_grad > 0.0:
|
||||||
|
# Optionally clip the gradient norms.
|
||||||
|
total_norm = torch.nn.utils.clip_grad_norm(
|
||||||
|
model.parameters(), clip_grad
|
||||||
|
)
|
||||||
|
if total_norm > clip_grad:
|
||||||
|
logger.info(
|
||||||
|
f"Clipping gradient: {total_norm}"
|
||||||
|
+ f" with coef {clip_grad / float(total_norm)}."
|
||||||
|
)
|
||||||
|
|
||||||
|
optimizer.step()
|
||||||
|
|
||||||
|
|
||||||
|
def run_training(cfg: DictConfig) -> None:
|
||||||
|
"""
|
||||||
|
Entry point to run the training and validation loops
|
||||||
|
based on the specified config file.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Initialize the accelerator
|
||||||
|
if no_accelerate:
|
||||||
|
accelerator = None
|
||||||
|
device = torch.device("cuda:0")
|
||||||
|
else:
|
||||||
|
accelerator = Accelerator(device_placement=False)
|
||||||
|
logger.info(accelerator.state)
|
||||||
|
device = accelerator.device
|
||||||
|
|
||||||
|
logger.info(f"Running experiment on device: {device}")
|
||||||
|
|
||||||
|
# set the debug mode
|
||||||
|
if cfg.detect_anomaly:
|
||||||
|
logger.info("Anomaly detection!")
|
||||||
|
torch.autograd.set_detect_anomaly(cfg.detect_anomaly)
|
||||||
|
|
||||||
|
# create the output folder
|
||||||
|
os.makedirs(cfg.exp_dir, exist_ok=True)
|
||||||
|
_seed_all_random_engines(cfg.seed)
|
||||||
|
remove_unused_components(cfg)
|
||||||
|
|
||||||
|
# dump the exp config to the exp dir
|
||||||
|
try:
|
||||||
|
cfg_filename = os.path.join(cfg.exp_dir, "expconfig.yaml")
|
||||||
|
OmegaConf.save(config=cfg, f=cfg_filename)
|
||||||
|
except PermissionError:
|
||||||
|
warnings.warn("Cant dump config due to insufficient permissions!")
|
||||||
|
|
||||||
|
# setup datasets
|
||||||
|
datasource = ImplicitronDataSource(**cfg.data_source_args)
|
||||||
|
datasets, dataloaders = datasource.get_datasets_and_dataloaders()
|
||||||
|
task = datasource.get_task()
|
||||||
|
|
||||||
|
# init the model
|
||||||
|
model, stats, optimizer_state = init_model(cfg=cfg, accelerator=accelerator)
|
||||||
|
start_epoch = stats.epoch + 1
|
||||||
|
|
||||||
|
# move model to gpu
|
||||||
|
model.to(device)
|
||||||
|
|
||||||
|
# only run evaluation on the test dataloader
|
||||||
|
if cfg.eval_only:
|
||||||
|
_eval_and_dump(
|
||||||
|
cfg,
|
||||||
|
task,
|
||||||
|
datasource.all_train_cameras,
|
||||||
|
datasets,
|
||||||
|
dataloaders,
|
||||||
|
model,
|
||||||
|
stats,
|
||||||
|
device=device,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
|
||||||
|
# init the optimizer
|
||||||
|
optimizer, scheduler = init_optimizer(
|
||||||
|
model,
|
||||||
|
optimizer_state=optimizer_state,
|
||||||
|
last_epoch=start_epoch,
|
||||||
|
**cfg.solver_args,
|
||||||
|
)
|
||||||
|
|
||||||
|
# check the scheduler and stats have been initialized correctly
|
||||||
|
assert scheduler.last_epoch == stats.epoch + 1
|
||||||
|
assert scheduler.last_epoch == start_epoch
|
||||||
|
|
||||||
|
# Wrap all modules in the distributed library
|
||||||
|
# Note: we don't pass the scheduler to prepare as it
|
||||||
|
# doesn't need to be stepped at each optimizer step
|
||||||
|
train_loader = dataloaders.train
|
||||||
|
val_loader = dataloaders.val
|
||||||
|
if accelerator is not None:
|
||||||
|
(
|
||||||
|
model,
|
||||||
|
optimizer,
|
||||||
|
train_loader,
|
||||||
|
val_loader,
|
||||||
|
) = accelerator.prepare(model, optimizer, train_loader, val_loader)
|
||||||
|
|
||||||
|
past_scheduler_lrs = []
|
||||||
|
# loop through epochs
|
||||||
|
for epoch in range(start_epoch, cfg.solver_args.max_epochs):
|
||||||
|
# automatic new_epoch and plotting of stats at every epoch start
|
||||||
|
with stats:
|
||||||
|
|
||||||
|
# Make sure to re-seed random generators to ensure reproducibility
|
||||||
|
# even after restart.
|
||||||
|
_seed_all_random_engines(cfg.seed + epoch)
|
||||||
|
|
||||||
|
cur_lr = float(scheduler.get_last_lr()[-1])
|
||||||
|
logger.info(f"scheduler lr = {cur_lr:1.2e}")
|
||||||
|
past_scheduler_lrs.append(cur_lr)
|
||||||
|
|
||||||
|
# train loop
|
||||||
|
trainvalidate(
|
||||||
|
model,
|
||||||
|
stats,
|
||||||
|
epoch,
|
||||||
|
train_loader,
|
||||||
|
optimizer,
|
||||||
|
False,
|
||||||
|
visdom_env_root=vis_utils.get_visdom_env(cfg),
|
||||||
|
device=device,
|
||||||
|
accelerator=accelerator,
|
||||||
|
**cfg,
|
||||||
|
)
|
||||||
|
|
||||||
|
# val loop (optional)
|
||||||
|
if val_loader is not None and epoch % cfg.validation_interval == 0:
|
||||||
|
trainvalidate(
|
||||||
|
model,
|
||||||
|
stats,
|
||||||
|
epoch,
|
||||||
|
val_loader,
|
||||||
|
optimizer,
|
||||||
|
True,
|
||||||
|
visdom_env_root=vis_utils.get_visdom_env(cfg),
|
||||||
|
device=device,
|
||||||
|
accelerator=accelerator,
|
||||||
|
**cfg,
|
||||||
|
)
|
||||||
|
|
||||||
|
# eval loop (optional)
|
||||||
|
if (
|
||||||
|
dataloaders.test is not None
|
||||||
|
and cfg.test_interval > 0
|
||||||
|
and epoch % cfg.test_interval == 0
|
||||||
|
):
|
||||||
|
_run_eval(
|
||||||
|
model,
|
||||||
|
datasource.all_train_cameras,
|
||||||
|
dataloaders.test,
|
||||||
|
task,
|
||||||
|
camera_difficulty_bin_breaks=cfg.camera_difficulty_bin_breaks,
|
||||||
|
device=device,
|
||||||
|
)
|
||||||
|
|
||||||
|
assert stats.epoch == epoch, "inconsistent stats!"
|
||||||
|
|
||||||
|
# delete previous models if required
|
||||||
|
# save model only on the main process
|
||||||
|
if cfg.store_checkpoints and (
|
||||||
|
accelerator is None or accelerator.is_local_main_process
|
||||||
|
):
|
||||||
|
if cfg.store_checkpoints_purge > 0:
|
||||||
|
for prev_epoch in range(epoch - cfg.store_checkpoints_purge):
|
||||||
|
model_io.purge_epoch(cfg.exp_dir, prev_epoch)
|
||||||
|
outfile = model_io.get_checkpoint(cfg.exp_dir, epoch)
|
||||||
|
unwrapped_model = (
|
||||||
|
model if accelerator is None else accelerator.unwrap_model(model)
|
||||||
|
)
|
||||||
|
model_io.safe_save_model(
|
||||||
|
unwrapped_model, stats, outfile, optimizer=optimizer
|
||||||
|
)
|
||||||
|
|
||||||
|
scheduler.step()
|
||||||
|
|
||||||
|
new_lr = float(scheduler.get_last_lr()[-1])
|
||||||
|
if new_lr != cur_lr:
|
||||||
|
logger.info(f"LR change! {cur_lr} -> {new_lr}")
|
||||||
|
|
||||||
|
if cfg.test_when_finished:
|
||||||
|
_eval_and_dump(
|
||||||
|
cfg,
|
||||||
|
task,
|
||||||
|
datasource.all_train_cameras,
|
||||||
|
datasets,
|
||||||
|
dataloaders,
|
||||||
|
model,
|
||||||
|
stats,
|
||||||
|
device=device,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _eval_and_dump(
|
||||||
|
cfg,
|
||||||
|
task: Task,
|
||||||
|
all_train_cameras: Optional[CamerasBase],
|
||||||
|
datasets: DatasetMap,
|
||||||
|
dataloaders: DataLoaderMap,
|
||||||
|
model,
|
||||||
|
stats,
|
||||||
|
device,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Run the evaluation loop with the test data loader and
|
||||||
|
save the predictions to the `exp_dir`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
dataloader = dataloaders.test
|
||||||
|
|
||||||
|
if dataloader is None:
|
||||||
|
raise ValueError('DataLoaderMap have to contain the "test" entry for eval!')
|
||||||
|
|
||||||
|
results = _run_eval(
|
||||||
|
model,
|
||||||
|
all_train_cameras,
|
||||||
|
dataloader,
|
||||||
|
task,
|
||||||
|
camera_difficulty_bin_breaks=cfg.camera_difficulty_bin_breaks,
|
||||||
|
device=device,
|
||||||
|
)
|
||||||
|
|
||||||
|
# add the evaluation epoch to the results
|
||||||
|
for r in results:
|
||||||
|
r["eval_epoch"] = int(stats.epoch)
|
||||||
|
|
||||||
|
logger.info("Evaluation results")
|
||||||
|
evaluate.pretty_print_nvs_metrics(results)
|
||||||
|
|
||||||
|
with open(os.path.join(cfg.exp_dir, "results_test.json"), "w") as f:
|
||||||
|
json.dump(results, f)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_eval_frame_data(frame_data):
|
||||||
|
"""
|
||||||
|
Masks the unknown image data to make sure we cannot use it at model evaluation time.
|
||||||
|
"""
|
||||||
|
frame_data_for_eval = copy.deepcopy(frame_data)
|
||||||
|
is_known = ds_utils.is_known_frame(frame_data.frame_type).type_as(
|
||||||
|
frame_data.image_rgb
|
||||||
|
)[:, None, None, None]
|
||||||
|
for k in ("image_rgb", "depth_map", "fg_probability", "mask_crop"):
|
||||||
|
value_masked = getattr(frame_data_for_eval, k).clone() * is_known
|
||||||
|
setattr(frame_data_for_eval, k, value_masked)
|
||||||
|
return frame_data_for_eval
|
||||||
|
|
||||||
|
|
||||||
|
def _run_eval(
|
||||||
|
model,
|
||||||
|
all_train_cameras,
|
||||||
|
loader,
|
||||||
|
task: Task,
|
||||||
|
camera_difficulty_bin_breaks: Tuple[float, float],
|
||||||
|
device,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Run the evaluation loop on the test dataloader
|
||||||
|
"""
|
||||||
|
lpips_model = lpips.LPIPS(net="vgg")
|
||||||
|
lpips_model = lpips_model.to(device)
|
||||||
|
|
||||||
|
model.eval()
|
||||||
|
|
||||||
|
per_batch_eval_results = []
|
||||||
|
logger.info("Evaluating model ...")
|
||||||
|
for frame_data in tqdm.tqdm(loader):
|
||||||
|
frame_data = frame_data.to(device)
|
||||||
|
|
||||||
|
# mask out the unknown images so that the model does not see them
|
||||||
|
frame_data_for_eval = _get_eval_frame_data(frame_data)
|
||||||
|
|
||||||
|
with torch.no_grad():
|
||||||
|
preds = model(
|
||||||
|
**{**frame_data_for_eval, "evaluation_mode": EvaluationMode.EVALUATION}
|
||||||
|
)
|
||||||
|
|
||||||
|
# TODO: Cannot use accelerate gather for two reasons:.
|
||||||
|
# (1) TypeError: Can't apply _gpu_gather_one on object of type
|
||||||
|
# <class 'pytorch3d.implicitron.models.base_model.ImplicitronRender'>,
|
||||||
|
# only of nested list/tuple/dicts of objects that satisfy is_torch_tensor.
|
||||||
|
# (2) Same error above but for frame_data which contains Cameras.
|
||||||
|
|
||||||
|
implicitron_render = copy.deepcopy(preds["implicitron_render"])
|
||||||
|
|
||||||
|
per_batch_eval_results.append(
|
||||||
|
evaluate.eval_batch(
|
||||||
|
frame_data,
|
||||||
|
implicitron_render,
|
||||||
|
bg_color="black",
|
||||||
|
lpips_model=lpips_model,
|
||||||
|
source_cameras=all_train_cameras,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
_, category_result = evaluate.summarize_nvs_eval_results(
|
||||||
|
per_batch_eval_results, task, camera_difficulty_bin_breaks
|
||||||
|
)
|
||||||
|
|
||||||
|
return category_result["results"]
|
||||||
|
|
||||||
|
|
||||||
|
def _seed_all_random_engines(seed: int) -> None:
|
||||||
|
np.random.seed(seed)
|
||||||
|
torch.manual_seed(seed)
|
||||||
|
random.seed(seed)
|
||||||
|
|
||||||
|
|
||||||
|
def _setup_envvars_for_cluster() -> bool:
|
||||||
|
"""
|
||||||
|
Prepares to run on cluster if relevant.
|
||||||
|
Returns whether FAIR cluster in use.
|
||||||
|
"""
|
||||||
|
# TODO: How much of this is needed in general?
|
||||||
|
|
||||||
|
try:
|
||||||
|
import submitit
|
||||||
|
except ImportError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Only needed when launching on cluster with slurm and submitit
|
||||||
|
job_env = submitit.JobEnvironment()
|
||||||
|
except RuntimeError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
os.environ["LOCAL_RANK"] = str(job_env.local_rank)
|
||||||
|
os.environ["RANK"] = str(job_env.global_rank)
|
||||||
|
os.environ["WORLD_SIZE"] = str(job_env.num_tasks)
|
||||||
|
os.environ["MASTER_ADDR"] = "localhost"
|
||||||
|
os.environ["MASTER_PORT"] = "42918"
|
||||||
|
logger.info(
|
||||||
|
"Num tasks %s, global_rank %s"
|
||||||
|
% (str(job_env.num_tasks), str(job_env.global_rank))
|
||||||
|
)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
expand_args_fields(ExperimentConfig)
|
||||||
|
cs = hydra.core.config_store.ConfigStore.instance()
|
||||||
|
cs.store(name="default_config", node=ExperimentConfig)
|
||||||
|
|
||||||
|
|
||||||
|
@hydra.main(config_path="./configs/", config_name="default_config")
|
||||||
|
def experiment(cfg: DictConfig) -> None:
|
||||||
|
# CUDA_VISIBLE_DEVICES must have been set.
|
||||||
|
|
||||||
|
if "CUDA_DEVICE_ORDER" not in os.environ:
|
||||||
|
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
|
||||||
|
|
||||||
|
if not _setup_envvars_for_cluster():
|
||||||
|
logger.info("Running locally")
|
||||||
|
|
||||||
|
# TODO: The following may be needed for hydra/submitit it to work
|
||||||
|
expand_args_fields(GenericModel)
|
||||||
|
expand_args_fields(AdaptiveRaySampler)
|
||||||
|
expand_args_fields(MultiPassEmissionAbsorptionRenderer)
|
||||||
|
expand_args_fields(ImplicitronDataSource)
|
||||||
|
|
||||||
|
run_training(cfg)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
experiment()
|
||||||
5
projects/implicitron_trainer/impl/__init__.py
Normal file
5
projects/implicitron_trainer/impl/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
49
projects/implicitron_trainer/impl/experiment_config.py
Normal file
49
projects/implicitron_trainer/impl/experiment_config.py
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
|
from dataclasses import field
|
||||||
|
from typing import Any, Dict, Tuple
|
||||||
|
|
||||||
|
from omegaconf import DictConfig
|
||||||
|
from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource
|
||||||
|
from pytorch3d.implicitron.models.generic_model import GenericModel
|
||||||
|
from pytorch3d.implicitron.tools.config import Configurable, get_default_args_field
|
||||||
|
|
||||||
|
from .optimization import init_optimizer
|
||||||
|
|
||||||
|
|
||||||
|
class ExperimentConfig(Configurable):
|
||||||
|
generic_model_args: DictConfig = get_default_args_field(GenericModel)
|
||||||
|
solver_args: DictConfig = get_default_args_field(init_optimizer)
|
||||||
|
data_source_args: DictConfig = get_default_args_field(ImplicitronDataSource)
|
||||||
|
architecture: str = "generic"
|
||||||
|
detect_anomaly: bool = False
|
||||||
|
eval_only: bool = False
|
||||||
|
exp_dir: str = "./data/default_experiment/"
|
||||||
|
exp_idx: int = 0
|
||||||
|
gpu_idx: int = 0
|
||||||
|
metric_print_interval: int = 5
|
||||||
|
resume: bool = True
|
||||||
|
resume_epoch: int = -1
|
||||||
|
seed: int = 0
|
||||||
|
store_checkpoints: bool = True
|
||||||
|
store_checkpoints_purge: int = 1
|
||||||
|
test_interval: int = -1
|
||||||
|
test_when_finished: bool = False
|
||||||
|
validation_interval: int = 1
|
||||||
|
visdom_env: str = ""
|
||||||
|
visdom_port: int = 8097
|
||||||
|
visdom_server: str = "http://127.0.0.1"
|
||||||
|
visualize_interval: int = 1000
|
||||||
|
clip_grad: float = 0.0
|
||||||
|
camera_difficulty_bin_breaks: Tuple[float, ...] = 0.97, 0.98
|
||||||
|
|
||||||
|
hydra: Dict[str, Any] = field(
|
||||||
|
default_factory=lambda: {
|
||||||
|
"run": {"dir": "."}, # Make hydra not change the working dir.
|
||||||
|
"output_subdir": None, # disable storing the .hydra logs
|
||||||
|
}
|
||||||
|
)
|
||||||
109
projects/implicitron_trainer/impl/optimization.py
Normal file
109
projects/implicitron_trainer/impl/optimization.py
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import Any, Dict, Optional, Tuple
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from pytorch3d.implicitron.models.generic_model import GenericModel
|
||||||
|
from pytorch3d.implicitron.tools.config import enable_get_default_args
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def init_optimizer(
|
||||||
|
model: GenericModel,
|
||||||
|
optimizer_state: Optional[Dict[str, Any]],
|
||||||
|
last_epoch: int,
|
||||||
|
breed: str = "adam",
|
||||||
|
weight_decay: float = 0.0,
|
||||||
|
lr_policy: str = "multistep",
|
||||||
|
lr: float = 0.0005,
|
||||||
|
gamma: float = 0.1,
|
||||||
|
momentum: float = 0.9,
|
||||||
|
betas: Tuple[float, ...] = (0.9, 0.999),
|
||||||
|
milestones: Tuple[int, ...] = (),
|
||||||
|
max_epochs: int = 1000,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initialize the optimizer (optionally from checkpoint state)
|
||||||
|
and the learning rate scheduler.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model: The model with optionally loaded weights
|
||||||
|
optimizer_state: The state dict for the optimizer. If None
|
||||||
|
it has not been loaded from checkpoint
|
||||||
|
last_epoch: If the model was loaded from checkpoint this will be the
|
||||||
|
number of the last epoch that was saved
|
||||||
|
breed: The type of optimizer to use e.g. adam
|
||||||
|
weight_decay: The optimizer weight_decay (L2 penalty on model weights)
|
||||||
|
lr_policy: The policy to use for learning rate. Currently, only "multistep:
|
||||||
|
is supported.
|
||||||
|
lr: The value for the initial learning rate
|
||||||
|
gamma: Multiplicative factor of learning rate decay
|
||||||
|
momentum: Momentum factor for SGD optimizer
|
||||||
|
betas: Coefficients used for computing running averages of gradient and its square
|
||||||
|
in the Adam optimizer
|
||||||
|
milestones: List of increasing epoch indices at which the learning rate is
|
||||||
|
modified
|
||||||
|
max_epochs: The maximum number of epochs to run the optimizer for
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
optimizer: Optimizer module, optionally loaded from checkpoint
|
||||||
|
scheduler: Learning rate scheduler module
|
||||||
|
|
||||||
|
Raise:
|
||||||
|
ValueError if `breed` or `lr_policy` are not supported.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Get the parameters to optimize
|
||||||
|
if hasattr(model, "_get_param_groups"): # use the model function
|
||||||
|
# pyre-ignore[29]
|
||||||
|
p_groups = model._get_param_groups(lr, wd=weight_decay)
|
||||||
|
else:
|
||||||
|
allprm = [prm for prm in model.parameters() if prm.requires_grad]
|
||||||
|
p_groups = [{"params": allprm, "lr": lr}]
|
||||||
|
|
||||||
|
# Intialize the optimizer
|
||||||
|
if breed == "sgd":
|
||||||
|
optimizer = torch.optim.SGD(
|
||||||
|
p_groups, lr=lr, momentum=momentum, weight_decay=weight_decay
|
||||||
|
)
|
||||||
|
elif breed == "adagrad":
|
||||||
|
optimizer = torch.optim.Adagrad(p_groups, lr=lr, weight_decay=weight_decay)
|
||||||
|
elif breed == "adam":
|
||||||
|
optimizer = torch.optim.Adam(
|
||||||
|
p_groups, lr=lr, betas=betas, weight_decay=weight_decay
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise ValueError("no such solver type %s" % breed)
|
||||||
|
logger.info(" -> solver type = %s" % breed)
|
||||||
|
|
||||||
|
# Load state from checkpoint
|
||||||
|
if optimizer_state is not None:
|
||||||
|
logger.info(" -> setting loaded optimizer state")
|
||||||
|
optimizer.load_state_dict(optimizer_state)
|
||||||
|
|
||||||
|
# Initialize the learning rate scheduler
|
||||||
|
if lr_policy == "multistep":
|
||||||
|
scheduler = torch.optim.lr_scheduler.MultiStepLR(
|
||||||
|
optimizer,
|
||||||
|
milestones=milestones,
|
||||||
|
gamma=gamma,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise ValueError("no such lr policy %s" % lr_policy)
|
||||||
|
|
||||||
|
# When loading from checkpoint, this will make sure that the
|
||||||
|
# lr is correctly set even after returning
|
||||||
|
for _ in range(last_epoch):
|
||||||
|
scheduler.step()
|
||||||
|
|
||||||
|
optimizer.zero_grad()
|
||||||
|
return optimizer, scheduler
|
||||||
|
|
||||||
|
|
||||||
|
enable_get_default_args(init_optimizer)
|
||||||
5
projects/implicitron_trainer/tests/__init__.py
Normal file
5
projects/implicitron_trainer/tests/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
425
projects/implicitron_trainer/tests/experiment.yaml
Normal file
425
projects/implicitron_trainer/tests/experiment.yaml
Normal file
@@ -0,0 +1,425 @@
|
|||||||
|
generic_model_args:
|
||||||
|
mask_images: true
|
||||||
|
mask_depths: true
|
||||||
|
render_image_width: 400
|
||||||
|
render_image_height: 400
|
||||||
|
mask_threshold: 0.5
|
||||||
|
output_rasterized_mc: false
|
||||||
|
bg_color:
|
||||||
|
- 0.0
|
||||||
|
- 0.0
|
||||||
|
- 0.0
|
||||||
|
num_passes: 1
|
||||||
|
chunk_size_grid: 4096
|
||||||
|
render_features_dimensions: 3
|
||||||
|
tqdm_trigger_threshold: 16
|
||||||
|
n_train_target_views: 1
|
||||||
|
sampling_mode_training: mask_sample
|
||||||
|
sampling_mode_evaluation: full_grid
|
||||||
|
global_encoder_class_type: null
|
||||||
|
raysampler_class_type: AdaptiveRaySampler
|
||||||
|
renderer_class_type: MultiPassEmissionAbsorptionRenderer
|
||||||
|
image_feature_extractor_class_type: null
|
||||||
|
view_pooler_enabled: false
|
||||||
|
implicit_function_class_type: NeuralRadianceFieldImplicitFunction
|
||||||
|
view_metrics_class_type: ViewMetrics
|
||||||
|
regularization_metrics_class_type: RegularizationMetrics
|
||||||
|
loss_weights:
|
||||||
|
loss_rgb_mse: 1.0
|
||||||
|
loss_prev_stage_rgb_mse: 1.0
|
||||||
|
loss_mask_bce: 0.0
|
||||||
|
loss_prev_stage_mask_bce: 0.0
|
||||||
|
log_vars:
|
||||||
|
- loss_rgb_psnr_fg
|
||||||
|
- loss_rgb_psnr
|
||||||
|
- loss_rgb_mse
|
||||||
|
- loss_rgb_huber
|
||||||
|
- loss_depth_abs
|
||||||
|
- loss_depth_abs_fg
|
||||||
|
- loss_mask_neg_iou
|
||||||
|
- loss_mask_bce
|
||||||
|
- loss_mask_beta_prior
|
||||||
|
- loss_eikonal
|
||||||
|
- loss_density_tv
|
||||||
|
- loss_depth_neg_penalty
|
||||||
|
- loss_autodecoder_norm
|
||||||
|
- loss_prev_stage_rgb_mse
|
||||||
|
- loss_prev_stage_rgb_psnr_fg
|
||||||
|
- loss_prev_stage_rgb_psnr
|
||||||
|
- loss_prev_stage_mask_bce
|
||||||
|
- objective
|
||||||
|
- epoch
|
||||||
|
- sec/it
|
||||||
|
global_encoder_HarmonicTimeEncoder_args:
|
||||||
|
n_harmonic_functions: 10
|
||||||
|
append_input: true
|
||||||
|
time_divisor: 1.0
|
||||||
|
global_encoder_SequenceAutodecoder_args:
|
||||||
|
autodecoder_args:
|
||||||
|
encoding_dim: 0
|
||||||
|
n_instances: 0
|
||||||
|
init_scale: 1.0
|
||||||
|
ignore_input: false
|
||||||
|
raysampler_AdaptiveRaySampler_args:
|
||||||
|
image_width: 400
|
||||||
|
image_height: 400
|
||||||
|
sampling_mode_training: mask_sample
|
||||||
|
sampling_mode_evaluation: full_grid
|
||||||
|
n_pts_per_ray_training: 64
|
||||||
|
n_pts_per_ray_evaluation: 64
|
||||||
|
n_rays_per_image_sampled_from_mask: 1024
|
||||||
|
stratified_point_sampling_training: true
|
||||||
|
stratified_point_sampling_evaluation: false
|
||||||
|
scene_extent: 8.0
|
||||||
|
scene_center:
|
||||||
|
- 0.0
|
||||||
|
- 0.0
|
||||||
|
- 0.0
|
||||||
|
raysampler_NearFarRaySampler_args:
|
||||||
|
image_width: 400
|
||||||
|
image_height: 400
|
||||||
|
sampling_mode_training: mask_sample
|
||||||
|
sampling_mode_evaluation: full_grid
|
||||||
|
n_pts_per_ray_training: 64
|
||||||
|
n_pts_per_ray_evaluation: 64
|
||||||
|
n_rays_per_image_sampled_from_mask: 1024
|
||||||
|
stratified_point_sampling_training: true
|
||||||
|
stratified_point_sampling_evaluation: false
|
||||||
|
min_depth: 0.1
|
||||||
|
max_depth: 8.0
|
||||||
|
renderer_LSTMRenderer_args:
|
||||||
|
num_raymarch_steps: 10
|
||||||
|
init_depth: 17.0
|
||||||
|
init_depth_noise_std: 0.0005
|
||||||
|
hidden_size: 16
|
||||||
|
n_feature_channels: 256
|
||||||
|
bg_color: null
|
||||||
|
verbose: false
|
||||||
|
renderer_MultiPassEmissionAbsorptionRenderer_args:
|
||||||
|
raymarcher_class_type: EmissionAbsorptionRaymarcher
|
||||||
|
n_pts_per_ray_fine_training: 64
|
||||||
|
n_pts_per_ray_fine_evaluation: 64
|
||||||
|
stratified_sampling_coarse_training: true
|
||||||
|
stratified_sampling_coarse_evaluation: false
|
||||||
|
append_coarse_samples_to_fine: true
|
||||||
|
density_noise_std_train: 0.0
|
||||||
|
return_weights: false
|
||||||
|
raymarcher_CumsumRaymarcher_args:
|
||||||
|
surface_thickness: 1
|
||||||
|
bg_color:
|
||||||
|
- 0.0
|
||||||
|
background_opacity: 0.0
|
||||||
|
density_relu: true
|
||||||
|
blend_output: false
|
||||||
|
raymarcher_EmissionAbsorptionRaymarcher_args:
|
||||||
|
surface_thickness: 1
|
||||||
|
bg_color:
|
||||||
|
- 0.0
|
||||||
|
background_opacity: 10000000000.0
|
||||||
|
density_relu: true
|
||||||
|
blend_output: false
|
||||||
|
renderer_SignedDistanceFunctionRenderer_args:
|
||||||
|
render_features_dimensions: 3
|
||||||
|
ray_tracer_args:
|
||||||
|
object_bounding_sphere: 1.0
|
||||||
|
sdf_threshold: 5.0e-05
|
||||||
|
line_search_step: 0.5
|
||||||
|
line_step_iters: 1
|
||||||
|
sphere_tracing_iters: 10
|
||||||
|
n_steps: 100
|
||||||
|
n_secant_steps: 8
|
||||||
|
ray_normal_coloring_network_args:
|
||||||
|
feature_vector_size: 3
|
||||||
|
mode: idr
|
||||||
|
d_in: 9
|
||||||
|
d_out: 3
|
||||||
|
dims:
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
weight_norm: true
|
||||||
|
n_harmonic_functions_dir: 0
|
||||||
|
pooled_feature_dim: 0
|
||||||
|
bg_color:
|
||||||
|
- 0.0
|
||||||
|
soft_mask_alpha: 50.0
|
||||||
|
image_feature_extractor_ResNetFeatureExtractor_args:
|
||||||
|
name: resnet34
|
||||||
|
pretrained: true
|
||||||
|
stages:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 3
|
||||||
|
- 4
|
||||||
|
normalize_image: true
|
||||||
|
image_rescale: 0.16
|
||||||
|
first_max_pool: true
|
||||||
|
proj_dim: 32
|
||||||
|
l2_norm: true
|
||||||
|
add_masks: true
|
||||||
|
add_images: true
|
||||||
|
global_average_pool: false
|
||||||
|
feature_rescale: 1.0
|
||||||
|
view_pooler_args:
|
||||||
|
feature_aggregator_class_type: AngleWeightedReductionFeatureAggregator
|
||||||
|
view_sampler_args:
|
||||||
|
masked_sampling: false
|
||||||
|
sampling_mode: bilinear
|
||||||
|
feature_aggregator_AngleWeightedIdentityFeatureAggregator_args:
|
||||||
|
exclude_target_view: true
|
||||||
|
exclude_target_view_mask_features: true
|
||||||
|
concatenate_output: true
|
||||||
|
weight_by_ray_angle_gamma: 1.0
|
||||||
|
min_ray_angle_weight: 0.1
|
||||||
|
feature_aggregator_AngleWeightedReductionFeatureAggregator_args:
|
||||||
|
exclude_target_view: true
|
||||||
|
exclude_target_view_mask_features: true
|
||||||
|
concatenate_output: true
|
||||||
|
reduction_functions:
|
||||||
|
- AVG
|
||||||
|
- STD
|
||||||
|
weight_by_ray_angle_gamma: 1.0
|
||||||
|
min_ray_angle_weight: 0.1
|
||||||
|
feature_aggregator_IdentityFeatureAggregator_args:
|
||||||
|
exclude_target_view: true
|
||||||
|
exclude_target_view_mask_features: true
|
||||||
|
concatenate_output: true
|
||||||
|
feature_aggregator_ReductionFeatureAggregator_args:
|
||||||
|
exclude_target_view: true
|
||||||
|
exclude_target_view_mask_features: true
|
||||||
|
concatenate_output: true
|
||||||
|
reduction_functions:
|
||||||
|
- AVG
|
||||||
|
- STD
|
||||||
|
implicit_function_IdrFeatureField_args:
|
||||||
|
feature_vector_size: 3
|
||||||
|
d_in: 3
|
||||||
|
d_out: 1
|
||||||
|
dims:
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
- 512
|
||||||
|
geometric_init: true
|
||||||
|
bias: 1.0
|
||||||
|
skip_in: []
|
||||||
|
weight_norm: true
|
||||||
|
n_harmonic_functions_xyz: 0
|
||||||
|
pooled_feature_dim: 0
|
||||||
|
encoding_dim: 0
|
||||||
|
implicit_function_NeRFormerImplicitFunction_args:
|
||||||
|
n_harmonic_functions_xyz: 10
|
||||||
|
n_harmonic_functions_dir: 4
|
||||||
|
n_hidden_neurons_dir: 128
|
||||||
|
latent_dim: 0
|
||||||
|
input_xyz: true
|
||||||
|
xyz_ray_dir_in_camera_coords: false
|
||||||
|
color_dim: 3
|
||||||
|
transformer_dim_down_factor: 2.0
|
||||||
|
n_hidden_neurons_xyz: 80
|
||||||
|
n_layers_xyz: 2
|
||||||
|
append_xyz:
|
||||||
|
- 1
|
||||||
|
implicit_function_NeuralRadianceFieldImplicitFunction_args:
|
||||||
|
n_harmonic_functions_xyz: 10
|
||||||
|
n_harmonic_functions_dir: 4
|
||||||
|
n_hidden_neurons_dir: 128
|
||||||
|
latent_dim: 0
|
||||||
|
input_xyz: true
|
||||||
|
xyz_ray_dir_in_camera_coords: false
|
||||||
|
color_dim: 3
|
||||||
|
transformer_dim_down_factor: 1.0
|
||||||
|
n_hidden_neurons_xyz: 256
|
||||||
|
n_layers_xyz: 8
|
||||||
|
append_xyz:
|
||||||
|
- 5
|
||||||
|
implicit_function_SRNHyperNetImplicitFunction_args:
|
||||||
|
hypernet_args:
|
||||||
|
n_harmonic_functions: 3
|
||||||
|
n_hidden_units: 256
|
||||||
|
n_layers: 2
|
||||||
|
n_hidden_units_hypernet: 256
|
||||||
|
n_layers_hypernet: 1
|
||||||
|
in_features: 3
|
||||||
|
out_features: 256
|
||||||
|
latent_dim_hypernet: 0
|
||||||
|
latent_dim: 0
|
||||||
|
xyz_in_camera_coords: false
|
||||||
|
pixel_generator_args:
|
||||||
|
n_harmonic_functions: 4
|
||||||
|
n_hidden_units: 256
|
||||||
|
n_hidden_units_color: 128
|
||||||
|
n_layers: 2
|
||||||
|
in_features: 256
|
||||||
|
out_features: 3
|
||||||
|
ray_dir_in_camera_coords: false
|
||||||
|
implicit_function_SRNImplicitFunction_args:
|
||||||
|
raymarch_function_args:
|
||||||
|
n_harmonic_functions: 3
|
||||||
|
n_hidden_units: 256
|
||||||
|
n_layers: 2
|
||||||
|
in_features: 3
|
||||||
|
out_features: 256
|
||||||
|
latent_dim: 0
|
||||||
|
xyz_in_camera_coords: false
|
||||||
|
raymarch_function: null
|
||||||
|
pixel_generator_args:
|
||||||
|
n_harmonic_functions: 4
|
||||||
|
n_hidden_units: 256
|
||||||
|
n_hidden_units_color: 128
|
||||||
|
n_layers: 2
|
||||||
|
in_features: 256
|
||||||
|
out_features: 3
|
||||||
|
ray_dir_in_camera_coords: false
|
||||||
|
view_metrics_ViewMetrics_args: {}
|
||||||
|
regularization_metrics_RegularizationMetrics_args: {}
|
||||||
|
solver_args:
|
||||||
|
breed: adam
|
||||||
|
weight_decay: 0.0
|
||||||
|
lr_policy: multistep
|
||||||
|
lr: 0.0005
|
||||||
|
gamma: 0.1
|
||||||
|
momentum: 0.9
|
||||||
|
betas:
|
||||||
|
- 0.9
|
||||||
|
- 0.999
|
||||||
|
milestones: []
|
||||||
|
max_epochs: 1000
|
||||||
|
data_source_args:
|
||||||
|
dataset_map_provider_class_type: ???
|
||||||
|
data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
|
||||||
|
dataset_map_provider_BlenderDatasetMapProvider_args:
|
||||||
|
base_dir: ???
|
||||||
|
object_name: ???
|
||||||
|
path_manager_factory_class_type: PathManagerFactory
|
||||||
|
n_known_frames_for_test: null
|
||||||
|
path_manager_factory_PathManagerFactory_args:
|
||||||
|
silence_logs: true
|
||||||
|
dataset_map_provider_JsonIndexDatasetMapProvider_args:
|
||||||
|
category: ???
|
||||||
|
task_str: singlesequence
|
||||||
|
dataset_root: ''
|
||||||
|
n_frames_per_sequence: -1
|
||||||
|
test_on_train: false
|
||||||
|
restrict_sequence_name: []
|
||||||
|
test_restrict_sequence_id: -1
|
||||||
|
assert_single_seq: false
|
||||||
|
only_test_set: false
|
||||||
|
dataset_class_type: JsonIndexDataset
|
||||||
|
path_manager_factory_class_type: PathManagerFactory
|
||||||
|
dataset_JsonIndexDataset_args:
|
||||||
|
limit_to: 0
|
||||||
|
limit_sequences_to: 0
|
||||||
|
exclude_sequence: []
|
||||||
|
limit_category_to: []
|
||||||
|
load_images: true
|
||||||
|
load_depths: true
|
||||||
|
load_depth_masks: true
|
||||||
|
load_masks: true
|
||||||
|
load_point_clouds: false
|
||||||
|
max_points: 0
|
||||||
|
mask_images: false
|
||||||
|
mask_depths: false
|
||||||
|
image_height: 800
|
||||||
|
image_width: 800
|
||||||
|
box_crop: true
|
||||||
|
box_crop_mask_thr: 0.4
|
||||||
|
box_crop_context: 0.3
|
||||||
|
remove_empty_masks: true
|
||||||
|
seed: 0
|
||||||
|
sort_frames: false
|
||||||
|
path_manager_factory_PathManagerFactory_args:
|
||||||
|
silence_logs: true
|
||||||
|
dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
|
||||||
|
category: ???
|
||||||
|
subset_name: ???
|
||||||
|
dataset_root: ''
|
||||||
|
test_on_train: false
|
||||||
|
only_test_set: false
|
||||||
|
load_eval_batches: true
|
||||||
|
dataset_class_type: JsonIndexDataset
|
||||||
|
path_manager_factory_class_type: PathManagerFactory
|
||||||
|
dataset_JsonIndexDataset_args:
|
||||||
|
path_manager: null
|
||||||
|
frame_annotations_file: ''
|
||||||
|
sequence_annotations_file: ''
|
||||||
|
subset_lists_file: ''
|
||||||
|
subsets: null
|
||||||
|
limit_to: 0
|
||||||
|
limit_sequences_to: 0
|
||||||
|
pick_sequence: []
|
||||||
|
exclude_sequence: []
|
||||||
|
limit_category_to: []
|
||||||
|
dataset_root: ''
|
||||||
|
load_images: true
|
||||||
|
load_depths: true
|
||||||
|
load_depth_masks: true
|
||||||
|
load_masks: true
|
||||||
|
load_point_clouds: false
|
||||||
|
max_points: 0
|
||||||
|
mask_images: false
|
||||||
|
mask_depths: false
|
||||||
|
image_height: 800
|
||||||
|
image_width: 800
|
||||||
|
box_crop: true
|
||||||
|
box_crop_mask_thr: 0.4
|
||||||
|
box_crop_context: 0.3
|
||||||
|
remove_empty_masks: true
|
||||||
|
n_frames_per_sequence: -1
|
||||||
|
seed: 0
|
||||||
|
sort_frames: false
|
||||||
|
eval_batches: null
|
||||||
|
path_manager_factory_PathManagerFactory_args:
|
||||||
|
silence_logs: true
|
||||||
|
dataset_map_provider_LlffDatasetMapProvider_args:
|
||||||
|
base_dir: ???
|
||||||
|
object_name: ???
|
||||||
|
path_manager_factory_class_type: PathManagerFactory
|
||||||
|
n_known_frames_for_test: null
|
||||||
|
path_manager_factory_PathManagerFactory_args:
|
||||||
|
silence_logs: true
|
||||||
|
data_loader_map_provider_SequenceDataLoaderMapProvider_args:
|
||||||
|
batch_size: 1
|
||||||
|
num_workers: 0
|
||||||
|
dataset_length_train: 0
|
||||||
|
dataset_length_val: 0
|
||||||
|
dataset_length_test: 0
|
||||||
|
train_conditioning_type: SAME
|
||||||
|
val_conditioning_type: SAME
|
||||||
|
test_conditioning_type: KNOWN
|
||||||
|
images_per_seq_options: []
|
||||||
|
sample_consecutive_frames: false
|
||||||
|
consecutive_frames_max_gap: 0
|
||||||
|
consecutive_frames_max_gap_seconds: 0.1
|
||||||
|
architecture: generic
|
||||||
|
detect_anomaly: false
|
||||||
|
eval_only: false
|
||||||
|
exp_dir: ./data/default_experiment/
|
||||||
|
exp_idx: 0
|
||||||
|
gpu_idx: 0
|
||||||
|
metric_print_interval: 5
|
||||||
|
resume: true
|
||||||
|
resume_epoch: -1
|
||||||
|
seed: 0
|
||||||
|
store_checkpoints: true
|
||||||
|
store_checkpoints_purge: 1
|
||||||
|
test_interval: -1
|
||||||
|
test_when_finished: false
|
||||||
|
validation_interval: 1
|
||||||
|
visdom_env: ''
|
||||||
|
visdom_port: 8097
|
||||||
|
visdom_server: http://127.0.0.1
|
||||||
|
visualize_interval: 1000
|
||||||
|
clip_grad: 0.0
|
||||||
|
camera_difficulty_bin_breaks:
|
||||||
|
- 0.97
|
||||||
|
- 0.98
|
||||||
|
hydra:
|
||||||
|
run:
|
||||||
|
dir: .
|
||||||
|
output_subdir: null
|
||||||
91
projects/implicitron_trainer/tests/test_experiment.py
Normal file
91
projects/implicitron_trainer/tests/test_experiment.py
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the BSD-style license found in the
|
||||||
|
# LICENSE file in the root directory of this source tree.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import unittest
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from hydra import compose, initialize_config_dir
|
||||||
|
from omegaconf import OmegaConf
|
||||||
|
|
||||||
|
from .. import experiment
|
||||||
|
|
||||||
|
|
||||||
|
def interactive_testing_requested() -> bool:
|
||||||
|
"""
|
||||||
|
Certain tests are only useful when run interactively, and so are not regularly run.
|
||||||
|
These are activated by this funciton returning True, which the user requests by
|
||||||
|
setting the environment variable `PYTORCH3D_INTERACTIVE_TESTING` to 1.
|
||||||
|
"""
|
||||||
|
return os.environ.get("PYTORCH3D_INTERACTIVE_TESTING", "") == "1"
|
||||||
|
|
||||||
|
|
||||||
|
internal = os.environ.get("FB_TEST", False)
|
||||||
|
|
||||||
|
|
||||||
|
DATA_DIR = Path(__file__).resolve().parent
|
||||||
|
IMPLICITRON_CONFIGS_DIR = Path(__file__).resolve().parent.parent / "configs"
|
||||||
|
DEBUG: bool = False
|
||||||
|
|
||||||
|
# TODO:
|
||||||
|
# - add enough files to skateboard_first_5 that this works on RE.
|
||||||
|
# - share common code with PyTorch3D tests?
|
||||||
|
# - deal with the temporary output files this test creates
|
||||||
|
|
||||||
|
|
||||||
|
class TestExperiment(unittest.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self.maxDiff = None
|
||||||
|
|
||||||
|
def test_from_defaults(self):
|
||||||
|
# Test making minimal changes to the dataclass defaults.
|
||||||
|
if not interactive_testing_requested() or not internal:
|
||||||
|
return
|
||||||
|
cfg = OmegaConf.structured(experiment.ExperimentConfig)
|
||||||
|
cfg.data_source_args.dataset_map_provider_class_type = (
|
||||||
|
"JsonIndexDatasetMapProvider"
|
||||||
|
)
|
||||||
|
dataset_args = (
|
||||||
|
cfg.data_source_args.dataset_map_provider_JsonIndexDatasetMapProvider_args
|
||||||
|
)
|
||||||
|
dataloader_args = (
|
||||||
|
cfg.data_source_args.data_loader_map_provider_SequenceDataLoaderMapProvider_args
|
||||||
|
)
|
||||||
|
dataset_args.category = "skateboard"
|
||||||
|
dataset_args.test_restrict_sequence_id = 0
|
||||||
|
dataset_args.dataset_root = "manifold://co3d/tree/extracted"
|
||||||
|
dataset_args.dataset_JsonIndexDataset_args.limit_sequences_to = 5
|
||||||
|
dataset_args.dataset_JsonIndexDataset_args.image_height = 80
|
||||||
|
dataset_args.dataset_JsonIndexDataset_args.image_width = 80
|
||||||
|
dataloader_args.dataset_length_train = 1
|
||||||
|
dataloader_args.dataset_length_val = 1
|
||||||
|
cfg.solver_args.max_epochs = 2
|
||||||
|
|
||||||
|
experiment.run_training(cfg)
|
||||||
|
|
||||||
|
def test_yaml_contents(self):
|
||||||
|
cfg = OmegaConf.structured(experiment.ExperimentConfig)
|
||||||
|
yaml = OmegaConf.to_yaml(cfg, sort_keys=False)
|
||||||
|
if DEBUG:
|
||||||
|
(DATA_DIR / "experiment.yaml").write_text(yaml)
|
||||||
|
self.assertEqual(yaml, (DATA_DIR / "experiment.yaml").read_text())
|
||||||
|
|
||||||
|
def test_load_configs(self):
|
||||||
|
config_files = []
|
||||||
|
|
||||||
|
for pattern in ("repro_singleseq*.yaml", "repro_multiseq*.yaml"):
|
||||||
|
config_files.extend(
|
||||||
|
[
|
||||||
|
f
|
||||||
|
for f in IMPLICITRON_CONFIGS_DIR.glob(pattern)
|
||||||
|
if not f.name.endswith("_base.yaml")
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
for file in config_files:
|
||||||
|
with self.subTest(file.name):
|
||||||
|
with initialize_config_dir(config_dir=str(IMPLICITRON_CONFIGS_DIR)):
|
||||||
|
compose(file.name)
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user