Mirror of https://github.com/facebookresearch/pytorch3d.git, synced 2025-12-22 07:10:34 +08:00

Compare commits: v0.7.3...bottler/ac (131 commits)
The 131 commits in this range, in the order listed (the author, date, and message columns of the table did not survive extraction):

9c586b1351, e13848265d, 58566963d6, e17ed5cd50, 8ed0c7a002, 2da913c7e6, fca83e6369, 75ebeeaea0,
ab793177c6, 9acdd67b83, 3f428d9981, 05cbea115a, 38afdcfc68, 1e0b1d9c72, 44702fdb4b, 7edaee71a9,
d0d0e02007, 4df110b0a9, 51fd114d8b, 89653419d0, 7980854d44, 51d7c06ddd, 00c36ec01c, b0462d8079,
b66d17a324, 717493cb79, 302da69461, 4ae25bfce7, bd52f4a408, 17117106e4, aec76bb4c8, 47d5dc8824,
fe0b1bae49, ccf22911d4, 128be02fc0, 31e3488a51, b215776f2d, 38cf0dc1c5, 7566530669, a27755db41,
3da7703c5a, f34104cf6e, f247c86dc0, ae9d8787ce, 8772fe0de8, c292c71c1a, d0d9cae9cd, 1f92c4e9d2,
9b981f2c7e, 85eccbbf77, b80ab0caf0, 1e817914b3, 799c1cd21b, 292acc71a3, 3621a36494, 3087ab7f62,
e46ab49a34, 8a27590c5f, 06cdc313a7, 94da8841af, fbc6725f03, 6b8766080d, c373a84400, 7606854ff7,
83bacda8fb, f74fc450e8, 3b4f8a4980, 79b46734cb, 55638f3bae, f4f2209271, f613682551, 2f11ddc5ee,
650cc09d22, 8c15afe71d, 6b437e21a6, 03f17ca1ea, a8c70161a1, 28f914bf3b, eaf0709d6a, b7f4ba097c,
6f2212da46, a3d99cab6b, d84f274a08, 099fc069fb, 57f6e79280, 2883a07bfe, 6462aa60ea, d851bc3173,
8164ac4081, 9446d91fae, 3d011a9198, 5910d81b7b, ccf860f1db, 29b8ebd802, 4e7715ce66, f68371d398,
dc2c7e489f, 42e7de418c, 88429853b9, 009a3d3b3c, cd5db076d5, 3d886c32d5, 5592d25f68, 09a99f2e6d,
5ffeb4d580, 573a42cd5f, 928efdd640, 35badc0892, e0c3ca97ff, d2119c285f, ff80183fdb, b0462598ac,
d08fe6d45a, 297020a4b1, 062e6c54ae, c80180c96e, 23cd19fbc7, 092400f1e7, ec87284c4b, f5a117c74b,
b921efae3e, c8d6cd427e, ef5f620263, 3e3644e534, 178a7774d4, 823ab75d27, 32e1992924, 7aeedd17a4,
0e3138eca8, 1af6bf4768, 355d6332cb
CircleCI config template (the file header was lost in extraction; the `{{workflows()}}` placeholder marks this as the generated-config template). The CI images move to `linux-cuda-11:default`, timeouts double, the macOS wheel job is dropped, and the GPU test run moves to py3.10 / cu117 / PyTorch 2.0.1:

```diff
@@ -64,7 +64,7 @@ jobs:
       CUDA_VERSION: "11.3"
     resource_class: gpu.nvidia.small.multi
     machine:
-      image: ubuntu-2004:202101-01
+      image: linux-cuda-11:default
     steps:
       - checkout
       - <<: *setupcuda
@@ -116,7 +116,7 @@ jobs:
       # so we aren't running the tests.
       - run:
           name: build
-          no_output_timeout: 20m
+          no_output_timeout: 40m
           command: MAX_JOBS=15 TEST_FLAG=--no-test python3 packaging/build_conda.py
       - store_artifacts:
           path: /opt/conda/conda-bld/linux-64
@@ -128,7 +128,7 @@ jobs:
   binary_linux_conda_cuda:
     <<: *binary_common
     machine:
-      image: ubuntu-1604-cuda-10.2:202012-01
+      image: linux-cuda-11:default
     resource_class: gpu.nvidia.small.multi
     steps:
       - checkout
@@ -145,7 +145,7 @@ jobs:
         docker pull $TESTRUN_DOCKER_IMAGE
       - run:
           name: Build and run tests
-          no_output_timeout: 20m
+          no_output_timeout: 40m
           command: |
             set -e

@@ -156,24 +156,6 @@ jobs:

         docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} python3 ./packaging/build_conda.py

-  binary_macos_wheel:
-    <<: *binary_common
-    macos:
-      xcode: "13.4.1"
-    steps:
-      - checkout
-      - run:
-          # Cannot easily deduplicate this as source'ing activate
-          # will set environment variables which we need to propagate
-          # to build_wheel.sh
-          command: |
-            curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
-            sh conda.sh -b
-            source $HOME/miniconda3/bin/activate
-            packaging/build_wheel.sh
-      - store_artifacts:
-          path: dist
-
 workflows:
   version: 2
   build_and_test:
@@ -182,23 +164,8 @@ workflows:
       # context: DOCKERHUB_TOKEN
       {{workflows()}}
       - binary_linux_conda_cuda:
-          name: testrun_conda_cuda_py38_cu102_pyt190
+          name: testrun_conda_cuda_py310_cu117_pyt201
           context: DOCKERHUB_TOKEN
-          python_version: "3.8"
-          pytorch_version: '1.9.0'
-          cu_version: "cu102"
-      - binary_macos_wheel:
-          cu_version: cpu
-          name: macos_wheel_py3.8_cpu
-          python_version: '3.8'
-          pytorch_version: '1.13.0'
-      - binary_macos_wheel:
-          cu_version: cpu
-          name: macos_wheel_py3.9_cpu
-          python_version: '3.9'
-          pytorch_version: '1.13.0'
-      - binary_macos_wheel:
-          cu_version: cpu
-          name: macos_wheel_py3.10_cpu
-          python_version: '3.10'
-          pytorch_version: '1.13.0'
+          python_version: "3.10"
+          pytorch_version: '2.0.1'
+          cu_version: "cu117"
```

1005 changed lines in .circleci/config.yml: file diff suppressed because it is too large.
The workflow-generation script (file header lost; this is the module defining `CONDA_CUDA_VERSIONS` and `workflows()`). Old PyTorch 1.x/2.0 entries are dropped, 2.1.0 through 2.4.1 are added, and Python 3.11/3.12 join the matrix:

```diff
@@ -18,25 +18,19 @@ from packaging import version

 # The CUDA versions which have pytorch conda packages available for linux for each
 # version of pytorch.
-# Pytorch 1.4 also supports cuda 10.0 but we no longer build for cuda 10.0 at all.
 CONDA_CUDA_VERSIONS = {
-    "1.9.0": ["cu102", "cu111"],
-    "1.9.1": ["cu102", "cu111"],
-    "1.10.0": ["cu102", "cu111", "cu113"],
-    "1.10.1": ["cu102", "cu111", "cu113"],
-    "1.10.2": ["cu102", "cu111", "cu113"],
-    "1.11.0": ["cu102", "cu111", "cu113", "cu115"],
-    "1.12.0": ["cu102", "cu113", "cu116"],
-    "1.12.1": ["cu102", "cu113", "cu116"],
-    "1.13.0": ["cu116", "cu117"],
-    "1.13.1": ["cu116", "cu117"],
-    "2.0.0": ["cu117", "cu118"],
+    "2.1.0": ["cu118", "cu121"],
+    "2.1.1": ["cu118", "cu121"],
+    "2.1.2": ["cu118", "cu121"],
+    "2.2.0": ["cu118", "cu121"],
+    "2.2.2": ["cu118", "cu121"],
+    "2.3.1": ["cu118", "cu121"],
+    "2.4.0": ["cu118", "cu121"],
+    "2.4.1": ["cu118", "cu121"],
 }


 def conda_docker_image_for_cuda(cuda_version):
     if cuda_version in ("cu101", "cu102", "cu111"):
         return None
     if len(cuda_version) != 5:
         raise ValueError("Unknown cuda version")
     return "pytorch/conda-builder:cuda" + cuda_version[2:]
@@ -51,12 +45,24 @@ def pytorch_versions_for_python(python_version):
             for i in CONDA_CUDA_VERSIONS
             if version.Version(i) >= version.Version("1.11.0")
         ]
+    if python_version == "3.11":
+        return [
+            i
+            for i in CONDA_CUDA_VERSIONS
+            if version.Version(i) >= version.Version("2.1.0")
+        ]
+    if python_version == "3.12":
+        return [
+            i
+            for i in CONDA_CUDA_VERSIONS
+            if version.Version(i) >= version.Version("2.2.0")
+        ]


 def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
     w = []
     for btype in ["conda"]:
-        for python_version in ["3.8", "3.9", "3.10"]:
+        for python_version in ["3.8", "3.9", "3.10", "3.11", "3.12"]:
             for pytorch_version in pytorch_versions_for_python(python_version):
                 for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
                     w += workflow_pair(
```
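For orientation, here is a minimal sketch, under simplified assumptions, of how the nested loops in `workflows()` expand the table into one job per combination. The abbreviated table and the floor rules are taken from the diff above; this is not the real generator:

```python
# A minimal sketch of the build-matrix expansion in workflows() above.
from packaging import version

CONDA_CUDA_VERSIONS = {
    "2.1.0": ["cu118", "cu121"],
    "2.4.1": ["cu118", "cu121"],
}  # abbreviated from the diff above

def pytorch_versions_for_python(python_version):
    # Simplified floor rules from the diff: Python 3.11 needs pytorch >= 2.1.0,
    # Python 3.12 needs pytorch >= 2.2.0; older pythons accept everything here.
    floors = {"3.11": "2.1.0", "3.12": "2.2.0"}
    floor = floors.get(python_version, "2.1.0")
    return [
        i
        for i in CONDA_CUDA_VERSIONS
        if version.Version(i) >= version.Version(floor)
    ]

jobs = [
    (py, pt, cu)
    for py in ["3.8", "3.9", "3.10", "3.11", "3.12"]
    for pt in pytorch_versions_for_python(py)
    for cu in CONDA_CUDA_VERSIONS[pt]
]
print(len(jobs))  # 18 with this abbreviated two-row table
```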
.flake8 (5 changed lines):

```diff
@@ -1,5 +1,8 @@
 [flake8]
-ignore = E203, E266, E501, W503, E221
+# B028 No explicit stacklevel argument found.
+# B907 'foo' is manually surrounded by quotes, consider using the `!r` conversion flag.
+# B905 `zip()` without an explicit `strict=` parameter.
+ignore = E203, E266, E501, W503, E221, B028, B905, B907
 max-line-length = 88
 max-complexity = 18
 select = B,C,E,F,W,T4,B9
```
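For context, hypothetical one-liners (not from the repo) of the kind the newly ignored flake8-bugbear checks would flag:

```python
# Hypothetical examples of code flagged by the newly ignored bugbear checks.
import warnings

names = ["a", "b"]
values = [1, 2]

pairs = list(zip(names, values))  # B905: zip() without an explicit strict= parameter
warnings.warn("old API")          # B028: no explicit stacklevel argument found
msg = f"'{names[0]}' is missing"  # B907: manually quoted; consider the !r conversion flag
```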
.github/workflows/build.yml (new file, 20 lines): a GitHub Actions replacement for the CircleCI GPU test run.

```diff
@@ -0,0 +1,20 @@
+name: facebookresearch/pytorch3d/build_and_test
+on:
+  pull_request:
+    branches:
+      - main
+jobs:
+  binary_linux_conda_cuda:
+    runs-on: 4-core-ubuntu-gpu-t4
+    env:
+      PYTHON_VERSION: "3.12"
+      BUILD_VERSION: "${{ github.run_number }}"
+      PYTORCH_VERSION: "2.4.1"
+      CU_VERSION: "cu121"
+      JUST_TESTRUN: 1
+    steps:
+    - uses: actions/checkout@v4
+    - name: Build and run tests
+      run: |-
+        conda create --name env --yes --quiet conda-build
+        conda run --no-capture-output --name env python3 ./packaging/build_conda.py --use-conda-cuda
```
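The env block above is the entire contract between CI and `packaging/build_conda.py`. A sketch of reproducing the job locally, from the repo root, with conda-build installed (`BUILD_VERSION="1"` stands in for the run number):

```python
# Local reproduction sketch of the workflow job, under the assumptions above.
import os
import subprocess

env = dict(
    os.environ,
    PYTHON_VERSION="3.12",
    PYTORCH_VERSION="2.4.1",
    CU_VERSION="cu121",
    BUILD_VERSION="1",   # CI uses ${{ github.run_number }}
    JUST_TESTRUN="1",    # per the build_conda.py diff below, skips exporting NVCC_FLAGS
)
subprocess.run(
    ["python3", "./packaging/build_conda.py", "--use-conda-cuda"],
    env=env,
    check=True,
)
```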
INSTALL.md (18 changed lines): supported PyTorch versions jump to 2.1.0-2.4.1, fvcore moves from a core requirement to a demo/linting dependency, and the Mac-only PyPI section is removed.

````diff
@@ -8,11 +8,10 @@
 The core library is written in PyTorch. Several components have underlying implementation in CUDA for improved performance. A subset of these components have CPU implementations in C++/PyTorch. It is advised to use PyTorch3D with GPU support in order to use all the features.

 - Linux or macOS or Windows
-- Python 3.8, 3.9 or 3.10
-- PyTorch 1.9.0, 1.9.1, 1.10.0, 1.10.1, 1.10.2, 1.11.0, 1.12.0, 1.12.1, 1.13.0 or 2.0.0.
+- Python
+- PyTorch 2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1, 2.2.2, 2.3.0, 2.3.1, 2.4.0 or 2.4.1.
 - torchvision that matches the PyTorch installation. You can install them together as explained at pytorch.org to make sure of this.
 - gcc & g++ ≥ 4.9
-- [fvcore](https://github.com/facebookresearch/fvcore)
 - [ioPath](https://github.com/facebookresearch/iopath)
 - If CUDA is to be used, use a version which is supported by the corresponding pytorch version and at least version 9.2.
 - If CUDA older than 11.7 is to be used and you are building from source, the CUB library must be available. We recommend version 1.10.0.
@@ -22,7 +21,7 @@ The runtime dependencies can be installed by running:
 conda create -n pytorch3d python=3.9
 conda activate pytorch3d
 conda install pytorch=1.13.0 torchvision pytorch-cuda=11.6 -c pytorch -c nvidia
-conda install -c fvcore -c iopath -c conda-forge fvcore iopath
+conda install -c iopath iopath
 ```

 For the CUB build time dependency, which you only need if you have CUDA older than 11.7, if you are using conda, you can continue with
@@ -49,6 +48,7 @@ For developing on top of PyTorch3D or contributing, you will need to run the lin
 - tdqm
 - jupyter
 - imageio
+- fvcore
 - plotly
 - opencv-python
@@ -59,6 +59,7 @@ conda install jupyter
 pip install scikit-image matplotlib imageio plotly opencv-python

 # Tests/Linting
+conda install -c fvcore -c conda-forge fvcore
 pip install black usort flake8 flake8-bugbear flake8-comprehensions
 ```

@@ -77,13 +78,8 @@ Or, to install a nightly (non-official, alpha) build:
 # Anaconda Cloud
 conda install pytorch3d -c pytorch3d-nightly
 ```
-### 2. Install from PyPI, on Mac only.
-This works with pytorch 1.13.0 only. The build is CPU only.
-```
-pip install pytorch3d
-```
-
-### 3. Install wheels for Linux
+### 2. Install wheels for Linux
 We have prebuilt wheels with CUDA for Linux for PyTorch 1.11.0, for each of the supported CUDA versions,
 for Python 3.8 and 3.9. This is for ease of use on Google Colab.
 These are installed in a special way.
@@ -102,7 +98,7 @@ version_str="".join([
     torch.version.cuda.replace(".",""),
     f"_pyt{pyt_version_str}"
 ])
-!pip install fvcore iopath
+!pip install iopath
 !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
 ```
````
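As a quick check of the wheel-name recipe in that last hunk: Python 3.11 with torch 2.0.1+cu117 (an assumed example combination) yields `py311_cu117_pyt201`.

```python
# Sanity check of the wheel-name construction used in INSTALL.md above.
import sys
import torch

pyt_version_str = torch.__version__.split("+")[0].replace(".", "")
version_str = "".join([
    f"py3{sys.version_info.minor}_cu",
    torch.version.cuda.replace(".", ""),
    f"_pyt{pyt_version_str}",
])
print(version_str)  # e.g. py311_cu117_pyt201
```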
README timeline section (file header lost; this is the release-news list in the repo README), gaining the v0.7.4 and v0.7.5 entries:

```diff
@@ -146,6 +146,12 @@ If you are using the pulsar backend for sphere-rendering (the `PulsarPointRender

 Please see below for a timeline of the codebase updates in reverse chronological order. We are sharing updates on the releases as well as research projects which are built with PyTorch3D. The changelogs for the releases are available under [`Releases`](https://github.com/facebookresearch/pytorch3d/releases), and the builds can be installed using `conda` as per the instructions in [INSTALL.md](INSTALL.md).

+**[Oct 31st 2023]:** PyTorch3D [v0.7.5](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.5) released.
+
+**[May 10th 2023]:** PyTorch3D [v0.7.4](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.4) released.
+
 **[Apr 5th 2023]:** PyTorch3D [v0.7.3](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.3) released.

 **[Dec 19th 2022]:** PyTorch3D [v0.7.2](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.2) released.

 **[Oct 23rd 2022]:** PyTorch3D [v0.7.1](https://github.com/facebookresearch/pytorch3d/releases/tag/v0.7.1) released.
```
A demo environment setup script (file header lost), with the same fvcore-channel cleanup:

```diff
@@ -23,7 +23,7 @@ conda init bash
 source ~/.bashrc
 conda create -y -n myenv python=3.8 matplotlib ipython ipywidgets nbconvert
 conda activate myenv
-conda install -y -c fvcore -c iopath -c conda-forge fvcore iopath
+conda install -y -c iopath iopath
 conda install -y -c pytorch pytorch=1.6.0 cudatoolkit=10.1 torchvision
 conda install -y -c pytorch3d-nightly pytorch3d
 pip install plotly scikit-image
```
docs/.readthedocs.yaml (new file, 27 lines):

```diff
@@ -0,0 +1,27 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the version of Python and other tools you might need
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+
+# Build documentation in the docs/ directory with Sphinx
+sphinx:
+  configuration: docs/conf.py
+
+# We recommend specifying your dependencies to enable reproducible builds:
+# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+python:
+  install:
+    - requirements: docs/requirements.txt
```
The docs README (Sphinx setup) drops the obsolete `mock` package:

````diff
@@ -3,7 +3,7 @@
 ### Install dependencies

 ```
-pip install -U recommonmark mock sphinx sphinx_rtd_theme sphinx_markdown_tables
+pip install -U recommonmark sphinx sphinx_rtd_theme sphinx_markdown_tables
 ```

 ### Add symlink to the root README.md
````
docs/conf.py switches to the standard-library mock:

```diff
@@ -20,7 +20,8 @@
 import os
 import sys

-import mock
+import unittest.mock as mock
+
 from recommonmark.parser import CommonMarkParser
 from recommonmark.states import DummyStateMachine
 from sphinx.builders.html import StandaloneHTMLBuilder
```
Camera documentation (the screen-space principal point in the example is corrected):

````diff
@@ -85,7 +85,7 @@ cameras_ndc = PerspectiveCameras(focal_length=fcl_ndc, principal_point=prp_ndc)
 # Screen space camera
 image_size = ((128, 256),)  # (h, w)
 fcl_screen = (76.8,)  # fcl_ndc * min(image_size) / 2
-prp_screen = ((115.2, 48), )  # w / 2 - px_ndc * min(image_size) / 2, h / 2 - py_ndc * min(image_size) / 2
+prp_screen = ((115.2, 32), )  # w / 2 - px_ndc * min(image_size) / 2, h / 2 - py_ndc * min(image_size) / 2
 cameras_screen = PerspectiveCameras(focal_length=fcl_screen, principal_point=prp_screen, in_ndc=False, image_size=image_size)
 ```
````
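The corrected y value checks out arithmetically. A quick verification, assuming (per the NDC example earlier in that note) an NDC principal point of (0.2, 0.5):

```python
# Checking the corrected principal point, assuming prp_ndc = (0.2, 0.5)
# and image_size (h, w) = (128, 256) from the doc's example.
h, w = 128, 256
s = min(h, w)  # 128
px_ndc, py_ndc = 0.2, 0.5
px_screen = w / 2 - px_ndc * s / 2  # 128 - 0.2 * 64 = 115.2
py_screen = h / 2 - py_ndc * s / 2  # 64 - 0.5 * 64 = 32.0, not the old 48
print(px_screen, py_screen)  # 115.2 32.0
```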
docs/requirements.txt moves from cp37/torch 1.7.1 to cp311/torch 2.0.1 and drops mock and fvcore:

```diff
@@ -1,12 +1,10 @@
 docutils>=0.14
 Sphinx>=1.7
-recommonmark==0.4.0
+recommonmark
 sphinx_rtd_theme
 sphinx_markdown_tables
-mock
 numpy
 iopath
-fvcore
-https://download.pytorch.org/whl/cpu/torchvision-0.8.2%2Bcpu-cp37-cp37m-linux_x86_64.whl
-https://download.pytorch.org/whl/cpu/torch-1.7.1%2Bcpu-cp37-cp37m-linux_x86_64.whl
+https://download.pytorch.org/whl/cpu/torchvision-0.15.2%2Bcpu-cp311-cp311-linux_x86_64.whl
+https://download.pytorch.org/whl/cpu/torch-2.0.1%2Bcpu-cp311-cp311-linux_x86_64.whl
 omegaconf
```
The tutorial notebooks' install cell is rewritten (notebook file headers were lost). Shown once here; the same rewrite recurs in every tutorial notebook below:

```diff
@@ -83,25 +83,31 @@
     "import os\n",
     "import sys\n",
     "import torch\n",
+    "import subprocess\n",
     "need_pytorch3d=False\n",
     "try:\n",
     "    import pytorch3d\n",
     "except ModuleNotFoundError:\n",
     "    need_pytorch3d=True\n",
     "if need_pytorch3d:\n",
-    "    if torch.__version__.startswith((\"1.13.\", \"2.0.\")) and sys.platform.startswith(\"linux\"):\n",
-    "        # We try to install PyTorch3D via a released wheel.\n",
-    "        pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
-    "        version_str=\"\".join([\n",
-    "            f\"py3{sys.version_info.minor}_cu\",\n",
-    "            torch.version.cuda.replace(\".\",\"\"),\n",
-    "            f\"_pyt{pyt_version_str}\"\n",
-    "        ])\n",
-    "        !pip install fvcore iopath\n",
+    "    pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
+    "    version_str=\"\".join([\n",
+    "        f\"py3{sys.version_info.minor}_cu\",\n",
+    "        torch.version.cuda.replace(\".\",\"\"),\n",
+    "        f\"_pyt{pyt_version_str}\"\n",
+    "    ])\n",
+    "    !pip install iopath\n",
+    "    if sys.platform.startswith(\"linux\"):\n",
+    "        print(\"Trying to install wheel for PyTorch3D\")\n",
     "        !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n",
-    "    else:\n",
-    "        # We try to install PyTorch3D from source.\n",
-    "        !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
+    "        pip_list = !pip freeze\n",
+    "        need_pytorch3d = not any(i.startswith(\"pytorch3d==\") for i in pip_list)\n",
+    "    if need_pytorch3d:\n",
+    "        print(f\"failed to find/install wheel for {version_str}\")\n",
+    "if need_pytorch3d:\n",
+    "    print(\"Installing PyTorch3D from source\")\n",
+    "    !pip install ninja\n",
+    "    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
 ]
 },
 {
```
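The wheel-availability check relies on IPython's `!pip freeze` magic. A plain-Python equivalent, as a sketch (and plausibly why the rewritten cell imports `subprocess`):

```python
# A plain-Python sketch of the notebook's "pip_list = !pip freeze" check,
# usable outside IPython.
import subprocess
import sys

pip_list = subprocess.run(
    [sys.executable, "-m", "pip", "freeze"],
    capture_output=True, text=True, check=True,
).stdout.splitlines()
need_pytorch3d = not any(line.startswith("pytorch3d==") for line in pip_list)
```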
A second tutorial notebook, hunk @@ -70,25 +70,31 @@: the same install-cell rewrite as shown above.
A third tutorial notebook: the same install-cell rewrite (hunk @@ -45,25 +45,31 @@) plus a rendering tweak:

```diff
@@ -405,7 +411,7 @@
 "outputs": [],
 "source": [
 "random_model_images = shapenet_dataset.render(\n",
-"    sample_nums=[3],\n",
+"    sample_nums=[5],\n",
 "    device=device,\n",
 "    cameras=cameras,\n",
 "    raster_settings=raster_settings,\n",
```
A fourth tutorial notebook: the same install-cell rewrite (hunk @@ -84,25 +84,31 @@) plus a Matplotlib 3D-axes fix in a plotting cell:

```diff
@@ -262,7 +268,7 @@
 "    points = sample_points_from_meshes(mesh, 5000)\n",
 "    x, y, z = points.clone().detach().cpu().squeeze().unbind(1) \n",
 "    fig = plt.figure(figsize=(5, 5))\n",
-"    ax = Axes3D(fig)\n",
+"    ax = fig.add_subplot(111, projection='3d')\n",
 "    ax.scatter3D(x, z, -y)\n",
 "    ax.set_xlabel('x')\n",
 "    ax.set_ylabel('z')\n",
```
The identical install-cell rewrite also appears in eight further tutorial notebooks, as hunks @@ -50,25 +50,31 @@, @@ -62,25 +62,31 @@, @@ -41,25 +41,31 @@, @@ -72,25 +72,31 @@, @@ -66,25 +66,31 @@, @@ -44,25 +44,31 @@, @@ -51,25 +51,31 @@, and @@ -67,25 +67,31 @@.
A camera-visualization utility (file header lost) gets the same Matplotlib axes fix:

```diff
@@ -33,7 +33,7 @@ def plot_camera_scene(cameras, cameras_gt, status: str):
     a string passed inside the `status` argument.
     """
     fig = plt.figure()
-    ax = fig.gca(projection="3d")
+    ax = fig.add_subplot(projection="3d")
     ax.clear()
     ax.set_title(status)
     handle_cam = plot_cameras(ax, cameras, color="#FF7D1E")
```
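Context for this one-line change: Matplotlib 3.4 deprecated passing keyword arguments to `Figure.gca()`, and later releases removed that support, so 3D axes must be created explicitly:

```python
# Matplotlib 3.4 deprecated keyword arguments to Figure.gca(); later releases
# reject them, so the 3D axes are now requested explicitly.
import matplotlib.pyplot as plt

fig = plt.figure()
# ax = fig.gca(projection="3d")        # old form, rejected by modern Matplotlib
ax = fig.add_subplot(projection="3d")  # equivalent replacement
```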
packaging/build_conda.py: return-type annotations are added, the fvcore channel is dropped, `setup_cuda` gains a `--use-conda-cuda` path that sources the toolkit from the `nvidia` conda channel instead of a local install, and mkl/setuptools constraints are made conditional on the PyTorch version:

```diff
@@ -4,10 +4,11 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

+import argparse
 import os.path
 import runpy
 import subprocess
-from typing import List
+from typing import List, Tuple

 # required env vars:
 # CU_VERSION: E.g. cu112
@@ -23,7 +24,7 @@ pytorch_major_minor = tuple(int(i) for i in PYTORCH_VERSION.split(".")[:2])
 source_root_dir = os.environ["PWD"]


-def version_constraint(version):
+def version_constraint(version) -> str:
     """
     Given version "11.3" returns " >=11.3,<11.4"
     """
@@ -32,7 +33,7 @@ def version_constraint(version):
     return f" >={version},<{upper}"


-def get_cuda_major_minor():
+def get_cuda_major_minor() -> Tuple[str, str]:
     if CU_VERSION == "cpu":
         raise ValueError("fn only for cuda builds")
     if len(CU_VERSION) != 5 or CU_VERSION[:2] != "cu":
@@ -42,15 +43,13 @@ def get_cuda_major_minor():
     return major, minor


-def setup_cuda():
+def setup_cuda(use_conda_cuda: bool) -> List[str]:
     if CU_VERSION == "cpu":
-        return
+        return []
     major, minor = get_cuda_major_minor()
-    os.environ["CUDA_HOME"] = f"/usr/local/cuda-{major}.{minor}/"
     os.environ["FORCE_CUDA"] = "1"

     basic_nvcc_flags = (
-        "-gencode=arch=compute_35,code=sm_35 "
         "-gencode=arch=compute_50,code=sm_50 "
         "-gencode=arch=compute_60,code=sm_60 "
         "-gencode=arch=compute_70,code=sm_70 "
@@ -58,23 +57,44 @@ def setup_cuda():
         "-gencode=arch=compute_50,code=compute_50"
     )
     if CU_VERSION == "cu102":
         nvcc_flags = basic_nvcc_flags
     elif CU_VERSION == "cu110":
-        nvcc_flags = "-gencode=arch=compute_80,code=sm_80 " + basic_nvcc_flags
+        nvcc_flags = "-gencode=arch=compute_35,code=sm_35 " + basic_nvcc_flags
+    elif CU_VERSION < ("cu118"):
+        nvcc_flags = (
+            "-gencode=arch=compute_35,code=sm_35 "
+            + "-gencode=arch=compute_80,code=sm_80 "
+            + "-gencode=arch=compute_86,code=sm_86 "
+            + basic_nvcc_flags
+        )
     else:
         nvcc_flags = (
             "-gencode=arch=compute_80,code=sm_80 "
             + "-gencode=arch=compute_86,code=sm_86 "
+            + "-gencode=arch=compute_90,code=sm_90 "
             + basic_nvcc_flags
         )

     if os.environ.get("JUST_TESTRUN", "0") != "1":
         os.environ["NVCC_FLAGS"] = nvcc_flags
+    if use_conda_cuda:
+        os.environ["CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT1"] = "- cuda-toolkit"
+        os.environ["CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT2"] = (
+            f"- cuda-version={major}.{minor}"
+        )
+        return ["-c", f"nvidia/label/cuda-{major}.{minor}.0"]
+    else:
+        os.environ["CUDA_HOME"] = f"/usr/local/cuda-{major}.{minor}/"
+        return []


 def setup_conda_pytorch_constraint() -> List[str]:
     pytorch_constraint = f"- pytorch=={PYTORCH_VERSION}"
     os.environ["CONDA_PYTORCH_CONSTRAINT"] = pytorch_constraint
+    if pytorch_major_minor < (2, 2):
+        os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = "- mkl!=2024.1.0"
+        os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools<70"
+    else:
+        os.environ["CONDA_PYTORCH_MKL_CONSTRAINT"] = ""
+        os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools"
     os.environ["CONDA_PYTORCH_BUILD_CONSTRAINT"] = pytorch_constraint
     os.environ["PYTORCH_VERSION_NODOT"] = PYTORCH_VERSION.replace(".", "")
@@ -84,7 +104,7 @@ def setup_conda_pytorch_constraint() -> List[str]:
     return ["-c", "pytorch", "-c", "nvidia"]


-def setup_conda_cudatoolkit_constraint():
+def setup_conda_cudatoolkit_constraint() -> None:
     if CU_VERSION == "cpu":
         os.environ["CONDA_CPUONLY_FEATURE"] = "- cpuonly"
         os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = ""
@@ -105,14 +125,14 @@ def setup_conda_cudatoolkit_constraint():
     os.environ["CONDA_CUDATOOLKIT_CONSTRAINT"] = toolkit


-def do_build(start_args: List[str]):
+def do_build(start_args: List[str]) -> None:
     args = start_args.copy()

     test_flag = os.environ.get("TEST_FLAG")
     if test_flag is not None:
         args.append(test_flag)

-    args.extend(["-c", "bottler", "-c", "fvcore", "-c", "iopath", "-c", "conda-forge"])
+    args.extend(["-c", "bottler", "-c", "iopath", "-c", "conda-forge"])
     args.append("--no-anaconda-upload")
     args.extend(["--python", os.environ["PYTHON_VERSION"]])
     args.append("packaging/pytorch3d")
@@ -121,8 +141,16 @@

 if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Build the conda package.")
+    parser.add_argument(
+        "--use-conda-cuda",
+        action="store_true",
+        help="get cuda from conda ignoring local cuda",
+    )
+    our_args = parser.parse_args()
+
     args = ["conda", "build"]
-    setup_cuda()
+    args += setup_cuda(use_conda_cuda=our_args.use_conda_cuda)

     init_path = source_root_dir + "/pytorch3d/__init__.py"
     build_version = runpy.run_path(init_path)["__version__"]
```
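A sketch of the `CU_VERSION` contract implied by `get_cuda_major_minor` above: exactly five characters, `cu` plus a two-digit major and one-digit minor. The exact slicing below is an assumption; only the validation checks appear in the diff:

```python
# Sketch of the CU_VERSION parsing contract in build_conda.py (slicing assumed).
def cuda_major_minor(cu_version: str) -> tuple:
    if cu_version == "cpu":
        raise ValueError("fn only for cuda builds")
    if len(cu_version) != 5 or cu_version[:2] != "cu":
        raise ValueError("Unknown cuda version")
    return cu_version[2:4], cu_version[4:]

assert cuda_major_minor("cu121") == ("12", "1")  # -> /usr/local/cuda-12.1/
assert cuda_major_minor("cu118") == ("11", "8")  # -> /usr/local/cuda-11.8/
```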
Another copy of the Colab wheel-install snippet (file header lost) drops fvcore:

````diff
@@ -26,6 +26,6 @@ version_str="".join([
     torch.version.cuda.replace(".",""),
     f"_pyt{pyt_version_str}"
 ])
-!pip install fvcore iopath
+!pip install iopath
 !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
 ```
````
The wrapper script that launches the wheel-build containers (in packaging/linux_wheels/; file header lost) gains a persistent conda package cache and cu117/cu118 containers:

```diff
@@ -5,7 +5,13 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

-sudo docker run --rm -v "$PWD/../../:/inside" pytorch/conda-cuda bash inside/packaging/linux_wheels/inside.sh
-sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu113 pytorch/conda-builder:cuda113 bash inside/packaging/linux_wheels/inside.sh
-sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu115 pytorch/conda-builder:cuda115 bash inside/packaging/linux_wheels/inside.sh
-sudo docker run --rm -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu116 pytorch/conda-builder:cuda116 bash inside/packaging/linux_wheels/inside.sh
+# Some directory to persist downloaded conda packages
+conda_cache=/raid/$USER/building_conda_cache
+
+mkdir -p "$conda_cache"
+
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu113 pytorch/conda-builder:cuda113 bash inside/packaging/linux_wheels/inside.sh
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu115 pytorch/conda-builder:cuda115 bash inside/packaging/linux_wheels/inside.sh
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu116 pytorch/conda-builder:cuda116 bash inside/packaging/linux_wheels/inside.sh
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu117 pytorch/conda-builder:cuda117 bash inside/packaging/linux_wheels/inside.sh
+sudo docker run --rm -v "$conda_cache:/conda_cache" -v "$PWD/../../:/inside" -e SELECTED_CUDA=cu118 pytorch/conda-builder:cuda118 bash inside/packaging/linux_wheels/inside.sh
```
packaging/linux_wheels/inside.sh: the CUB download is disabled (only needed for old CUDA), the build matrix moves to PyTorch 2.0.1 on cu117/cu118, and the cudatoolkit selection becomes version dependent:

```diff
@@ -16,23 +16,32 @@ VERSION=$(python -c "exec(open('pytorch3d/__init__.py').read()); print(__version

 export BUILD_VERSION=$VERSION
 export FORCE_CUDA=1
 export MAX_JOBS=8
+export CONDA_PKGS_DIRS=/conda_cache

-wget --no-verbose https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
-tar xzf 1.10.0.tar.gz
-CUB_HOME=$(realpath ./cub-1.10.0)
-export CUB_HOME
-echo "CUB_HOME is now $CUB_HOME"
+if false
+then
+    # We used to have to do this for old versions of CUDA
+    wget --no-verbose https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
+    tar xzf 1.10.0.tar.gz
+    CUB_HOME=$(realpath ./cub-1.10.0)
+    export CUB_HOME
+    echo "CUB_HOME is now $CUB_HOME"
+fi

 # As a rule, we want to build for any combination of dependencies which is supported by
 # PyTorch3D and not older than the current Google Colab set up.

-PYTHON_VERSIONS="3.7 3.8 3.9 3.10"
+PYTHON_VERSIONS="3.8 3.9 3.10"
 # the keys are pytorch versions
 declare -A CONDA_CUDA_VERSIONS=(
-    ["1.10.1"]="cu111 cu113"
-    ["1.10.2"]="cu111 cu113"
-    ["1.10.0"]="cu111 cu113"
-    ["1.11.0"]="cu111 cu113 cu115"
+    # ["1.11.0"]="cu113"
+    # ["1.12.0"]="cu113"
+    # ["1.12.1"]="cu113"
+    # ["1.13.0"]="cu116"
+    # ["1.13.1"]="cu116 cu117"
+    # ["2.0.0"]="cu117 cu118"
+    ["2.0.1"]="cu117 cu118"
 )
@@ -41,39 +50,43 @@ for python_version in $PYTHON_VERSIONS
 do
     for pytorch_version in "${!CONDA_CUDA_VERSIONS[@]}"
     do
-        if [[ "3.7 3.8" != *$python_version* ]] && [[ "1.7.0" == *$pytorch_version* ]]
-        then
-            #python 3.9 and later not supported by pytorch 1.7.0 and before
-            continue
-        fi
         if [[ "3.7 3.8 3.9" != *$python_version* ]] && [[ "1.7.0 1.7.1 1.8.0 1.8.1 1.9.0 1.9.1 1.10.0 1.10.1 1.10.2" == *$pytorch_version* ]]
         then
             #python 3.10 and later not supported by pytorch 1.10.2 and before
             continue
         fi

-        extra_channel="-c conda-forge"
+        extra_channel="-c nvidia"
+        cudatools="pytorch-cuda"
         if [[ "1.11.0" == "$pytorch_version" ]]
         then
             extra_channel=""
+            cudatools="cudatoolkit"
         fi
         if [[ "1.12.0" == "$pytorch_version" ]] || [[ "1.12.1" == "$pytorch_version" ]]
         then
             extra_channel="-c conda-forge"
+            cudatools="cudatoolkit"
         fi

         for cu_version in ${CONDA_CUDA_VERSIONS[$pytorch_version]}
         do
             if [[ "cu113 cu115 cu116" == *$cu_version* ]]
             # ^^^ CUDA versions listed here have to be built
             # in their own containers.
             then
                 if [[ $SELECTED_CUDA != "$cu_version" ]]
                 then
                     continue
                 fi
             elif [[ $SELECTED_CUDA != "" ]]
             then
                 continue
             fi

             case "$cu_version" in
+                cu118)
+                    export CUDA_HOME=/usr/local/cuda-11.8/
+                    export CUDA_TAG=11.8
+                    export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+                    ;;
+                cu117)
+                    export CUDA_HOME=/usr/local/cuda-11.7/
+                    export CUDA_TAG=11.7
+                    export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_50,code=compute_50"
+                    ;;
                 cu116)
                     export CUDA_HOME=/usr/local/cuda-11.6/
                     export CUDA_TAG=11.6
@@ -130,8 +143,8 @@ do
             conda create -y -n "$tag" "python=$python_version"
             conda activate "$tag"
             # shellcheck disable=SC2086
-            conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "cudatoolkit=$CUDA_TAG" torchvision
-            pip install fvcore iopath
+            conda install -y -c pytorch $extra_channel "pytorch=$pytorch_version" "$cudatools=$CUDA_TAG"
+            pip install iopath
             echo "python version" "$python_version" "pytorch version" "$pytorch_version" "cuda version" "$cu_version" "tag" "$tag"

             rm -rf dist
```
packaging/pytorch3d/meta.yaml: the recipe picks up the new environment-driven constraints, pins mkl to 2023, drops fvcore from run requirements, and adds test dependencies:

```diff
@@ -8,12 +8,16 @@ source:
 requirements:
   build:
     - {{ compiler('c') }} # [win]
+    {{ environ.get('CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT1', '') }}
+    {{ environ.get('CONDA_CUDA_TOOLKIT_BUILD_CONSTRAINT2', '') }}
     {{ environ.get('CONDA_CUB_CONSTRAINT') }}

   host:
     - python
-    - setuptools
+    - mkl =2023 # [x86_64]
+    {{ environ.get('SETUPTOOLS_CONSTRAINT') }}
     {{ environ.get('CONDA_PYTORCH_BUILD_CONSTRAINT') }}
+    {{ environ.get('CONDA_PYTORCH_MKL_CONSTRAINT') }}
     {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
     {{ environ.get('CONDA_CPUONLY_FEATURE') }}
@@ -21,13 +25,14 @@ requirements:
   run:
     - python
     - numpy >=1.11
     - torchvision >=0.5
-    - fvcore
+    - mkl =2023 # [x86_64]
     - iopath
     {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }}
     {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}

 build:
   string: py{{py}}_{{ environ['CU_VERSION'] }}_pyt{{ environ['PYTORCH_VERSION_NODOT']}}
-  script: LD_LIBRARY_PATH=$PREFIX/lib:$BUILD_PREFIX/lib:$LD_LIBRARY_PATH python setup.py install --single-version-externally-managed --record=record.txt # [not win]
+  # script: LD_LIBRARY_PATH=$PREFIX/lib:$BUILD_PREFIX/lib:$LD_LIBRARY_PATH python setup.py install --single-version-externally-managed --record=record.txt # [not win]
+  script: python setup.py install --single-version-externally-managed --record=record.txt # [not win]
   script_env:
     - CUDA_HOME
@@ -47,6 +52,10 @@ test:
     - imageio
     - hydra-core
     - accelerate
+    - matplotlib
+    - tabulate
+    - pandas
+    - sqlalchemy
   commands:
     #pytest .
     python -m unittest discover -v -s tests -t .
```
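A sketch of the env-var plumbing between build_conda.py and this recipe: the build script exports requirement lines as strings, and conda-build's jinja pass splices them in through `environ.get(...)`. Values below are examples, not pinned facts:

```python
# Sketch of how build_conda.py's exported strings become meta.yaml entries.
import os

PYTORCH_VERSION = "2.4.1"  # assumed example
pytorch_major_minor = (2, 4)

os.environ["CONDA_PYTORCH_BUILD_CONSTRAINT"] = f"- pytorch=={PYTORCH_VERSION}"
if pytorch_major_minor < (2, 2):
    os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools<70"
else:
    os.environ["SETUPTOOLS_CONSTRAINT"] = "- setuptools"

# "{{ environ.get('SETUPTOOLS_CONSTRAINT') }}" in meta.yaml then renders as an
# ordinary requirements entry:
print(os.environ["SETUPTOOLS_CONSTRAINT"])  # - setuptools
```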
The remaining diffs touch the Implicitron trainer, mostly updating pyre annotations. A module header (file name lost) gains a pyre-unsafe marker:

```diff
@@ -3,3 +3,5 @@
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
```
@@ -5,6 +5,8 @@
|
||||
# This source code is licensed under the BSD-style license found in the
|
||||
# LICENSE file in the root directory of this source tree.
|
||||
|
||||
# pyre-unsafe
|
||||
|
||||
""""
|
||||
This file is the entry point for launching experiments with Implicitron.
|
||||

@@ -97,7 +99,7 @@ except ModuleNotFoundError:
no_accelerate = os.environ.get("PYTORCH3D_NO_ACCELERATE") is not None


class Experiment(Configurable): # pyre-ignore: 13
class Experiment(Configurable):
"""
This class is at the top level of Implicitron's config hierarchy. Its
members are high-level components necessary for training an implicit rende-
@@ -118,12 +120,16 @@ class Experiment(Configurable): # pyre-ignore: 13
will be saved here.
"""

# pyre-fixme[13]: Attribute `data_source` is never initialized.
data_source: DataSourceBase
data_source_class_type: str = "ImplicitronDataSource"
# pyre-fixme[13]: Attribute `model_factory` is never initialized.
model_factory: ModelFactoryBase
model_factory_class_type: str = "ImplicitronModelFactory"
# pyre-fixme[13]: Attribute `optimizer_factory` is never initialized.
optimizer_factory: OptimizerFactoryBase
optimizer_factory_class_type: str = "ImplicitronOptimizerFactory"
# pyre-fixme[13]: Attribute `training_loop` is never initialized.
training_loop: TrainingLoopBase
training_loop_class_type: str = "ImplicitronTrainingLoop"

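As context for the hunk above: a minimal, hedged sketch of how this Configurable hierarchy is typically consumed. The import path and the visdom_port key are taken from the trainer and test code further below; treat the exact names as assumptions.

    from omegaconf import OmegaConf
    from projects.implicitron_trainer.experiment import Experiment  # assumed path

    # Structured config with the defaults declared by the dataclass tree.
    cfg = OmegaConf.structured(Experiment)
    # Example override, using a key visible in the experiment.yaml dump below.
    cfg.training_loop_ImplicitronTrainingLoop_args.visdom_port = 8097
    # __post_init__ calls run_auto_creation, building the four members above.
    experiment = Experiment(**cfg)
    experiment.run()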
@@ -3,3 +3,5 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import logging
import os
from typing import Optional
@@ -43,7 +45,7 @@ class ModelFactoryBase(ReplaceableBase):


@registry.register
class ImplicitronModelFactory(ModelFactoryBase): # pyre-ignore [13]
class ImplicitronModelFactory(ModelFactoryBase):
"""
A factory class that initializes an implicit rendering model.

@@ -59,6 +61,7 @@ class ImplicitronModelFactory(ModelFactoryBase): # pyre-ignore [13]

"""

# pyre-fixme[13]: Attribute `model` is never initialized.
model: ImplicitronModelBase
model_class_type: str = "GenericModel"
resume: bool = True

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import inspect
import logging
import os
@@ -121,7 +123,6 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
"""
# Get the parameters to optimize
if hasattr(model, "_get_param_groups"): # use the model function
# pyre-ignore[29]
p_groups = model._get_param_groups(self.lr, wd=self.weight_decay)
else:
p_groups = [

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import logging
import os
import time
@@ -21,7 +23,6 @@ from pytorch3d.implicitron.tools.config import (
run_auto_creation,
)
from pytorch3d.implicitron.tools.stats import Stats
from pytorch3d.renderer.cameras import CamerasBase
from torch.utils.data import DataLoader, Dataset

from .utils import seed_all_random_engines
@@ -29,13 +30,13 @@ from .utils import seed_all_random_engines
logger = logging.getLogger(__name__)


# pyre-fixme[13]: Attribute `evaluator` is never initialized.
class TrainingLoopBase(ReplaceableBase):
"""
Members:
evaluator: An EvaluatorBase instance, used to evaluate training results.
"""

# pyre-fixme[13]: Attribute `evaluator` is never initialized.
evaluator: Optional[EvaluatorBase]
evaluator_class_type: Optional[str] = "ImplicitronEvaluator"

@@ -111,6 +112,8 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
def __post_init__(self):
run_auto_creation(self)

# pyre-fixme[14]: `run` overrides method defined in `TrainingLoopBase`
# inconsistently.
def run(
self,
*,
@@ -256,7 +259,6 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
list(log_vars),
plot_file=os.path.join(exp_dir, "train_stats.pdf"),
visdom_env=visdom_env_charts,
verbose=False,
visdom_server=self.visdom_server,
visdom_port=self.visdom_port,
)
@@ -382,7 +384,8 @@ class ImplicitronTrainingLoop(TrainingLoopBase):

# print textual status update
if it % self.metric_print_interval == 0 or last_iter:
stats.print(stat_set=trainmode, max_it=n_batches)
std_out = stats.get_status_string(stat_set=trainmode, max_it=n_batches)
logger.info(std_out)

# visualize results
if (
@@ -392,7 +395,6 @@ class ImplicitronTrainingLoop(TrainingLoopBase):
):
prefix = f"e{stats.epoch}_it{stats.it[trainmode]}"
if hasattr(model, "visualize"):
# pyre-ignore [29]
model.visualize(
viz,
visdom_env_imgs,

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe


import random


@@ -3,3 +3,5 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

@@ -129,6 +129,19 @@ data_source_ImplicitronDataSource_args:
dataset_length_train: 0
dataset_length_val: 0
dataset_length_test: 0
data_loader_map_provider_TrainEvalDataLoaderMapProvider_args:
batch_size: 1
num_workers: 0
dataset_length_train: 0
dataset_length_val: 0
dataset_length_test: 0
train_conditioning_type: SAME
val_conditioning_type: SAME
test_conditioning_type: KNOWN
images_per_seq_options: []
sample_consecutive_frames: false
consecutive_frames_max_gap: 0
consecutive_frames_max_gap_seconds: 0.1
model_factory_ImplicitronModelFactory_args:
resume: true
model_class_type: GenericModel
@@ -203,6 +216,7 @@ model_factory_ImplicitronModelFactory_args:
n_rays_total_training: null
stratified_point_sampling_training: true
stratified_point_sampling_evaluation: false
cast_ray_bundle_as_cone: false
scene_extent: 8.0
scene_center:
- 0.0
@@ -215,6 +229,7 @@ model_factory_ImplicitronModelFactory_args:
n_rays_total_training: null
stratified_point_sampling_training: true
stratified_point_sampling_evaluation: false
cast_ray_bundle_as_cone: false
min_depth: 0.1
max_depth: 8.0
renderer_LSTMRenderer_args:
@@ -234,6 +249,8 @@ model_factory_ImplicitronModelFactory_args:
append_coarse_samples_to_fine: true
density_noise_std_train: 0.0
return_weights: false
blurpool_weights: false
sample_pdf_eps: 1.0e-05
raymarcher_CumsumRaymarcher_args:
surface_thickness: 1
bg_color:
@@ -346,6 +363,7 @@ model_factory_ImplicitronModelFactory_args:
n_hidden_neurons_dir: 128
input_xyz: true
xyz_ray_dir_in_camera_coords: false
use_integrated_positional_encoding: false
transformer_dim_down_factor: 2.0
n_hidden_neurons_xyz: 80
n_layers_xyz: 2
@@ -357,6 +375,7 @@ model_factory_ImplicitronModelFactory_args:
n_hidden_neurons_dir: 128
input_xyz: true
xyz_ray_dir_in_camera_coords: false
use_integrated_positional_encoding: false
transformer_dim_down_factor: 1.0
n_hidden_neurons_xyz: 256
n_layers_xyz: 8
@@ -629,6 +648,7 @@ model_factory_ImplicitronModelFactory_args:
n_rays_total_training: null
stratified_point_sampling_training: true
stratified_point_sampling_evaluation: false
cast_ray_bundle_as_cone: false
scene_extent: 8.0
scene_center:
- 0.0
@@ -641,6 +661,7 @@ model_factory_ImplicitronModelFactory_args:
n_rays_total_training: null
stratified_point_sampling_training: true
stratified_point_sampling_evaluation: false
cast_ray_bundle_as_cone: false
min_depth: 0.1
max_depth: 8.0
renderer_LSTMRenderer_args:
@@ -660,6 +681,8 @@ model_factory_ImplicitronModelFactory_args:
append_coarse_samples_to_fine: true
density_noise_std_train: 0.0
return_weights: false
blurpool_weights: false
sample_pdf_eps: 1.0e-05
raymarcher_CumsumRaymarcher_args:
surface_thickness: 1
bg_color:
@@ -724,6 +747,7 @@ model_factory_ImplicitronModelFactory_args:
n_hidden_neurons_dir: 128
input_xyz: true
xyz_ray_dir_in_camera_coords: false
use_integrated_positional_encoding: false
transformer_dim_down_factor: 2.0
n_hidden_neurons_xyz: 80
n_layers_xyz: 2
@@ -735,6 +759,7 @@ model_factory_ImplicitronModelFactory_args:
n_hidden_neurons_dir: 128
input_xyz: true
xyz_ray_dir_in_camera_coords: false
use_integrated_positional_encoding: false
transformer_dim_down_factor: 1.0
n_hidden_neurons_xyz: 256
n_layers_xyz: 8
@@ -962,6 +987,7 @@ model_factory_ImplicitronModelFactory_args:
n_hidden_neurons_dir: 128
input_xyz: true
xyz_ray_dir_in_camera_coords: false
use_integrated_positional_encoding: false
transformer_dim_down_factor: 2.0
n_hidden_neurons_xyz: 80
n_layers_xyz: 2
@@ -973,6 +999,7 @@ model_factory_ImplicitronModelFactory_args:
n_hidden_neurons_dir: 128
input_xyz: true
xyz_ray_dir_in_camera_coords: false
use_integrated_positional_encoding: false
transformer_dim_down_factor: 1.0
n_hidden_neurons_xyz: 256
n_layers_xyz: 8

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import os
import tempfile
import unittest
@@ -132,6 +134,13 @@ class TestExperiment(unittest.TestCase):
# Check that the default config values, defined by Experiment and its
# members, is what we expect it to be.
cfg = OmegaConf.structured(experiment.Experiment)
# the following removes the possible effect of env variables
ds_arg = cfg.data_source_ImplicitronDataSource_args
ds_arg.dataset_map_provider_JsonIndexDatasetMapProvider_args.dataset_root = ""
ds_arg.dataset_map_provider_JsonIndexDatasetMapProviderV2_args.dataset_root = ""
if "dataset_map_provider_SqlIndexDatasetMapProvider_args" in ds_arg:
del ds_arg.dataset_map_provider_SqlIndexDatasetMapProvider_args
cfg.training_loop_ImplicitronTrainingLoop_args.visdom_port = 8097
yaml = OmegaConf.to_yaml(cfg, sort_keys=False)
if DEBUG:
(DATA_DIR / "experiment.yaml").write_text(yaml)

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import logging
import os
import unittest

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import os
import unittest


@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import contextlib
import logging
import os

@@ -5,6 +5,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

"""
Script to visualize a previously trained model. Example call:


@@ -343,12 +343,14 @@ class RadianceFieldRenderer(torch.nn.Module):
# For a full render pass concatenate the output chunks,
# and reshape to image size.
out = {
k: torch.cat(
[ch_o[k] for ch_o in chunk_outputs],
dim=1,
).view(-1, *self._image_size, 3)
if chunk_outputs[0][k] is not None
else None
k: (
torch.cat(
[ch_o[k] for ch_o in chunk_outputs],
dim=1,
).view(-1, *self._image_size, 3)
if chunk_outputs[0][k] is not None
else None
)
for k in ("rgb_fine", "rgb_coarse", "rgb_gt")
}
else:

@@ -330,9 +330,9 @@ class NeRFRaysampler(torch.nn.Module):

if self.training:
# During training we randomly subsample rays.
sel_rays = torch.randperm(n_pixels, device=device)[
: self._mc_raysampler._n_rays_per_image
]
sel_rays = torch.randperm(
n_pixels, device=full_ray_bundle.lengths.device
)[: self._mc_raysampler._n_rays_per_image]
else:
# In case we test, we take only the requested chunk.
if chunksize is None:

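The point of the change above is that the permutation is created on the same device as the ray bundle it will index, which sidesteps device-mismatch errors or implicit copies during indexing. A minimal sketch of the pattern (tensor shapes illustrative):

    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"
    lengths = torch.rand(2, 1024, 64, device=device)  # stand-in for full_ray_bundle.lengths
    n_pixels = lengths.shape[1]
    n_rays_per_image = 128
    # Draw indices on lengths.device so indexing never mixes devices.
    sel_rays = torch.randperm(n_pixels, device=lengths.device)[:n_rays_per_image]
    subsampled = lengths[:, sel_rays]  # stays on the same device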
@@ -4,4 +4,6 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

__version__ = "0.7.3"
# pyre-unsafe

__version__ = "0.7.8"

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from .datatypes import Device, get_device, make_device



@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import Sequence, Tuple, Union

import torch

@@ -4,7 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import sys
# pyre-unsafe

from typing import Optional, Union

import torch

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import math
from typing import Tuple


@@ -4,5 +4,7 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from .symeig3x3 import symeig3x3
from .utils import _safe_det_3x3

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import math
from typing import Optional, Tuple


@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe


import torch


@@ -99,6 +99,7 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("marching_cubes", &MarchingCubes);

// Pulsar.
// Pulsar not enabled on AMD.
#ifdef PULSAR_LOGGING_ENABLED
c10::ShowLogInfoToStderr();
#endif

@@ -266,6 +266,8 @@ at::Tensor FaceAreasNormalsBackwardCuda(
grad_normals_t{grad_normals, "grad_normals", 4};
at::CheckedFrom c = "FaceAreasNormalsBackwardCuda";
at::checkAllSameGPU(c, {verts_t, faces_t, grad_areas_t, grad_normals_t});
// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic("FaceAreasNormalsBackwardCuda");

// Set the device for the kernel launch based on the device of verts
at::cuda::CUDAGuard device_guard(verts.device());

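The alertNotDeterministic calls added here and in the hunks below plug into PyTorch's deterministic mode: when a user opts in, an op that reaches this call raises instead of silently accumulating with atomicAdd in an unspecified order. A hedged sketch of the user-visible contract:

    import torch

    # With this flag on, a CUDA backward that hits
    # alertNotDeterministic("FaceAreasNormalsBackwardCuda") is expected to
    # raise a RuntimeError naming the op instead of running nondeterministically.
    torch.use_deterministic_algorithms(True)
    # ... invoke the op's backward on CUDA here ...
    torch.use_deterministic_algorithms(False)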
@@ -130,6 +130,9 @@ std::tuple<at::Tensor, at::Tensor> InterpFaceAttrsBackwardCuda(
at::checkAllSameType(
c, {barycentric_coords_t, face_attrs_t, grad_pix_attrs_t});

// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic("InterpFaceAttrsBackwardCuda");

// Set the device for the kernel launch based on the input
at::cuda::CUDAGuard device_guard(pix_to_face.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();

@@ -12,8 +12,6 @@
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/tuple.h>
#include "iou_box3d/iou_utils.cuh"

// Parallelize over N*M computations which can each be done

@@ -8,7 +8,6 @@

#include <float.h>
#include <math.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include "utils/float_math.cuh"


@@ -338,7 +338,7 @@ std::tuple<at::Tensor, at::Tensor> KNearestNeighborIdxCuda(

TORCH_CHECK((norm == 1) || (norm == 2), "Norm must be 1 or 2.");

TORCH_CHECK(p2.size(2) == D, "Point sets must have the same last dimension");
TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension");
auto long_dtype = lengths1.options().dtype(at::kLong);
auto idxs = at::zeros({N, P1, K}, long_dtype);
auto dists = at::zeros({N, P1, K}, p1.options());
@@ -495,7 +495,7 @@ __global__ void KNearestNeighborBackwardKernel(
if ((p1_idx < num1) && (k < num2)) {
const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k];
// index of point in p2 corresponding to the k-th nearest neighbor
const size_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
const int64_t p2_idx = idxs[n * P1 * K + p1_idx * K + k];
// If the index is the pad value of -1 then ignore it
if (p2_idx == -1) {
continue;
@@ -534,6 +534,9 @@ std::tuple<at::Tensor, at::Tensor> KNearestNeighborBackwardCuda(
c, {p1_t, p2_t, lengths1_t, lengths2_t, idxs_t, grad_dists_t});
at::checkAllSameType(c, {p1_t, p2_t, grad_dists_t});

// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic("KNearestNeighborBackwardCuda");

// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(p1.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();

@@ -9,8 +9,6 @@
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>
#include "marching_cubes/tables.h"

@@ -40,20 +38,6 @@ through" each cube in the grid.
// EPS: Used to indicate if two float values are close
__constant__ const float EPSILON = 1e-5;

// Thrust wrapper for exclusive scan
//
// Args:
// output: pointer to on-device output array
// input: pointer to on-device input array, where scan is performed
// numElements: number of elements for the input array
//
void ThrustScanWrapper(int* output, int* input, int numElements) {
thrust::exclusive_scan(
thrust::device_ptr<int>(input),
thrust::device_ptr<int>(input + numElements),
thrust::device_ptr<int>(output));
}

// Linearly interpolate the position where an isosurface cuts an edge
// between two vertices, based on their scalar values
//
@@ -239,7 +223,7 @@ __global__ void CompactVoxelsKernel(
compactedVoxelArray,
const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
voxelOccupied,
const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
voxelOccupiedScan,
uint numVoxels) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
@@ -271,7 +255,8 @@ __global__ void GenerateFacesKernel(
at::PackedTensorAccessor<int64_t, 1, at::RestrictPtrTraits> ids,
at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
compactedVoxelArray,
at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits> numVertsScanned,
at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
numVertsScanned,
const uint activeVoxels,
const at::PackedTensorAccessor32<float, 3, at::RestrictPtrTraits> vol,
const at::PackedTensorAccessor32<int, 2, at::RestrictPtrTraits> faceTable,
@@ -397,6 +382,44 @@ __global__ void GenerateFacesKernel(
} // end for grid-strided kernel
}

// ATen/Torch does not have an exclusive-scan operator. Additionally, in the
// code below we need to get the "total number of items to work on" after
// a scan, which with an inclusive-scan would simply be the value of the last
// element in the tensor.
//
// This utility function hits two birds with one stone, by running
// an inclusive-scan into a right-shifted view of a tensor that's
// allocated to be one element bigger than the input tensor.
//
// Note; return tensor is `int64_t` per element, even if the input
// tensor is only 32-bit. Also, the return tensor is one element bigger
// than the input one.
//
// Secondary optional argument is an output argument that gets the
// value of the last element of the return tensor (because you almost
// always need this CPU-side right after this function anyway).
static at::Tensor ExclusiveScanAndTotal(
const at::Tensor& inTensor,
int64_t* optTotal = nullptr) {
const auto inSize = inTensor.sizes()[0];
auto retTensor = at::zeros({inSize + 1}, at::kLong).to(inTensor.device());

using at::indexing::None;
using at::indexing::Slice;
auto rightShiftedView = retTensor.index({Slice(1, None)});

// Do an (inclusive-scan) cumulative sum in to the view that's
// shifted one element to the right...
at::cumsum_out(rightShiftedView, inTensor, 0, at::kLong);

if (optTotal) {
*optTotal = retTensor[inSize].cpu().item<int64_t>();
}

// ...so that the not-shifted tensor holds the exclusive-scan
return retTensor;
}

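The same shifted-view trick, restated as a short Python sketch (hypothetical helper mirroring ExclusiveScanAndTotal above):

    import torch

    def exclusive_scan_and_total(t: torch.Tensor):
        # One extra element: out[1:] receives the inclusive scan, so
        # out[:-1] is the exclusive scan and out[-1] is the grand total.
        out = torch.zeros(t.numel() + 1, dtype=torch.int64, device=t.device)
        torch.cumsum(t, dim=0, out=out[1:])
        return out, int(out[-1])

    scan, total = exclusive_scan_and_total(torch.tensor([3, 0, 2, 5]))
    # scan == tensor([0, 3, 3, 5, 10]); total == 10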
// Entrance for marching cubes cuda extension. Marching Cubes is an algorithm to
// create triangle meshes from an implicit function (one of the form f(x, y, z)
// = 0). It works by iteratively checking a grid of cubes superimposed over a
@@ -455,6 +478,9 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
grid.x = 65535;
}

using at::indexing::None;
using at::indexing::Slice;

auto d_voxelVerts =
at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
.to(vol.device());
@@ -477,18 +503,9 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
// count for voxels in the grid and compute the number of active voxels.
// If the number of active voxels is 0, return zero tensor for verts and
// faces.
int64_t activeVoxels = 0;
auto d_voxelOccupiedScan =
at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
.to(vol.device());
ThrustScanWrapper(
d_voxelOccupiedScan.data_ptr<int>(),
d_voxelOccupied.data_ptr<int>(),
numVoxels);

// number of active voxels
int lastElement = d_voxelVerts[numVoxels - 1].cpu().item<int>();
int lastScan = d_voxelOccupiedScan[numVoxels - 1].cpu().item<int>();
int activeVoxels = lastElement + lastScan;
ExclusiveScanAndTotal(d_voxelOccupied, &activeVoxels);

const int device_id = vol.device().index();
auto opt = at::TensorOptions().dtype(at::kInt).device(at::kCUDA, device_id);
@@ -503,28 +520,21 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
return std::make_tuple(verts, faces, ids);
}

// Execute "CompactVoxelsKernel" kernel to compress voxels for accleration.
// Execute "CompactVoxelsKernel" kernel to compress voxels for acceleration.
// This allows us to run triangle generation on only the occupied voxels.
auto d_compVoxelArray = at::zeros({activeVoxels}, opt);
CompactVoxelsKernel<<<grid, threads, 0, stream>>>(
d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
d_voxelOccupiedScan.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
d_voxelOccupiedScan
.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
numVoxels);
AT_CUDA_CHECK(cudaGetLastError());
cudaDeviceSynchronize();

// Scan d_voxelVerts array to generate offsets of vertices for each voxel
auto d_voxelVertsScan = at::zeros({numVoxels}, opt);
ThrustScanWrapper(
d_voxelVertsScan.data_ptr<int>(),
d_voxelVerts.data_ptr<int>(),
numVoxels);

// total number of vertices
lastElement = d_voxelVerts[numVoxels - 1].cpu().item<int>();
lastScan = d_voxelVertsScan[numVoxels - 1].cpu().item<int>();
int totalVerts = lastElement + lastScan;
int64_t totalVerts = 0;
auto d_voxelVertsScan = ExclusiveScanAndTotal(d_voxelVerts, &totalVerts);

// Execute "GenerateFacesKernel" kernel
// This runs only on the occupied voxels.
@@ -544,7 +554,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
faces.packed_accessor<int64_t, 2, at::RestrictPtrTraits>(),
ids.packed_accessor<int64_t, 1, at::RestrictPtrTraits>(),
d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
d_voxelVertsScan.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
d_voxelVertsScan.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
activeVoxels,
vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
faceTable.packed_accessor32<int, 2, at::RestrictPtrTraits>(),

@@ -71,8 +71,8 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCpu(
if ((j + 1) % 3 == 0 && ps[0] != ps[1] && ps[1] != ps[2] &&
ps[2] != ps[0]) {
for (int k = 0; k < 3; k++) {
int v = tri[k];
edge_id_to_v[tri.at(k)] = ps.at(k);
int64_t v = tri.at(k);
edge_id_to_v[v] = ps.at(k);
if (!uniq_edge_id.count(v)) {
uniq_edge_id[v] = verts.size();
verts.push_back(edge_id_to_v[v]);

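These kernels back PyTorch3D's marching cubes op. A hedged usage sketch — assuming the Python entry point pytorch3d.ops.marching_cubes.marching_cubes and its (vol, isolevel) signature — extracting a sphere's 0.5-isosurface from a distance field:

    import torch
    from pytorch3d.ops.marching_cubes import marching_cubes  # assumed entry point

    g = torch.linspace(-1, 1, 32)
    z, y, x = torch.meshgrid(g, g, g, indexing="ij")
    vol = torch.sqrt(x**2 + y**2 + z**2).unsqueeze(0)  # (1, D, H, W) scalar field
    verts, faces = marching_cubes(vol, isolevel=0.5)   # per-batch lists of tensors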
@@ -305,6 +305,8 @@ std::tuple<at::Tensor, at::Tensor> DistanceBackwardCuda(
at::CheckedFrom c = "DistanceBackwardCuda";
at::checkAllSameGPU(c, {objects_t, targets_t, idx_objects_t, grad_dists_t});
at::checkAllSameType(c, {objects_t, targets_t, grad_dists_t});
// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic("DistanceBackwardCuda");

// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(objects.device());
@@ -624,6 +626,9 @@ std::tuple<at::Tensor, at::Tensor> PointFaceArrayDistanceBackwardCuda(
at::CheckedFrom c = "PointFaceArrayDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, tris_t, grad_dists_t});
at::checkAllSameType(c, {points_t, tris_t, grad_dists_t});
// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic(
"PointFaceArrayDistanceBackwardCuda");

// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
@@ -787,6 +792,9 @@ std::tuple<at::Tensor, at::Tensor> PointEdgeArrayDistanceBackwardCuda(
at::CheckedFrom c = "PointEdgeArrayDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic(
"PointEdgeArrayDistanceBackwardCuda");

// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());

@@ -141,6 +141,9 @@ void PointsToVolumesForwardCuda(
grid_sizes_t,
mask_t});

// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic("PointsToVolumesForwardCuda");

// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points_3d.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();

@@ -30,11 +30,20 @@
#define GLOBAL __global__
#define RESTRICT __restrict__
#define DEBUGBREAK()
#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
#pragma nv_diag_suppress 1866
#pragma nv_diag_suppress 2941
#pragma nv_diag_suppress 2951
#pragma nv_diag_suppress 2967
#else
#if !defined(USE_ROCM)
#pragma diag_suppress = attribute_not_allowed
#pragma diag_suppress = 1866
#pragma diag_suppress = 2941
#pragma diag_suppress = 2951
#pragma diag_suppress = 2967
#endif //! USE_ROCM
#endif
#else // __CUDACC__
#define INLINE inline
#define HOST
@@ -49,6 +58,9 @@
#pragma clang diagnostic pop
#ifdef WITH_CUDA
#include <ATen/cuda/CUDAContext.h>
#if !defined(USE_ROCM)
#include <vector_functions.h>
#endif //! USE_ROCM
#else
#ifndef cudaStream_t
typedef void* cudaStream_t;
@@ -65,8 +77,6 @@ struct float2 {
struct float3 {
float x, y, z;
};
#endif
namespace py = pybind11;
inline float3 make_float3(const float& x, const float& y, const float& z) {
float3 res;
res.x = x;
@@ -74,6 +84,8 @@ inline float3 make_float3(const float& x, const float& y, const float& z) {
res.z = z;
return res;
}
#endif
namespace py = pybind11;

inline bool operator==(const float3& a, const float3& b) {
return a.x == b.x && a.y == b.y && a.z == b.z;

@@ -59,6 +59,11 @@ getLastCudaError(const char* errorMessage, const char* file, const int line) {
#define SHARED __shared__
#define ACTIVEMASK() __activemask()
#define BALLOT(mask, val) __ballot_sync((mask), val)

/* TODO (ROCM-6.2): None of the WARP_* are used anywhere and ROCM-6.2 natively
* supports __shfl_*. Disabling until the move to ROCM-6.2.
*/
#if !defined(USE_ROCM)
/**
* Find the cumulative sum within a warp up to the current
* thread lane, with each mask thread contributing base.
@@ -115,6 +120,7 @@ INLINE DEVICE float3 WARP_SUM_FLOAT3(
ret.z = WARP_SUM(group, mask, base.z);
return ret;
}
#endif //! USE_ROCM

// Floating point.
// #define FMUL(a, b) __fmul_rn((a), (b))
@@ -142,6 +148,7 @@ INLINE DEVICE float3 WARP_SUM_FLOAT3(
#define FMA(x, y, z) __fmaf_rn((x), (y), (z))
#define I2F(a) __int2float_rn(a)
#define FRCP(x) __frcp_rn(x)
#if !defined(USE_ROCM)
__device__ static float atomicMax(float* address, float val) {
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
@@ -166,6 +173,7 @@ __device__ static float atomicMin(float* address, float val) {
} while (assumed != old);
return __int_as_float(old);
}
#endif //! USE_ROCM
#define DMAX(a, b) FMAX(a, b)
#define DMIN(a, b) FMIN(a, b)
#define DSQRT(a) sqrt(a)
@@ -357,11 +357,11 @@ void MAX_WS(
//
//
#define END_PARALLEL() \
end_parallel:; \
end_parallel :; \
}
#define END_PARALLEL_NORET() }
#define END_PARALLEL_2D() \
end_parallel:; \
end_parallel :; \
} \
}
#define END_PARALLEL_2D_NORET() \

@@ -14,7 +14,7 @@
#include "./commands.h"

namespace pulsar {
IHD CamGradInfo::CamGradInfo() {
IHD CamGradInfo::CamGradInfo(int x) {
cam_pos = make_float3(0.f, 0.f, 0.f);
pixel_0_0_center = make_float3(0.f, 0.f, 0.f);
pixel_dir_x = make_float3(0.f, 0.f, 0.f);

@@ -63,7 +63,7 @@ inline bool operator==(const CamInfo& a, const CamInfo& b) {
};

struct CamGradInfo {
HOST DEVICE CamGradInfo();
HOST DEVICE CamGradInfo(int = 0);
float3 cam_pos;
float3 pixel_0_0_center;
float3 pixel_dir_x;

@@ -24,7 +24,7 @@
// #pragma diag_suppress = 68
#include <ATen/cuda/CUDAContext.h>
// #pragma pop
#include "../cuda/commands.h"
#include "../gpu/commands.h"
#else
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"

@@ -46,6 +46,7 @@ IHD float3 outer_product_sum(const float3& a) {
}

// TODO: put intrinsics here.
#if !defined(USE_ROCM)
IHD float3 operator+(const float3& a, const float3& b) {
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
@@ -93,6 +94,7 @@ IHD float3 operator*(const float3& a, const float3& b) {
IHD float3 operator*(const float& a, const float3& b) {
return b * a;
}
#endif //! USE_ROCM

INLINE DEVICE float length(const float3& v) {
// TODO: benchmark what's faster.

@@ -93,7 +93,7 @@ HOST void construct(
MALLOC(self->di_sorted_d, DrawInfo, max_num_balls);
MALLOC(self->region_flags_d, char, max_num_balls);
MALLOC(self->num_selected_d, size_t, 1);
MALLOC(self->forw_info_d, float, width* height*(3 + 2 * n_track));
MALLOC(self->forw_info_d, float, width* height * (3 + 2 * n_track));
MALLOC(self->min_max_pixels_d, IntersectInfo, 1);
MALLOC(self->grad_pos_d, float3, max_num_balls);
MALLOC(self->grad_col_d, float, max_num_balls* n_channels);

@@ -102,6 +102,7 @@ void forward(
self->workspace_d,
self->workspace_size,
stream);
CHECKLAUNCH();
SORT_ASCENDING_WS(
self->min_depth_d,
self->min_depth_sorted_d,
@@ -111,6 +112,7 @@ void forward(
self->workspace_d,
self->workspace_size,
stream);
CHECKLAUNCH();
SORT_ASCENDING_WS(
self->min_depth_d,
self->min_depth_sorted_d,

@@ -99,7 +99,7 @@ GLOBAL void render(
/** Whether loading of balls is completed. */
SHARED bool loading_done;
/** The number of balls loaded overall (just for statistics). */
SHARED int n_balls_loaded;
[[maybe_unused]] SHARED int n_balls_loaded;
/** The area this thread block covers. */
SHARED IntersectInfo block_area;
if (thread_block.thread_rank() == 0) {
@@ -283,9 +283,15 @@ GLOBAL void render(
(percent_allowed_difference > 0.f &&
max_closest_possible_intersection > depth_threshold) ||
tracker.get_n_hits() >= max_n_hits;
#if defined(__CUDACC__) && defined(__HIP_PLATFORM_AMD__)
unsigned long long warp_done = __ballot(done);
int warp_done_bit_cnt = __popcll(warp_done);
#else
uint warp_done = thread_warp.ballot(done);
int warp_done_bit_cnt = POPC(warp_done);
#endif //__CUDACC__ && __HIP_PLATFORM_AMD__
if (thread_warp.thread_rank() == 0)
ATOMICADD_B(&n_pixels_done, POPC(warp_done));
ATOMICADD_B(&n_pixels_done, warp_done_bit_cnt);
// This sync is necessary to keep n_loaded until all threads are done with
// painting.
thread_block.sync();

@@ -37,7 +37,7 @@ inline void fill_cam_vecs(
res->pixel_dir_y.x = pixel_dir_y.data_ptr<float>()[0];
res->pixel_dir_y.y = pixel_dir_y.data_ptr<float>()[1];
res->pixel_dir_y.z = pixel_dir_y.data_ptr<float>()[2];
auto sensor_dir_z = pixel_dir_y.cross(pixel_dir_x);
auto sensor_dir_z = pixel_dir_y.cross(pixel_dir_x, -1);
sensor_dir_z /= sensor_dir_z.norm();
if (right_handed) {
sensor_dir_z *= -1.f;

|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float& min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode) {
|
||||
@@ -668,8 +668,8 @@ std::tuple<torch::Tensor, torch::Tensor> Renderer::forward(
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode) {
|
||||
@@ -888,14 +888,14 @@ std::tuple<torch::Tensor, torch::Tensor> Renderer::forward(
|
||||
};
|
||||
|
||||
std::tuple<
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>>
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>>
|
||||
Renderer::backward(
|
||||
const torch::Tensor& grad_im,
|
||||
const torch::Tensor& image,
|
||||
@@ -912,8 +912,8 @@ Renderer::backward(
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode,
|
||||
@@ -922,7 +922,7 @@ Renderer::backward(
|
||||
const bool& dif_rad,
|
||||
const bool& dif_cam,
|
||||
const bool& dif_opy,
|
||||
const at::optional<std::pair<uint, uint>>& dbg_pos) {
|
||||
const std::optional<std::pair<uint, uint>>& dbg_pos) {
|
||||
this->ensure_on_device(this->device_tracker.device());
|
||||
size_t batch_size;
|
||||
size_t n_points;
|
||||
@@ -1045,14 +1045,14 @@ Renderer::backward(
|
||||
}
|
||||
// Prepare the return value.
|
||||
std::tuple<
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>>
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>>
|
||||
ret;
|
||||
if (mode == 1 || (!dif_pos && !dif_col && !dif_rad && !dif_cam && !dif_opy)) {
|
||||
return ret;
|
||||
|
||||
@@ -44,21 +44,21 @@ struct Renderer {
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode);
|
||||
|
||||
std::tuple<
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>,
|
||||
at::optional<torch::Tensor>>
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>,
|
||||
std::optional<torch::Tensor>>
|
||||
backward(
|
||||
const torch::Tensor& grad_im,
|
||||
const torch::Tensor& image,
|
||||
@@ -75,8 +75,8 @@ struct Renderer {
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode,
|
||||
@@ -85,7 +85,7 @@ struct Renderer {
|
||||
const bool& dif_rad,
|
||||
const bool& dif_cam,
|
||||
const bool& dif_opy,
|
||||
const at::optional<std::pair<uint, uint>>& dbg_pos);
|
||||
const std::optional<std::pair<uint, uint>>& dbg_pos);
|
||||
|
||||
// Infrastructure.
|
||||
/**
|
||||
@@ -115,8 +115,8 @@ struct Renderer {
|
||||
const float& gamma,
|
||||
const float& max_depth,
|
||||
float& min_depth,
|
||||
const c10::optional<torch::Tensor>& bg_col,
|
||||
const c10::optional<torch::Tensor>& opacity,
|
||||
const std::optional<torch::Tensor>& bg_col,
|
||||
const std::optional<torch::Tensor>& opacity,
|
||||
const float& percent_allowed_difference,
|
||||
const uint& max_n_hits,
|
||||
const uint& mode);
|
||||
|
||||
@@ -244,8 +244,7 @@ at::Tensor RasterizeCoarseCuda(
|
||||
if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) {
|
||||
std::stringstream ss;
|
||||
ss << "In RasterizeCoarseCuda got num_bins_y: " << num_bins_y
|
||||
<< ", num_bins_x: " << num_bins_x << ", "
|
||||
<< "; that's too many!";
|
||||
<< ", num_bins_x: " << num_bins_x << ", " << "; that's too many!";
|
||||
AT_ERROR(ss.str());
|
||||
}
|
||||
auto opts = elems_per_batch.options().dtype(at::kInt);
|
||||
|
||||
@@ -144,7 +144,7 @@ __device__ void CheckPixelInsideFace(
|
||||
const bool zero_face_area =
|
||||
(face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);
|
||||
|
||||
if (zmax < 0 || cull_backfaces && back_face || outside_bbox ||
|
||||
if (zmax < 0 || (cull_backfaces && back_face) || outside_bbox ||
|
||||
zero_face_area) {
|
||||
return;
|
||||
}
|
||||
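The parentheses added above are purely clarifying: in C++, && binds tighter than ||, so the unparenthesized condition already grouped this way. Python's and/or have the same relationship, as this small check illustrates:

    # The two groupings are equivalent for any boolean inputs.
    zmax_neg, cull, back, outside, zero_area = False, True, True, False, False
    assert (zmax_neg or cull and back or outside or zero_area) == (
        zmax_neg or (cull and back) or outside or zero_area
    )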
@@ -583,6 +583,9 @@ at::Tensor RasterizeMeshesBackwardCuda(
at::checkAllSameType(
c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t});

// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic("RasterizeMeshesBackwardCuda");

// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(face_verts.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();

@@ -423,7 +423,8 @@ at::Tensor RasterizePointsBackwardCuda(
at::CheckedFrom c = "RasterizePointsBackwardCuda";
at::checkAllSameGPU(c, {points_t, idxs_t, grad_zbuf_t, grad_dists_t});
at::checkAllSameType(c, {points_t, grad_zbuf_t, grad_dists_t});

// This is nondeterministic because atomicAdd
at::globalContext().alertNotDeterministic("RasterizePointsBackwardCuda");
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();

@@ -155,7 +155,7 @@ at::Tensor FarthestPointSamplingCuda(

// Max possible threads per block
const int MAX_THREADS_PER_BLOCK = 1024;
const size_t threads = max(min(1 << points_pow_2, MAX_THREADS_PER_BLOCK), 1);
const size_t threads = max(min(1 << points_pow_2, MAX_THREADS_PER_BLOCK), 2);

// Create the accessors
auto points_a = points.packed_accessor64<float, 3, at::RestrictPtrTraits>();
@@ -215,10 +215,6 @@ at::Tensor FarthestPointSamplingCuda(
FarthestPointSamplingKernel<2><<<threads, threads, shared_mem, stream>>>(
points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
break;
case 1:
FarthestPointSamplingKernel<1><<<threads, threads, shared_mem, stream>>>(
points_a, lengths_a, K_a, idxs_a, min_point_dist_a, start_idxs_a);
break;
default:
FarthestPointSamplingKernel<1024>
<<<blocks, threads, shared_mem, stream>>>(

@@ -18,6 +18,8 @@ const auto vEpsilon = 1e-8;

// Common functions and operators for float2.

// Complex arithmetic is already defined for AMD.
#if !defined(USE_ROCM)
__device__ inline float2 operator-(const float2& a, const float2& b) {
return make_float2(a.x - b.x, a.y - b.y);
}
@@ -41,6 +43,7 @@ __device__ inline float2 operator*(const float2& a, const float2& b) {
__device__ inline float2 operator*(const float a, const float2& b) {
return make_float2(a * b.x, a * b.y);
}
#endif

__device__ inline float FloatMin3(const float a, const float b, const float c) {
return fminf(a, fminf(b, c));

@@ -23,37 +23,51 @@ WarpReduceMin(scalar_t* min_dists, int64_t* min_idxs, const size_t tid) {
min_idxs[tid] = min_idxs[tid + 32];
min_dists[tid] = min_dists[tid + 32];
}
// AMD does not use explicit syncwarp and instead automatically inserts memory
// fences during compilation.
#if !defined(USE_ROCM)
__syncwarp();
#endif
// s = 16
if (min_dists[tid] > min_dists[tid + 16]) {
min_idxs[tid] = min_idxs[tid + 16];
min_dists[tid] = min_dists[tid + 16];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
// s = 8
if (min_dists[tid] > min_dists[tid + 8]) {
min_idxs[tid] = min_idxs[tid + 8];
min_dists[tid] = min_dists[tid + 8];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
// s = 4
if (min_dists[tid] > min_dists[tid + 4]) {
min_idxs[tid] = min_idxs[tid + 4];
min_dists[tid] = min_dists[tid + 4];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
// s = 2
if (min_dists[tid] > min_dists[tid + 2]) {
min_idxs[tid] = min_idxs[tid + 2];
min_dists[tid] = min_dists[tid + 2];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
// s = 1
if (min_dists[tid] > min_dists[tid + 1]) {
min_idxs[tid] = min_idxs[tid + 1];
min_dists[tid] = min_dists[tid + 1];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
}

template <typename scalar_t>
@@ -65,30 +79,42 @@ __device__ void WarpReduceMax(
dists[tid] = dists[tid + 32];
dists_idx[tid] = dists_idx[tid + 32];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
if (dists[tid] < dists[tid + 16]) {
dists[tid] = dists[tid + 16];
dists_idx[tid] = dists_idx[tid + 16];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
if (dists[tid] < dists[tid + 8]) {
dists[tid] = dists[tid + 8];
dists_idx[tid] = dists_idx[tid + 8];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
if (dists[tid] < dists[tid + 4]) {
dists[tid] = dists[tid + 4];
dists_idx[tid] = dists_idx[tid + 4];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
if (dists[tid] < dists[tid + 2]) {
dists[tid] = dists[tid + 2];
dists_idx[tid] = dists_idx[tid + 2];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
if (dists[tid] < dists[tid + 1]) {
dists[tid] = dists[tid + 1];
dists_idx[tid] = dists_idx[tid + 1];
}
#if !defined(USE_ROCM)
__syncwarp();
#endif
}

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from .r2n2 import BlenderCamera, collate_batched_R2N2, R2N2, render_cubified_voxels
from .shapenet import ShapeNetCore
from .utils import collate_batched_meshes

@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from .r2n2 import R2N2
from .utils import BlenderCamera, collate_batched_R2N2, render_cubified_voxels


@@ -4,6 +4,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import json
import warnings
from os import path

Some files were not shown because too many files have changed in this diff