pytorch3d/.circleci/config.in.yml
Jeremy Reizenstein 5fbdb99aec builds for pytorch 1.10.0
Summary:
Add builds corresponding to the new pytorch 1.10.0. We omit CUDA 11.3 testing because it fails with current hardware, and omit the main build too for the moment.

Also move to the newer GPU CircleCI executors.

Reviewed By: gkioxari

Differential Revision: D32335934

fbshipit-source-id: 416d92a8eecd06ef7fc742664a5f2d46f93415f8
2021-11-11 02:03:37 -08:00

version: 2.1
#examples:
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
#drive tests with nox or tox or pytest?
# -------------------------------------------------------------------------------------
# environments where we run our jobs
# -------------------------------------------------------------------------------------
setupcuda: &setupcuda
  run:
    name: Setup CUDA
    working_directory: ~/
    command: |
      # download and install nvidia drivers, cuda, etc
      wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run
      sudo sh ~/nvidia-downloads/cuda_11.3.1_465.19.01_linux.run --silent
      echo "Done installing CUDA."
      pyenv versions
      nvidia-smi
      pyenv global 3.9.1
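
# `&setupcuda` is a plain YAML anchor: the `main` job below pulls this step in
# with the merge key `<<: *setupcuda`, so the CUDA install is written once and
# reused. This is YAML-level reuse, not a CircleCI feature.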
binary_common: &binary_common
  parameters:
    # Edit these defaults to do a release
    build_version:
      description: "version number of release binary; by default, build a nightly"
      type: string
      default: ""
    pytorch_version:
      description: "PyTorch version to build against; by default, use a nightly"
      type: string
      default: ""
    # Don't edit these
    python_version:
      description: "Python version to build against (e.g., 3.7)"
      type: string
    cu_version:
      description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
      type: string
    wheel_docker_image:
      description: "Wheel only: what docker image to use"
      type: string
      default: "pytorch/manylinux-cuda101"
    conda_docker_image:
      description: "what docker image to use for the conda build and GPU test run"
      type: string
      default: "pytorch/conda-cuda"
  environment:
    PYTHON_VERSION: << parameters.python_version >>
    BUILD_VERSION: << parameters.build_version >>
    PYTORCH_VERSION: << parameters.pytorch_version >>
    CU_VERSION: << parameters.cu_version >>
    TESTRUN_DOCKER_IMAGE: << parameters.conda_docker_image >>
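
# Every binary_* job below merges this block via `<<: *binary_common`, picking up
# the parameters and the environment mapping that forwards them to the build
# scripts. As a sketch (not an entry in this file), a release build would
# override the two editable defaults in a workflow entry, e.g.:
#   - binary_linux_wheel:
#       python_version: "3.9"
#       cu_version: "cu113"
#       build_version: "0.6.0"     # hypothetical release version
#       pytorch_version: "1.10.0"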
jobs:
  main:
    environment:
      CUDA_VERSION: "11.3"
    resource_class: gpu.nvidia.small.multi
    machine:
      image: ubuntu-2004:202101-01
    steps:
      - checkout
      - <<: *setupcuda
      - run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
      - run: pip3 install --progress-bar off torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
      # - run: conda create -p ~/conda_env python=3.7 numpy
      # - run: conda activate ~/conda_env
      # - run: conda install -c pytorch pytorch torchvision
      - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
      - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
      - run:
          name: build
          command: |
            export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.3/lib64
            python3 setup.py build_ext --inplace
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.3/lib64 python3 -m unittest discover -v -s tests
      - run: python3 setup.py bdist_wheel
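
  # Both the extension build and the test run above prepend the CUDA 11.3
  # library directory to LD_LIBRARY_PATH because the toolkit installed by
  # setupcuda is not on the default loader path.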
  binary_linux_wheel:
    <<: *binary_common
    docker:
      - image: << parameters.wheel_docker_image >>
        auth:
          username: $DOCKERHUB_USERNAME
          password: $DOCKERHUB_TOKEN
    resource_class: 2xlarge+
    steps:
      - checkout
      - run: MAX_JOBS=15 packaging/build_wheel.sh
      - store_artifacts:
          path: dist
      - persist_to_workspace:
          root: dist
          paths:
            - "*"
  binary_linux_conda:
    <<: *binary_common
    docker:
      - image: "<< parameters.conda_docker_image >>"
        auth:
          username: $DOCKERHUB_USERNAME
          password: $DOCKERHUB_TOKEN
    resource_class: 2xlarge+
    steps:
      - checkout
      # This is building with CUDA but no GPU present,
      # so we aren't running the tests.
      - run:
          name: build
          no_output_timeout: 20m
          command: MAX_JOBS=15 TEST_FLAG=--no-test packaging/build_conda.sh
      - store_artifacts:
          path: /opt/conda/conda-bld/linux-64
      - persist_to_workspace:
          root: /opt/conda/conda-bld/linux-64
          paths:
            - "*"
  binary_linux_conda_cuda:
    <<: *binary_common
    machine:
      image: ubuntu-1604:201903-01
    resource_class: gpu.nvidia.small.multi
    steps:
      - checkout
      - run:
          name: Setup environment
          command: |
            set -e
            curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
            curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
            sudo apt-get update
            sudo apt-get install \
              apt-transport-https \
              ca-certificates \
              curl \
              gnupg-agent \
              software-properties-common
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
            sudo add-apt-repository \
              "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
              $(lsb_release -cs) \
              stable"
            sudo apt-get update
            export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
            sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
            # Add the package repositories
            distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
            curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
            curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
            export NVIDIA_CONTAINER_VERSION="1.0.3-1"
            sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
            sudo systemctl restart docker
            DRIVER_FN="NVIDIA-Linux-x86_64-460.84.run"
            wget "https://us.download.nvidia.com/XFree86/Linux-x86_64/460.84/$DRIVER_FN"
            sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
            nvidia-smi
      - run:
          name: Pull docker image
          command: |
            set -e
            { docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
            echo Pulling docker image $TESTRUN_DOCKER_IMAGE
            docker pull $TESTRUN_DOCKER_IMAGE
      - run:
          name: Build and run tests
          no_output_timeout: 20m
          command: |
            set -e
            cd ${HOME}/project/
            export JUST_TESTRUN=1
            VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
            docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} ./packaging/build_conda.sh
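
  # In the docker run above, `--gpus all` exposes the host GPUs (enabled by the
  # nvidia-container-toolkit installed earlier), `--ipc=host` gives PyTorch's
  # DataLoader workers adequate shared memory, and VARS_TO_PASS forwards the
  # build parameters into the container for packaging/build_conda.sh.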
  binary_macos_wheel:
    <<: *binary_common
    macos:
      xcode: "12.0"
    steps:
      - checkout
      - run:
          # Cannot easily deduplicate this as source'ing activate
          # will set environment variables which we need to propagate
          # to build_wheel.sh
          command: |
            curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
            sh conda.sh -b
            source $HOME/miniconda3/bin/activate
            packaging/build_wheel.sh
      - store_artifacts:
          path: dist
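
  # The macOS wheels are CPU-only builds: the workflow entries below pass
  # cu_version: cpu, and no CUDA setup happens on this executor.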
workflows:
  version: 2
  build_and_test:
    jobs:
      # - main:
      #     context: DOCKERHUB_TOKEN
      {{workflows()}}
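      # {{workflows()}} above is a template placeholder, not YAML: this file is
      # config.in.yml, and a generator script (presumably .circleci/regenerate.py,
      # as in other PyTorch-ecosystem repos) expands it into the full matrix of
      # nightly wheel/conda jobs when producing config.yml.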
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py37_cu102_pyt170
          context: DOCKERHUB_TOKEN
          python_version: "3.7"
          pytorch_version: '1.7.0'
          cu_version: "cu102"
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py36_cpu
          python_version: '3.6'
          pytorch_version: '1.9.0'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py37_cpu
          python_version: '3.7'
          pytorch_version: '1.9.0'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py38_cpu
          python_version: '3.8'
          pytorch_version: '1.9.0'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py39_cpu
          python_version: '3.9'
          pytorch_version: '1.9.0'
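
# To materialize config.yml from this template after editing, the repo's
# regenerate script is run from the repo root; a hypothetical invocation
# (the exact script name and options live in .circleci) would be:
#   python .circleci/regenerate.py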