pytorch3d/.circleci/config.in.yml
Jeremy Reizenstein 0fecb2ddb9 pytorch version in package name
Summary:
PyTorch 1.5 is coming soon. I imagine we will want the ability to upload conda packages for pytorch3d to Anaconda Cloud for each of PyTorch 1.4 and PyTorch 1.5. This change adds the dependent PyTorch version to the name of the conda package to make that feasible.

As an example, a built package after this change will have a name like `linux-64/pytorch3d-0.1.1-py38_cu100_pyt14.tar.bz2`, instead of simply `linux-64/pytorch3d-0.1.1-py38_cu100.tar.bz2`.
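
For illustration, here is a minimal bash sketch of how such a suffix could be composed from the PYTHON_VERSION, CU_VERSION and PYTORCH_VERSION variables defined in the config below (the real logic lives in the packaging scripts; this snippet is an approximation, not the actual packaging code):

    # Hypothetical sketch: build the py38_cu100_pyt14 part of the package name.
    # Assumes PYTHON_VERSION=3.8, CU_VERSION=cu100, PYTORCH_VERSION=1.4.
    BUILD_STRING="py${PYTHON_VERSION//./}_${CU_VERSION}_pyt${PYTORCH_VERSION//./}"
    echo "$BUILD_STRING"   # -> py38_cu100_pyt14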

Also includes some minor cleanup of the CircleCI config.

Alternatives considered: (1) forcing users to update pytorch and pytorch3d together, (2) trying to get away with one build for multiple PyTorch versions.

Reviewed By: nikhilaravi

Differential Revision: D20599039

fbshipit-source-id: 20164eda4a5141afed47b3596e559950d796ffc9
2020-04-07 09:42:31 -07:00

version: 2.1
#examples:
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
#drive tests with nox or tox or pytest?
# -------------------------------------------------------------------------------------
# environments where we run our jobs
# -------------------------------------------------------------------------------------
setupcuda: &setupcuda
  run:
    name: Setup CUDA
    working_directory: ~/
    command: |
      # download and install nvidia drivers, cuda, etc
      wget --no-verbose --no-clobber -P ~/nvidia-downloads 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
      wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
      sudo /bin/bash ~/nvidia-downloads/NVIDIA-Linux-x86_64-430.40.run --no-drm -q --ui=none
      sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
      echo "Done installing CUDA."
      pyenv versions
      nvidia-smi
      pyenv global 3.7.0
gpu: &gpu
  environment:
    CUDA_VERSION: "10.2"
  machine:
    image: default
  resource_class: gpu.medium # tesla m60
binary_common: &binary_common
  parameters:
    # Edit these defaults to do a release
    build_version:
      description: "version number of release binary; by default, build a nightly"
      type: string
      default: ""
    pytorch_version:
      description: "PyTorch version to build against; by default, use a nightly"
      type: string
      default: ""
    # Don't edit these
    python_version:
      description: "Python version to build against (e.g., 3.7)"
      type: string
    cu_version:
      description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
      type: string
    wheel_docker_image:
      description: "Wheel only: what docker image to use"
      type: string
      default: "pytorch/manylinux-cuda101"
  environment:
    PYTHON_VERSION: << parameters.python_version >>
    BUILD_VERSION: << parameters.build_version >>
    PYTORCH_VERSION: << parameters.pytorch_version >>
    CU_VERSION: << parameters.cu_version >>
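# The environment variables above are what the packaging scripts receive;
# the conda_cuda job below forwards the same set into docker via VARS_TO_PASS.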
jobs:
  main:
    <<: *gpu
    machine:
      image: ubuntu-1604:201903-01
    steps:
      - checkout
      - <<: *setupcuda
      - run: pip3 install --progress-bar off wheel matplotlib 'pillow<7'
      - run: pip3 install --progress-bar off torch torchvision
      # - run: conda create -p ~/conda_env python=3.7 numpy
      # - run: conda activate ~/conda_env
      # - run: conda install -c pytorch pytorch torchvision
      - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 setup.py build_ext --inplace
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 -m unittest discover -v -s tests
      - run: python3 setup.py bdist_wheel
  binary_linux_wheel:
    <<: *binary_common
    docker:
      - image: << parameters.wheel_docker_image >>
    resource_class: 2xlarge+
    steps:
      - checkout
      - run: packaging/build_wheel.sh
      - store_artifacts:
          path: dist
      - persist_to_workspace:
          root: dist
          paths:
            - "*"
  binary_linux_conda:
    <<: *binary_common
    docker:
      - image: "pytorch/conda-cuda"
    resource_class: 2xlarge+
    steps:
      - checkout
      # This is building with cuda but no gpu present,
      # so we aren't running the tests.
      - run: TEST_FLAG=--no-test packaging/build_conda.sh
      - store_artifacts:
          path: /opt/conda/conda-bld/linux-64
      - persist_to_workspace:
          root: /opt/conda/conda-bld/linux-64
          paths:
            - "*"
  binary_linux_conda_cuda:
    <<: *binary_common
    machine:
      image: ubuntu-1604:201903-01
    resource_class: gpu.medium
    steps:
      - checkout
      - run:
          name: Setup environment
          command: |
            set -e
            curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
            curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
            sudo apt-get update
            sudo apt-get install \
                apt-transport-https \
                ca-certificates \
                curl \
                gnupg-agent \
                software-properties-common
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
            sudo add-apt-repository \
                "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
                $(lsb_release -cs) \
                stable"
            sudo apt-get update
            export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
            sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
            # Add the package repositories
            distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
            curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
            curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
            export NVIDIA_CONTAINER_VERSION="1.0.3-1"
            sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
            sudo systemctl restart docker
            DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
            wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
            sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
            nvidia-smi
      - run:
          name: Pull docker image
          command: |
            set -e
            export DOCKER_IMAGE=pytorch/conda-cuda
            echo Pulling docker image $DOCKER_IMAGE
            docker pull $DOCKER_IMAGE >/dev/null
      - run:
          name: Build and run tests
          command: |
            set -e
            cd ${HOME}/project/
            export DOCKER_IMAGE=pytorch/conda-cuda
            export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e UNICODE_ABI -e CU_VERSION"
            docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
  binary_macos_wheel:
    <<: *binary_common
    macos:
      xcode: "9.0"
    steps:
      - checkout
      - run:
          # Cannot easily deduplicate this as source'ing activate
          # will set environment variables which we need to propagate
          # to build_wheel.sh
          command: |
            curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
            sh conda.sh -b
            source $HOME/miniconda3/bin/activate
            packaging/build_wheel.sh
      - store_artifacts:
          path: dist
workflows:
  version: 2
  build_and_test:
    jobs:
      - main
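      # {{workflows()}} below is a template placeholder: this file is
      # config.in.yml, and the placeholder is expanded into the generated
      # matrix of build jobs when the final config.yml is produced.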
      {{workflows()}}
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py3.7_cu100
          python_version: "3.7"
          pytorch_version: "1.4"
          cu_version: "cu100"
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py3.6_cpu
          python_version: '3.6'
          pytorch_version: '1.4'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py3.7_cpu
          python_version: '3.7'
          pytorch_version: '1.4'
      - binary_macos_wheel:
          cu_version: cpu
          name: macos_wheel_py3.8_cpu
          python_version: '3.8'
          pytorch_version: '1.4'