Initial commit

fbshipit-source-id: ad58e416e3ceeca85fae0583308968d04e78fe0d
This commit is contained in:
facebook-github-bot
2020-01-23 11:53:41 -08:00
commit dbf06b504b
211 changed files with 47362 additions and 0 deletions

6
.circleci/check.sh Normal file
View File

@@ -0,0 +1,6 @@
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Run this script before committing config.yml to verify it is valid yaml.
# cd to this script's directory so config.yml is found no matter where the
# script is invoked from (previously it only worked with cwd=.circleci).
cd "$(dirname "$0")"
python -c 'import yaml; yaml.safe_load(open("config.yml"))' && echo OK

199
.circleci/config.in.yml Normal file
View File

@@ -0,0 +1,199 @@
# Jinja template for the CircleCI config: .circleci/regenerate.py renders this
# file (expanding {{workflows()}}) into .circleci/config.yml.
version: 2.1
#examples:
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
#drive tests with nox or tox or pytest?
# -------------------------------------------------------------------------------------
# environments where we run our jobs
# -------------------------------------------------------------------------------------
# Reusable step (anchor) that installs the NVIDIA driver and CUDA 10.2 on a
# machine executor, then selects python 3.7.0 via pyenv. Spliced into job
# steps with `- <<: *setupcuda`.
setupcuda: &setupcuda
  run:
    name: Setup CUDA
    working_directory: ~/
    command: |
      # download and install nvidia drivers, cuda, etc
      wget --no-verbose --no-clobber -P ~/nvidia-downloads 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
      wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
      sudo /bin/bash ~/nvidia-downloads/NVIDIA-Linux-x86_64-430.40.run --no-drm -q --ui=none
      sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
      echo "Done installing CUDA."
      # Diagnostics: list installed pythons and confirm the GPU is visible.
      pyenv versions
      nvidia-smi
      pyenv global 3.7.0
# Base executor settings (anchor) for GPU jobs; merged into a job with
# `<<: *gpu`.
gpu: &gpu
  environment:
    CUDA_VERSION: "10.2"
  machine:
    image: default
  resource_class: gpu.medium # tesla m60
# Shared parameters and environment (anchor) for all binary build jobs; the
# parameter values are exported as env vars for packaging/build_*.sh.
binary_common: &binary_common
  parameters:
    # Edit these defaults to do a release
    build_version:
      description: "version number of release binary; by default, build a nightly"
      type: string
      default: ""
    pytorch_version:
      description: "PyTorch version to build against; by default, use a nightly"
      type: string
      default: ""
    # Don't edit these
    python_version:
      description: "Python version to build against (e.g., 3.7)"
      type: string
    cu_version:
      description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
      type: string
    wheel_docker_image:
      description: "Wheel only: what docker image to use"
      type: string
      default: "pytorch/manylinux-cuda101"
  environment:
    PYTHON_VERSION: << parameters.python_version >>
    BUILD_VERSION: << parameters.build_version >>
    PYTORCH_VERSION: << parameters.pytorch_version >>
    CU_VERSION: << parameters.cu_version >>
jobs:
  # Build the extension and run the unit tests on a GPU machine.
  main:
    <<: *gpu
    machine:
      image: ubuntu-1604:201903-01
    steps:
      - checkout
      - <<: *setupcuda
      - run: pip3 install --progress-bar off wheel matplotlib 'pillow<7'
      - run: pip3 install --progress-bar off torch torchvision
      # - run: conda create -p ~/conda_env python=3.7 numpy
      # - run: conda activate ~/conda_env
      # - run: conda install -c pytorch pytorch torchvision
      - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
      # Fix: the variable being extended was misspelled ($LD_LIBARY_PATH),
      # which always expanded empty and dropped any pre-existing library path.
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 setup.py build_ext --inplace
      # Use python3 consistently with the other steps.
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 -m unittest discover -v -s tests
      - run: python3 setup.py bdist_wheel
  # Build a wheel inside a manylinux docker image (CPU-only container, so no
  # tests run here); the artifact is stored and passed to later workflow jobs.
  binary_linux_wheel:
    <<: *binary_common
    docker:
      - image: << parameters.wheel_docker_image >>
    resource_class: 2xlarge+
    steps:
      - checkout
      - run: packaging/build_wheel.sh
      - store_artifacts:
          path: dist
      - persist_to_workspace:
          root: dist
          paths:
            - "*"
  # Build a conda package in the pytorch/conda-cuda docker image and publish
  # the built package as an artifact / workspace output.
  binary_linux_conda:
    <<: *binary_common
    docker:
      - image: "pytorch/conda-cuda"
    resource_class: 2xlarge+
    steps:
      - checkout
      # This is building with cuda but no gpu present,
      # so we aren't running the tests.
      - run: TEST_FLAG=--no-test packaging/build_conda.sh
      - store_artifacts:
          path: /opt/conda/conda-bld/linux-64
      - persist_to_workspace:
          root: /opt/conda/conda-bld/linux-64
          paths:
            - "*"
  # Build the conda package on a GPU machine and run the tests inside docker:
  # installs docker-ce, the nvidia container toolkit and the NVIDIA driver,
  # then runs packaging/build_conda.sh inside pytorch/conda-cuda with --gpus.
  binary_linux_conda_cuda:
    <<: *binary_common
    machine:
      image: ubuntu-1604:201903-01
    resource_class: gpu.medium
    steps:
      - checkout
      - run:
          name: Setup environment
          command: |
            set -e
            curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
            curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
            sudo apt-get update
            sudo apt-get install \
              apt-transport-https \
              ca-certificates \
              curl \
              gnupg-agent \
              software-properties-common
            # Docker's own apt repository (pinned version below).
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
            sudo add-apt-repository \
              "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
              $(lsb_release -cs) \
              stable"
            sudo apt-get update
            export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
            sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
            # Add the package repositories
            distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
            curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
            curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
            export NVIDIA_CONTAINER_VERSION="1.0.3-1"
            sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
            sudo systemctl restart docker
            # Install the NVIDIA driver; on failure dump the installer log.
            DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
            wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
            sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
            nvidia-smi
      - run:
          name: Pull docker image
          command: |
            set -e
            export DOCKER_IMAGE=pytorch/conda-cuda
            echo Pulling docker image $DOCKER_IMAGE
            docker pull $DOCKER_IMAGE >/dev/null
      - run:
          name: Build and run tests
          command: |
            set -e
            cd ${HOME}/project/
            export DOCKER_IMAGE=pytorch/conda-cuda
            # Forward the binary_common environment into the container.
            export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e UNICODE_ABI -e CU_VERSION"
            docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
workflows:
  version: 2
  build_and_test:
    jobs:
      - main
      # Expanded by regenerate.py into the full python x cuda build matrix.
      {{workflows()}}
      # Extra build with no build_version/pytorch_version pinned (defaults to
      # a nightly per binary_common).
      # NOTE(review): this reuses the name binary_linux_conda_py3.7_cu101 that
      # the generated matrix also emits — confirm CircleCI accepts duplicate
      # workflow job names.
      - binary_linux_conda:
          cu_version: cu101
          name: binary_linux_conda_py3.7_cu101
          python_version: '3.7'
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py3.7_cu100
          python_version: "3.7"
          pytorch_version: "1.4"
          cu_version: "cu100"

258
.circleci/config.yml Normal file
View File

@@ -0,0 +1,258 @@
# Generated file: .circleci/regenerate.py renders this from config.in.yml.
# Prefer editing config.in.yml and re-running regenerate.py.
version: 2.1
#examples:
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
#drive tests with nox or tox or pytest?
# -------------------------------------------------------------------------------------
# environments where we run our jobs
# -------------------------------------------------------------------------------------
# Reusable step (anchor) that installs the NVIDIA driver and CUDA 10.2 on a
# machine executor, then selects python 3.7.0 via pyenv. Spliced into job
# steps with `- <<: *setupcuda`.
setupcuda: &setupcuda
  run:
    name: Setup CUDA
    working_directory: ~/
    command: |
      # download and install nvidia drivers, cuda, etc
      wget --no-verbose --no-clobber -P ~/nvidia-downloads 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
      wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
      sudo /bin/bash ~/nvidia-downloads/NVIDIA-Linux-x86_64-430.40.run --no-drm -q --ui=none
      sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
      echo "Done installing CUDA."
      # Diagnostics: list installed pythons and confirm the GPU is visible.
      pyenv versions
      nvidia-smi
      pyenv global 3.7.0
# Base executor settings (anchor) for GPU jobs; merged into a job with
# `<<: *gpu`.
gpu: &gpu
  environment:
    CUDA_VERSION: "10.2"
  machine:
    image: default
  resource_class: gpu.medium # tesla m60
# Shared parameters and environment (anchor) for all binary build jobs; the
# parameter values are exported as env vars for packaging/build_*.sh.
binary_common: &binary_common
  parameters:
    # Edit these defaults to do a release
    build_version:
      description: "version number of release binary; by default, build a nightly"
      type: string
      default: ""
    pytorch_version:
      description: "PyTorch version to build against; by default, use a nightly"
      type: string
      default: ""
    # Don't edit these
    python_version:
      description: "Python version to build against (e.g., 3.7)"
      type: string
    cu_version:
      description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
      type: string
    wheel_docker_image:
      description: "Wheel only: what docker image to use"
      type: string
      default: "pytorch/manylinux-cuda101"
  environment:
    PYTHON_VERSION: << parameters.python_version >>
    BUILD_VERSION: << parameters.build_version >>
    PYTORCH_VERSION: << parameters.pytorch_version >>
    CU_VERSION: << parameters.cu_version >>
jobs:
  # Build the extension and run the unit tests on a GPU machine.
  # (This file is rendered from config.in.yml by regenerate.py.)
  main:
    <<: *gpu
    machine:
      image: ubuntu-1604:201903-01
    steps:
      - checkout
      - <<: *setupcuda
      - run: pip3 install --progress-bar off wheel matplotlib 'pillow<7'
      - run: pip3 install --progress-bar off torch torchvision
      # - run: conda create -p ~/conda_env python=3.7 numpy
      # - run: conda activate ~/conda_env
      # - run: conda install -c pytorch pytorch torchvision
      - run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
      # Fix: the variable being extended was misspelled ($LD_LIBARY_PATH),
      # which always expanded empty and dropped any pre-existing library path.
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 setup.py build_ext --inplace
      # Use python3 consistently with the other steps.
      - run: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.2/lib64 python3 -m unittest discover -v -s tests
      - run: python3 setup.py bdist_wheel
  # Build a wheel inside a manylinux docker image (CPU-only container, so no
  # tests run here); the artifact is stored and passed to later workflow jobs.
  binary_linux_wheel:
    <<: *binary_common
    docker:
      - image: << parameters.wheel_docker_image >>
    resource_class: 2xlarge+
    steps:
      - checkout
      - run: packaging/build_wheel.sh
      - store_artifacts:
          path: dist
      - persist_to_workspace:
          root: dist
          paths:
            - "*"
  # Build a conda package in the pytorch/conda-cuda docker image and publish
  # the built package as an artifact / workspace output.
  binary_linux_conda:
    <<: *binary_common
    docker:
      - image: "pytorch/conda-cuda"
    resource_class: 2xlarge+
    steps:
      - checkout
      # This is building with cuda but no gpu present,
      # so we aren't running the tests.
      - run: TEST_FLAG=--no-test packaging/build_conda.sh
      - store_artifacts:
          path: /opt/conda/conda-bld/linux-64
      - persist_to_workspace:
          root: /opt/conda/conda-bld/linux-64
          paths:
            - "*"
  # Build the conda package on a GPU machine and run the tests inside docker:
  # installs docker-ce, the nvidia container toolkit and the NVIDIA driver,
  # then runs packaging/build_conda.sh inside pytorch/conda-cuda with --gpus.
  binary_linux_conda_cuda:
    <<: *binary_common
    machine:
      image: ubuntu-1604:201903-01
    resource_class: gpu.medium
    steps:
      - checkout
      - run:
          name: Setup environment
          command: |
            set -e
            curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
            curl -L https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
            sudo apt-get update
            sudo apt-get install \
              apt-transport-https \
              ca-certificates \
              curl \
              gnupg-agent \
              software-properties-common
            # Docker's own apt repository (pinned version below).
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
            sudo add-apt-repository \
              "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
              $(lsb_release -cs) \
              stable"
            sudo apt-get update
            export DOCKER_VERSION="5:19.03.2~3-0~ubuntu-xenial"
            sudo apt-get install docker-ce=${DOCKER_VERSION} docker-ce-cli=${DOCKER_VERSION} containerd.io=1.2.6-3
            # Add the package repositories
            distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
            curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
            curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
            export NVIDIA_CONTAINER_VERSION="1.0.3-1"
            sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
            sudo systemctl restart docker
            # Install the NVIDIA driver; on failure dump the installer log.
            DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
            wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
            sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
            nvidia-smi
      - run:
          name: Pull docker image
          command: |
            set -e
            export DOCKER_IMAGE=pytorch/conda-cuda
            echo Pulling docker image $DOCKER_IMAGE
            docker pull $DOCKER_IMAGE >/dev/null
      - run:
          name: Build and run tests
          command: |
            set -e
            cd ${HOME}/project/
            export DOCKER_IMAGE=pytorch/conda-cuda
            # Forward the binary_common environment into the container.
            export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e UNICODE_ABI -e CU_VERSION"
            docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
workflows:
  version: 2
  build_and_test:
    jobs:
      - main
      # Matrix generated by regenerate.py: conda builds for
      # python {3.6, 3.7, 3.8} x {cu92, cu100, cu101}.
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu92
          name: binary_linux_conda_py3.6_cu92
          python_version: '3.6'
          pytorch_version: '1.4'
          wheel_docker_image: pytorch/manylinux-cuda92
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu100
          name: binary_linux_conda_py3.6_cu100
          python_version: '3.6'
          pytorch_version: '1.4'
          wheel_docker_image: pytorch/manylinux-cuda100
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu101
          name: binary_linux_conda_py3.6_cu101
          python_version: '3.6'
          pytorch_version: '1.4'
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu92
          name: binary_linux_conda_py3.7_cu92
          python_version: '3.7'
          pytorch_version: '1.4'
          wheel_docker_image: pytorch/manylinux-cuda92
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu100
          name: binary_linux_conda_py3.7_cu100
          python_version: '3.7'
          pytorch_version: '1.4'
          wheel_docker_image: pytorch/manylinux-cuda100
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu101
          name: binary_linux_conda_py3.7_cu101
          python_version: '3.7'
          pytorch_version: '1.4'
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu92
          name: binary_linux_conda_py3.8_cu92
          python_version: '3.8'
          pytorch_version: '1.4'
          wheel_docker_image: pytorch/manylinux-cuda92
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu100
          name: binary_linux_conda_py3.8_cu100
          python_version: '3.8'
          pytorch_version: '1.4'
          wheel_docker_image: pytorch/manylinux-cuda100
      - binary_linux_conda:
          build_version: 0.1.0
          cu_version: cu101
          name: binary_linux_conda_py3.8_cu101
          python_version: '3.8'
          pytorch_version: '1.4'
      # NOTE(review): this entry reuses the name binary_linux_conda_py3.7_cu101
      # already used above — confirm CircleCI accepts duplicate workflow job
      # names, or rename one in config.in.yml.
      - binary_linux_conda:
          cu_version: cu101
          name: binary_linux_conda_py3.7_cu101
          python_version: '3.7'
      - binary_linux_conda_cuda:
          name: testrun_conda_cuda_py3.7_cu100
          python_version: "3.7"
          pytorch_version: "1.4"
          cu_version: "cu100"

119
.circleci/regenerate.py Normal file
View File

@@ -0,0 +1,119 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
This script is adapted from the torchvision one.
There is no python2.7 nor macos.
TODO: python 3.8 when pytorch 1.4.
"""
import os.path
import jinja2
import yaml
def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
    """Render the workflow-job matrix as YAML indented by ``indentation``.

    Iterates over every combination of build type, python version and CUDA
    version, delegating each combination to workflow_pair().
    """
    # add "wheel" here for pypi
    build_types = ["conda"]
    python_versions = ["3.6", "3.7", "3.8"]
    cu_versions = ["cu92", "cu100", "cu101"]
    entries = [
        entry
        for btype in build_types
        for python_version in python_versions
        for cu_version in cu_versions
        for entry in workflow_pair(
            btype=btype,
            python_version=python_version,
            cu_version=cu_version,
            prefix=prefix,
            upload=upload,
            filter_branch=filter_branch,
        )
    ]
    return indent(indentation, entries)
def workflow_pair(
    *, btype, python_version, cu_version, prefix="", upload=False, filter_branch
):
    """Return the workflow entries for one (btype, python, cuda) combination.

    Always contains the build workflow; when ``upload`` is true an upload
    workflow that requires the build is appended.
    """
    name = f"{prefix}binary_linux_{btype}_py{python_version}_{cu_version}"
    pair = [
        generate_base_workflow(
            base_workflow_name=name,
            python_version=python_version,
            cu_version=cu_version,
            btype=btype,
            filter_branch=filter_branch,
        )
    ]
    if upload:
        pair.append(
            generate_upload_workflow(
                base_workflow_name=name,
                btype=btype,
                cu_version=cu_version,
                filter_branch=filter_branch,
            )
        )
    return pair
def generate_base_workflow(
    *, base_workflow_name, python_version, cu_version, btype, filter_branch=None
):
    """Build the job-parameters mapping for one binary build workflow entry."""
    params = {
        "name": base_workflow_name,
        "python_version": python_version,
        "cu_version": cu_version,
        "build_version": "0.1.0",
        "pytorch_version": "1.4",
    }
    # cu101 is deliberately absent: it is the wheel_docker_image default.
    docker_images = {
        "cu92": "pytorch/manylinux-cuda92",
        "cu100": "pytorch/manylinux-cuda100",
    }
    if cu_version in docker_images:
        params["wheel_docker_image"] = docker_images[cu_version]
    if filter_branch is not None:
        params["filters"] = {"branches": {"only": filter_branch}}
    return {f"binary_linux_{btype}": params}
def generate_upload_workflow(
    *, base_workflow_name, btype, cu_version, filter_branch
):
    """Build the parameters for the upload workflow paired with a build job."""
    params = {
        "name": f"{base_workflow_name}_upload",
        "context": "org-member",
        "requires": [base_workflow_name],
    }
    # Wheels are uploaded into a per-CUDA-version subfolder.
    if btype == "wheel":
        params["subfolder"] = cu_version + "/"
    if filter_branch is not None:
        params["filters"] = {"branches": {"only": filter_branch}}
    return {f"binary_{btype}_upload": params}
def indent(indentation, data_list):
    """Dump ``data_list`` as block-style YAML, indenting every line after the
    first by ``indentation`` spaces (so it can be spliced into the template).
    """
    dumped = yaml.dump(data_list, default_flow_style=False)
    separator = "\n" + indentation * " "
    return separator.join(dumped.splitlines())
if __name__ == "__main__":
    # Render config.in.yml -> config.yml in this script's directory,
    # exposing workflows() to the template as {{workflows(...)}}.
    script_dir = os.path.dirname(__file__)
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(script_dir),
        lstrip_blocks=True,
        autoescape=False,
    )
    rendered = environment.get_template("config.in.yml").render(workflows=workflows)
    with open(os.path.join(script_dir, "config.yml"), "w") as f:
        f.write(rendered)