Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-08-02 03:42:50 +08:00)
pytorch 1.7 support
Summary: CircleCI build configuration to support PyTorch 1.7, including binaries with CUDA 11.0. Note that the default torch on pip is still on CUDA 10.2, so I have left the `main` (non conda build) on CUDA 10.2 with the existing driver.

Reviewed By: gkioxari

Differential Revision: D24623523

fbshipit-source-id: 59cfa1be06c16225f0f12ed336c07220e8a9a511
commit 6f4697bc1b
parent fdcf368708
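As a quick, hedged illustration of the note in the summary (not part of this commit): the CUDA toolkit a given torch build was compiled against can be checked directly from Python, which is how the cu102 default pip wheel is distinguished from the new cu110 conda binaries.

```python
# Hedged illustration, not part of this commit: report which CUDA toolkit the
# installed torch build was compiled against.
import torch

print(torch.__version__)   # e.g. "1.7.0"
print(torch.version.cuda)  # "10.2" for the default pip wheel; "11.0" for a cu110 build
```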
@@ -18,9 +18,7 @@ setupcuda: &setupcuda
 working_directory: ~/
 command: |
 # download and install nvidia drivers, cuda, etc
-wget --no-verbose --no-clobber -P ~/nvidia-downloads 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
 wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
-sudo /bin/bash ~/nvidia-downloads/NVIDIA-Linux-x86_64-430.40.run --no-drm -q --ui=none
 sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
 echo "Done installing CUDA."
 pyenv versions
@@ -157,8 +155,8 @@ jobs:
 sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
 sudo systemctl restart docker
 
-DRIVER_FN="NVIDIA-Linux-x86_64-440.59.run"
-wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
+DRIVER_FN="NVIDIA-Linux-x86_64-450.80.02.run"
+wget "https://us.download.nvidia.com/XFree86/Linux-x86_64/450.80.02/$DRIVER_FN"
 sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
 nvidia-smi
 
@@ -178,7 +176,7 @@ jobs:
 cd ${HOME}/project/
 
 export DOCKER_IMAGE=pytorch/conda-cuda
-export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e UNICODE_ABI -e CU_VERSION"
+export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION"
 
 docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
 
@@ -216,6 +214,11 @@ workflows:
   python_version: "3.7"
   pytorch_version: '1.6.0'
   cu_version: "cu102"
+- binary_linux_conda_cuda:
+  name: testrun_conda_cuda_py37_cu110_pyt170
+  python_version: "3.7"
+  pytorch_version: '1.7.0'
+  cu_version: "cu110"
 - binary_macos_wheel:
   cu_version: cpu
   name: macos_wheel_py36_cpu
@@ -18,9 +18,7 @@ setupcuda: &setupcuda
 working_directory: ~/
 command: |
 # download and install nvidia drivers, cuda, etc
-wget --no-verbose --no-clobber -P ~/nvidia-downloads 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
 wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
-sudo /bin/bash ~/nvidia-downloads/NVIDIA-Linux-x86_64-430.40.run --no-drm -q --ui=none
 sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
 echo "Done installing CUDA."
 pyenv versions
@@ -157,8 +155,8 @@ jobs:
 sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit=${NVIDIA_CONTAINER_VERSION}
 sudo systemctl restart docker
 
-DRIVER_FN="NVIDIA-Linux-x86_64-440.59.run"
-wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
+DRIVER_FN="NVIDIA-Linux-x86_64-450.80.02.run"
+wget "https://us.download.nvidia.com/XFree86/Linux-x86_64/450.80.02/$DRIVER_FN"
 sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
 nvidia-smi
 
@@ -178,7 +176,7 @@ jobs:
 cd ${HOME}/project/
 
 export DOCKER_IMAGE=pytorch/conda-cuda
-export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e UNICODE_ABI -e CU_VERSION"
+export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION"
 
 docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
 
@@ -260,6 +258,21 @@ workflows:
   name: linux_conda_py36_cu102_pyt160
   python_version: '3.6'
   pytorch_version: 1.6.0
+- binary_linux_conda:
+  cu_version: cu101
+  name: linux_conda_py36_cu101_pyt170
+  python_version: '3.6'
+  pytorch_version: 1.7.0
+- binary_linux_conda:
+  cu_version: cu102
+  name: linux_conda_py36_cu102_pyt170
+  python_version: '3.6'
+  pytorch_version: 1.7.0
+- binary_linux_conda:
+  cu_version: cu110
+  name: linux_conda_py36_cu110_pyt170
+  python_version: '3.6'
+  pytorch_version: 1.7.0
 - binary_linux_conda:
   cu_version: cu92
   name: linux_conda_py37_cu92_pyt14
@@ -315,6 +328,21 @@ workflows:
   name: linux_conda_py37_cu102_pyt160
   python_version: '3.7'
   pytorch_version: 1.6.0
+- binary_linux_conda:
+  cu_version: cu101
+  name: linux_conda_py37_cu101_pyt170
+  python_version: '3.7'
+  pytorch_version: 1.7.0
+- binary_linux_conda:
+  cu_version: cu102
+  name: linux_conda_py37_cu102_pyt170
+  python_version: '3.7'
+  pytorch_version: 1.7.0
+- binary_linux_conda:
+  cu_version: cu110
+  name: linux_conda_py37_cu110_pyt170
+  python_version: '3.7'
+  pytorch_version: 1.7.0
 - binary_linux_conda:
   cu_version: cu92
   name: linux_conda_py38_cu92_pyt14
@@ -370,6 +398,21 @@ workflows:
   name: linux_conda_py38_cu102_pyt160
   python_version: '3.8'
   pytorch_version: 1.6.0
+- binary_linux_conda:
+  cu_version: cu101
+  name: linux_conda_py38_cu101_pyt170
+  python_version: '3.8'
+  pytorch_version: 1.7.0
+- binary_linux_conda:
+  cu_version: cu102
+  name: linux_conda_py38_cu102_pyt170
+  python_version: '3.8'
+  pytorch_version: 1.7.0
+- binary_linux_conda:
+  cu_version: cu110
+  name: linux_conda_py38_cu110_pyt170
+  python_version: '3.8'
+  pytorch_version: 1.7.0
 - binary_linux_wheel:
   cu_version: cu101
   name: linux_wheel_py36_cu101_pyt160
@@ -395,6 +438,11 @@ workflows:
   python_version: "3.7"
   pytorch_version: '1.6.0'
   cu_version: "cu102"
+- binary_linux_conda_cuda:
+  name: testrun_conda_cuda_py37_cu110_pyt170
+  python_version: "3.7"
+  pytorch_version: '1.7.0'
+  cu_version: "cu110"
 - binary_macos_wheel:
   cu_version: cpu
   name: macos_wheel_py36_cpu
@@ -19,6 +19,7 @@ CONDA_CUDA_VERSIONS = {
     "1.5.0": ["cu92", "cu101", "cu102"],
     "1.5.1": ["cu92", "cu101", "cu102"],
     "1.6.0": ["cu92", "cu101", "cu102"],
+    "1.7.0": ["cu101", "cu102", "cu110"],
 }
 
 
@@ -26,7 +27,7 @@ def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
     w = []
     for btype in ["conda"]:
         for python_version in ["3.6", "3.7", "3.8"]:
-            for pytorch_version in ["1.4", "1.5.0", "1.5.1", "1.6.0"]:
+            for pytorch_version in ["1.4", "1.5.0", "1.5.1", "1.6.0", "1.7.0"]:
                 for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
                     w += workflow_pair(
                         btype=btype,
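The workflow entries in the generated config above come from this version matrix: each pytorch/CUDA pair in `CONDA_CUDA_VERSIONS` is expanded across the Python versions by `regenerate.py` (via `workflow_pair`). Below is a minimal standalone sketch of that expansion, not the actual regenerate.py code; it assumes only the simplified `linux_conda_py<PY>_<CU>_pyt<PYT>` naming visible in the diff.

```python
# Minimal sketch of the version-matrix expansion; job names follow the
# linux_conda_py<PY>_<CU>_pyt<PYT> pattern seen in the generated config.
CONDA_CUDA_VERSIONS = {
    "1.6.0": ["cu92", "cu101", "cu102"],
    "1.7.0": ["cu101", "cu102", "cu110"],  # the entry added by this commit
}

def conda_job_names():
    names = []
    for python_version in ["3.6", "3.7", "3.8"]:
        for pytorch_version in ["1.6.0", "1.7.0"]:
            for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
                py = python_version.replace(".", "")
                pyt = pytorch_version.replace(".", "")
                names.append(f"linux_conda_py{py}_{cu_version}_pyt{pyt}")
    return names

# Includes e.g. linux_conda_py36_cu110_pyt170, matching the new entries above.
print("\n".join(conda_job_names()))
```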
@@ -51,6 +51,17 @@ setup_cuda() {
 
   # Now work out the CUDA settings
   case "$CU_VERSION" in
+    cu110)
+      if [[ "$OSTYPE" == "msys" ]]; then
+        export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0"
+      else
+        export CUDA_HOME=/usr/local/cuda-11.0/
+      fi
+      export FORCE_CUDA=1
+      # Hard-coding gencode flags is temporary situation until
+      # https://github.com/pytorch/pytorch/pull/23408 lands
+      export NVCC_FLAGS="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_50,code=compute_50"
+      ;;
     cu102)
       if [[ "$OSTYPE" == "msys" ]]; then
         export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2"
@@ -244,6 +255,9 @@ setup_conda_cudatoolkit_constraint() {
     export CONDA_CUDATOOLKIT_CONSTRAINT=""
   else
     case "$CU_VERSION" in
+      cu110)
+        export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.0,<11.1 # [not osx]"
+        ;;
       cu102)
         export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.2,<10.3 # [not osx]"
         ;;