Summary:
Update the `main` build to the latest CircleCI machine image, Ubuntu 20.04.

Avoid `torch.logical_or` and `torch.logical_and`, which PyTorch 1.4 lacks, by using the `|` and `&` operators instead.

Also speed up the test run with PyTorch 1.4.0 (which has no ninja, so builds are slow) by not setting `NVCC_FLAGS` for it.

Reviewed By: theschnitz

Differential Revision: D27262327

fbshipit-source-id: ddc359d134b1dc755f8b20bd3f33bb080cb3a0e1
Author: Jeremy Reizenstein, 2021-03-23 14:24:06 -07:00 (committed by Facebook GitHub Bot)
parent 6c4151a820
commit cc08c6b288
7 changed files with 40 additions and 23 deletions

View File

@@ -3,4 +3,7 @@
 # Run this script before committing config.yml to verify it is valid yaml.
-python -c 'import yaml; yaml.safe_load(open("config.yml"))' && echo OK
+python -c 'import yaml; yaml.safe_load(open("config.yml"))' && echo OK - valid yaml
+msg="circleci not installed so can't check schema"
+command -v circleci > /dev/null && (cd ..; circleci config validate) || echo "$msg"
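The `command -v circleci > /dev/null && ( ... ) || echo "$msg"` chain added here runs `circleci config validate` only when the circleci CLI is actually installed, and prints the message (rather than failing the script) when it is not, or when validation itself fails.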

View File

@@ -18,12 +18,12 @@ setupcuda: &setupcuda
     working_directory: ~/
     command: |
       # download and install nvidia drivers, cuda, etc
-      wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
-      sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
+      wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.2.2/local_installers/cuda_11.2.2_460.32.03_linux.run
+      sudo sh ~/nvidia-downloads/cuda_11.2.2_460.32.03_linux.run --silent
       echo "Done installing CUDA."
       pyenv versions
       nvidia-smi
-      pyenv global 3.7.0
+      pyenv global 3.9.1

 gpu: &gpu
   environment:
@@ -64,7 +64,7 @@ jobs:
   main:
     <<: *gpu
     machine:
-      image: ubuntu-1604:201903-01
+      image: ubuntu-2004:202101-01
     steps:
     - checkout
     - <<: *setupcuda
@@ -86,10 +86,10 @@ jobs:
     - run:
        name: build
        command: |
-         export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-10.2/lib64
+         export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.2/lib64
          export CUB_HOME=$(realpath ../cub-1.10.0)
          python3 setup.py build_ext --inplace
-    - run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-10.2/lib64 python -m unittest discover -v -s tests
+    - run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.2/lib64 python -m unittest discover -v -s tests
     - run: python3 setup.py bdist_wheel

   binary_linux_wheel:
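(Aside: `$LD_LIBARY_PATH`, missing an R, appears in both the old and new lines above; the misspelled variable expands to nothing, so the exported `LD_LIBRARY_PATH` still picks up the intended CUDA `lib64` directory.)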
@@ -186,7 +186,7 @@ jobs:
          { docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
-         export DOCKER_IMAGE=pytorch/conda-cuda
+         DOCKER_IMAGE=pytorch/conda-cuda
          echo Pulling docker image $DOCKER_IMAGE
          docker pull $DOCKER_IMAGE
     - run:
@@ -196,8 +196,9 @@ jobs:
          cd ${HOME}/project/
-         export DOCKER_IMAGE=pytorch/conda-cuda
-         export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION"
+         DOCKER_IMAGE=pytorch/conda-cuda
+         export JUST_TESTRUN=1
+         VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
          docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh
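Each `-e NAME` flag in `VARS_TO_PASS` is passed to `docker run` without an `=value`, which forwards that variable's value from the host environment into the container; this is why `JUST_TESTRUN` is exported just before being appended to the list.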

View File

@@ -18,12 +18,12 @@ setupcuda: &setupcuda
     working_directory: ~/
     command: |
       # download and install nvidia drivers, cuda, etc
-      wget --no-verbose --no-clobber -P ~/nvidia-downloads http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
-      sudo sh ~/nvidia-downloads/cuda_10.2.89_440.33.01_linux.run --silent
+      wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.2.2/local_installers/cuda_11.2.2_460.32.03_linux.run
+      sudo sh ~/nvidia-downloads/cuda_11.2.2_460.32.03_linux.run --silent
       echo "Done installing CUDA."
       pyenv versions
       nvidia-smi
-      pyenv global 3.7.0
+      pyenv global 3.9.1

 gpu: &gpu
   environment:
@@ -64,7 +64,7 @@ jobs:
   main:
     <<: *gpu
     machine:
-      image: ubuntu-1604:201903-01
+      image: ubuntu-2004:202101-01
     steps:
     - checkout
    - <<: *setupcuda
@@ -86,10 +86,10 @@ jobs:
     - run:
        name: build
        command: |
-         export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-10.2/lib64
+         export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.2/lib64
          export CUB_HOME=$(realpath ../cub-1.10.0)
          python3 setup.py build_ext --inplace
-    - run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-10.2/lib64 python -m unittest discover -v -s tests
+    - run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.2/lib64 python -m unittest discover -v -s tests
     - run: python3 setup.py bdist_wheel

   binary_linux_wheel:
@@ -186,7 +186,7 @@ jobs:
          { docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
-         export DOCKER_IMAGE=pytorch/conda-cuda
+         DOCKER_IMAGE=pytorch/conda-cuda
          echo Pulling docker image $DOCKER_IMAGE
          docker pull $DOCKER_IMAGE
     - run:
@@ -196,8 +196,9 @@ jobs:
          cd ${HOME}/project/
-         export DOCKER_IMAGE=pytorch/conda-cuda
-         export VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION"
+         DOCKER_IMAGE=pytorch/conda-cuda
+         export JUST_TESTRUN=1
+         VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
          docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${DOCKER_IMAGE} ./packaging/build_conda.sh

View File

@@ -131,6 +131,8 @@ def generate_upload_workflow(*, base_workflow_name, btype, cu_version, filter_branch
 def indent(indentation, data_list):
+    if len(data_list) == 0:
+        return ""
     return ("\n" + " " * indentation).join(
         yaml.dump(data_list, default_flow_style=False).splitlines()
     )
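For context, a minimal standalone sketch of the patched helper, assuming the two added lines (per the hunk's `+2`) are the empty-list guard; the `print` calls are illustrative usage, not part of the commit:

```python
import yaml

def indent(indentation, data_list):
    # Guard added by this commit: an empty list now renders as
    # nothing instead of the literal YAML document "[]" that
    # yaml.dump([]) would produce.
    if len(data_list) == 0:
        return ""
    # Dump the list as block-style YAML, then indent every line
    # after the first by `indentation` spaces.
    return ("\n" + " " * indentation).join(
        yaml.dump(data_list, default_flow_style=False).splitlines()
    )

print(repr(indent(4, [])))           # '' rather than '[]'
print(indent(2, [{"job": "main"}]))  # - job: main
```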

View File

@@ -16,5 +16,14 @@ export SOURCE_ROOT_DIR="$PWD"
 setup_conda_pytorch_constraint
 setup_conda_cudatoolkit_constraint
 setup_visual_studio_constraint
+if [[ "$JUST_TESTRUN" == "1" ]]
+then
+    # We are not building for other users, we
+    # are only trying to see if the tests pass.
+    # So save time by only building for our own GPU.
+    unset NVCC_FLAGS
+fi
+# shellcheck disable=SC2086
 conda build $CONDA_CHANNEL_FLAGS ${TEST_FLAG:-} -c bottler -c defaults -c conda-forge -c fvcore -c iopath --no-anaconda-upload --python "$PYTHON_VERSION" packaging/pytorch3d
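Background on why this helps: in these packaging scripts `NVCC_FLAGS` normally carries `-gencode` targets for a range of GPU architectures so published builds run anywhere; with it unset, the extension build typically falls back to compiling only for the GPU architecture detected on the build machine, which is all a test-only run needs.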

View File

@@ -397,15 +397,15 @@ def clip_faces(
     # pyre-ignore[16]:
     faces_unculled = ~faces_culled
     # Case 1: no clipped verts or culled faces
-    cases1_unclipped = torch.logical_and(faces_num_clipped_verts == 0, faces_unculled)
+    cases1_unclipped = (faces_num_clipped_verts == 0) & faces_unculled
     case1_unclipped_idx = cases1_unclipped.nonzero(as_tuple=True)[0]
     # Case 2: all verts clipped
-    case2_unclipped = torch.logical_or(faces_num_clipped_verts == 3, faces_culled)
+    case2_unclipped = (faces_num_clipped_verts == 3) | faces_culled
     # Case 3: two verts clipped
-    case3_unclipped = torch.logical_and(faces_num_clipped_verts == 2, faces_unculled)
+    case3_unclipped = (faces_num_clipped_verts == 2) & faces_unculled
     case3_unclipped_idx = case3_unclipped.nonzero(as_tuple=True)[0]
     # Case 4: one vert clipped
-    case4_unclipped = torch.logical_and(faces_num_clipped_verts == 1, faces_unculled)
+    case4_unclipped = (faces_num_clipped_verts == 1) & faces_unculled
     case4_unclipped_idx = case4_unclipped.nonzero(as_tuple=True)[0]
     # faces_unclipped_to_clipped_idx is an (F) dim tensor storing the index of each
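The substitution in this hunk relies on `&` and `|` computing elementwise AND/OR on boolean tensors, exactly like `torch.logical_and`/`torch.logical_or` but available on PyTorch 1.4 as well. A minimal sketch with made-up stand-in tensors, runnable on a recent PyTorch that has both forms:

```python
import torch

# Stand-ins for faces_num_clipped_verts and faces_unculled above.
num_clipped = torch.tensor([0, 3, 2, 1])
unculled = torch.tensor([True, True, False, True])

# Operator form used by the commit; works on PyTorch 1.4 and later.
case1 = (num_clipped == 0) & unculled
case2 = (num_clipped == 3) | ~unculled

# Function form avoided by the commit (not available in PyTorch 1.4).
assert torch.equal(case1, torch.logical_and(num_clipped == 0, unculled))
assert torch.equal(case2, torch.logical_or(num_clipped == 3, ~unculled))
```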

View File

@@ -158,6 +158,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
     def test_pluggable_load_cube(self):
         """
         This won't work on Windows due to NamedTemporaryFile being reopened.
+        Use the testpath package instead?
         """
         ply_file = "\n".join(CUBE_PLY_LINES)
         io = IO()
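For background on the docstring's caveat: on Windows, a file created with `tempfile.NamedTemporaryFile(delete=True)` is held open by the creating process and cannot be opened again by name until it is closed, so tests that write a temp file and then hand its path to a loader fail there; the `testpath` suggestion in the added line is the workaround the author floats.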