Fix hiding of CUDA calls in CPU-only builds

Summary: CPU-only builds should be fixed by this change, which moves the CUDA-only calls inside the #ifdef WITH_CUDA guards so they are not compiled when building without GPU support.

Reviewed By: nikhilaravi

Differential Revision: D20598014

fbshipit-source-id: df098ec4c6c93d38515172805fe57cac7463c506
Author: Jeremy Reizenstein, 2020-03-24 05:00:42 -07:00 (committed by Facebook GitHub Bot)
Parent: 595aca27ea
Commit: 8fa7678614
3 changed files with 12 additions and 13 deletions
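The failure mode being fixed: in a CPU-only build, WITH_CUDA is not defined and the *Cuda* entry points are not available, yet each return statement calling them sat after the #endif and was therefore compiled unconditionally. Moving the call inside the #ifdef WITH_CUDA block lets the preprocessor strip it from CPU-only builds. A minimal sketch of the corrected guard pattern, using hypothetical myOp* names rather than anything from this repository:

#include <torch/extension.h>

// Hypothetical declarations: the CUDA variant only exists when the
// extension is built with GPU support.
torch::Tensor myOpCpuForward(const torch::Tensor& input);
#ifdef WITH_CUDA
torch::Tensor myOpCudaForward(const torch::Tensor& input);
#endif

torch::Tensor myOpForward(const torch::Tensor& input) {
  if (input.is_cuda()) {
#ifdef WITH_CUDA
    // The CUDA call lives inside the guard, so a CPU-only build
    // (WITH_CUDA undefined) never references myOpCudaForward.
    return myOpCudaForward(input);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  } else {
    return myOpCpuForward(input);
  }
}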

File 1 of 3

@@ -63,10 +63,10 @@ torch::Tensor alphaCompositeForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return alphaCompositeCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return alphaCompositeCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -92,12 +92,12 @@ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
-#else
-    AT_ERROR("Not compiled with GPU support");
-#endif
     return alphaCompositeCudaBackward(
         grad_outputs, features, alphas, points_idx);
+#else
+    AT_ERROR("Not compiled with GPU support");
+#endif
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);
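Reconstructed from the hunk above (the enclosing is_cuda() dispatch and the CPU branch are not shown here and are assumed unchanged), the CUDA path of alphaCompositeBackward after this change reads approximately:

#ifdef WITH_CUDA
    CHECK_CONTIGUOUS_CUDA(features);
    CHECK_CONTIGUOUS_CUDA(alphas);
    CHECK_CONTIGUOUS_CUDA(points_idx);
    return alphaCompositeCudaBackward(
        grad_outputs, features, alphas, points_idx);
#else
    AT_ERROR("Not compiled with GPU support");
#endif

In a CPU-only build only the AT_ERROR branch survives preprocessing, so alphaCompositeCudaBackward is never referenced.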

File 2 of 3

@@ -61,11 +61,11 @@ torch::Tensor weightedSumNormForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return weightedSumNormCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return weightedSumNormCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -91,12 +91,12 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumNormBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
-#else
-    AT_ERROR("Not compiled with GPU support");
-#endif
     return weightedSumNormCudaBackward(
         grad_outputs, features, alphas, points_idx);
+#else
+    AT_ERROR("Not compiled with GPU support");
+#endif
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);

File 3 of 3

@@ -61,11 +61,10 @@ torch::Tensor weightedSumForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return weightedSumCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return weightedSumCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -91,11 +90,11 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return weightedSumCudaBackward(grad_outputs, features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return weightedSumCudaBackward(grad_outputs, features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);