fix CPU-only hiding of cuda calls
Summary: CPU-only builds should be fixed by this change

Reviewed By: nikhilaravi

Differential Revision: D20598014

fbshipit-source-id: df098ec4c6c93d38515172805fe57cac7463c506
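For context, the hunks below all make the same correction: the `return ...Cuda...(...)` dispatch previously sat after the `#endif`, so a CPU-only build (where the CUDA guard macro is undefined) still compiled a reference to the CUDA function and failed. Moving the call inside the guarded branch hides it from CPU-only builds. A minimal before/after sketch, with names taken from the first hunk below; the opening `#ifdef WITH_CUDA` line is an assumption, since only the `#else`/`#endif` lines appear in the hunks:

// Before: the CUDA dispatch is outside the guard, so a CPU-only build
// still has to compile and link alphaCompositeCudaForward.
#ifdef WITH_CUDA
    CHECK_CONTIGUOUS_CUDA(features);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
    return alphaCompositeCudaForward(features, alphas, points_idx);

// After: the dispatch moves inside the guarded branch, so a CPU-only build
// only ever sees the AT_ERROR path and never references the CUDA symbol.
#ifdef WITH_CUDA
    CHECK_CONTIGUOUS_CUDA(features);
    return alphaCompositeCudaForward(features, alphas, points_idx);
#else
    AT_ERROR("Not compiled with GPU support");
#endif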
commit 8fa7678614
parent 595aca27ea
@@ -63,10 +63,10 @@ torch::Tensor alphaCompositeForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return alphaCompositeCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return alphaCompositeCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -92,12 +92,12 @@ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
-#else
-    AT_ERROR("Not compiled with GPU support");
-#endif
 
     return alphaCompositeCudaBackward(
         grad_outputs, features, alphas, points_idx);
+#else
+    AT_ERROR("Not compiled with GPU support");
+#endif
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);
@@ -61,11 +61,11 @@ torch::Tensor weightedSumNormForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+
+    return weightedSumNormCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-
-    return weightedSumNormCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -91,12 +91,12 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumNormBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
-#else
-    AT_ERROR("Not compiled with GPU support");
-#endif
 
     return weightedSumNormCudaBackward(
         grad_outputs, features, alphas, points_idx);
+#else
+    AT_ERROR("Not compiled with GPU support");
+#endif
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);
@@ -61,11 +61,10 @@ torch::Tensor weightedSumForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return weightedSumCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-
-    return weightedSumCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -91,11 +90,11 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+
+    return weightedSumCudaBackward(grad_outputs, features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-
-    return weightedSumCudaBackward(grad_outputs, features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);
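Taken together, all six functions end up with the same guarded shape. A sketch of the resulting structure for alphaCompositeForward, assuming the usual WITH_CUDA guard; the dispatch condition, the third CPU-side check, and the CPU call name are not shown in the hunks and are marked as assumptions in the comments:

torch::Tensor alphaCompositeForward(
    torch::Tensor& features,
    torch::Tensor& alphas,
    torch::Tensor& points_idx) {
  if (features.is_cuda()) {  // assumed dispatch check; not part of the hunks above
#ifdef WITH_CUDA
    CHECK_CONTIGUOUS_CUDA(features);
    CHECK_CONTIGUOUS_CUDA(alphas);
    CHECK_CONTIGUOUS_CUDA(points_idx);
    // After this commit, the CUDA call is compiled only when WITH_CUDA is defined.
    return alphaCompositeCudaForward(features, alphas, points_idx);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  } else {
    CHECK_CONTIGUOUS(features);
    CHECK_CONTIGUOUS(alphas);
    CHECK_CONTIGUOUS(points_idx);  // assumed; the hunks cut off before this line
    // CPU path is unchanged by this commit; the call name below is an assumption.
    return alphaCompositeCpuForward(features, alphas, points_idx);
  }
}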