From 7e095055388a4e965f4c8f9a0c32f984ba64dd79 Mon Sep 17 00:00:00 2001
From: Richard Barnes
Date: Tue, 4 Mar 2025 17:49:30 -0800
Subject: [PATCH] Enable `-Wunused-value` in vision/PACKAGE +1

Summary:
This diff enables compilation warning flags for the directory in question. Further details are in [this workplace post](https://fb.workplace.com/permalink.php?story_fbid=pfbid02XaWNiCVk69r1ghfvDVpujB8Hr9Y61uDvNakxiZFa2jwiPHscVdEQwCBHrmWZSyMRl&id=100051201402394).

This is a low-risk diff. There are **no run-time effects** and the diff has already been observed to compile locally. **If the code compiles, it works; test errors are spurious.**

Differential Revision: D70282347

fbshipit-source-id: e2fa55c002d7124b13450c812165d244b8a53f4e
---
 pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp | 5 +++--
 pytorch3d/csrc/pulsar/pytorch/util.cpp        | 7 +++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp b/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp
index 1dd41ed4..b372e0bf 100644
--- a/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp
+++ b/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp
@@ -8,6 +8,7 @@
 
 #ifdef WITH_CUDA
 #include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAException.h>
 #include <c10/cuda/CUDAStream.h>
 #endif
 #include "./tensor_util.h"
@@ -33,13 +34,13 @@ torch::Tensor sphere_ids_from_result_info_nograd(
           .contiguous();
   if (forw_info.device().type() == c10::DeviceType::CUDA) {
 #ifdef WITH_CUDA
-    cudaMemcpyAsync(
+    C10_CUDA_CHECK(cudaMemcpyAsync(
         result.data_ptr(),
         tmp.data_ptr(),
         sizeof(uint32_t) * tmp.size(0) * tmp.size(1) * tmp.size(2) *
             tmp.size(3),
         cudaMemcpyDeviceToDevice,
-        at::cuda::getCurrentCUDAStream());
+        at::cuda::getCurrentCUDAStream()));
 #else
     throw std::runtime_error(
         "Copy on CUDA device initiated but built "
diff --git a/pytorch3d/csrc/pulsar/pytorch/util.cpp b/pytorch3d/csrc/pulsar/pytorch/util.cpp
index 7d25b6e8..87eb8815 100644
--- a/pytorch3d/csrc/pulsar/pytorch/util.cpp
+++ b/pytorch3d/csrc/pulsar/pytorch/util.cpp
@@ -7,6 +7,7 @@
  */
 
 #ifdef WITH_CUDA
+#include <c10/cuda/CUDAException.h>
 #include <c10/cuda/CUDAStream.h>
 
 namespace pulsar {
@@ -17,7 +18,8 @@ void cudaDevToDev(
     const void* src,
     const int& size,
     const cudaStream_t& stream) {
-  cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream);
+  C10_CUDA_CHECK(
+      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream));
 }
 
 void cudaDevToHost(
@@ -25,7 +27,8 @@ void cudaDevToHost(
     const void* src,
     const int& size,
     const cudaStream_t& stream) {
-  cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream);
+  C10_CUDA_CHECK(
+      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream));
 }
 
 } // namespace pytorch
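
For reviewers, a minimal standalone sketch (not part of the patch) of the pattern the diff applies: `C10_CUDA_CHECK`, from `c10/cuda/CUDAException.h`, consumes the `cudaError_t` that the bare `cudaMemcpyAsync` calls were discarding and throws a `c10::Error` carrying the CUDA error string if the call failed. The helper name and parameters below are illustrative, not taken from the pulsar sources.

```cpp
// Illustrative sketch only -- not part of the diff. Assumes a PyTorch
// C++ extension build where the c10 CUDA headers are available.
#include <c10/cuda/CUDAException.h> // defines C10_CUDA_CHECK
#include <cuda_runtime.h>

// Hypothetical helper mirroring the pattern applied in util.cpp.
void copyDevToDev(
    void* trg,
    const void* src,
    size_t size,
    cudaStream_t stream) {
  // Before the patch, the returned cudaError_t was silently dropped:
  //   cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream);
  // C10_CUDA_CHECK consumes that return value (satisfying the newly
  // enabled warning) and throws on a non-cudaSuccess result.
  C10_CUDA_CHECK(
      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream));
}
```

Note that `cudaMemcpyAsync` is asynchronous, so the check here surfaces argument and launch errors at the call site; a failure that occurs later on the stream would only be reported by a subsequent check or synchronization.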