mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-11-04 18:02:14 +08:00)
Enable -Wunused-value in vision/PACKAGE
Summary: This diff enables compilation warning flags for the directory in question. Further details are in [this workplace post](https://fb.workplace.com/permalink.php?story_fbid=pfbid02XaWNiCVk69r1ghfvDVpujB8Hr9Y61uDvNakxiZFa2jwiPHscVdEQwCBHrmWZSyMRl&id=100051201402394). This is a low-risk diff. There are **no run-time effects** and the diff has already been observed to compile locally. **If the code compiles, it works; test errors are spurious.**

Differential Revision: D70282347

fbshipit-source-id: e2fa55c002d7124b13450c812165d244b8a53f4e
parent 20bd8b33f6
commit 7e09505538
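The substantive change in the diff below is mechanical: each bare `cudaMemcpyAsync` call is wrapped in `C10_CUDA_CHECK` from `c10/cuda/CUDAException.h`, so the returned `cudaError_t` is consumed and acted upon instead of silently discarded, which is what the newly enabled warning flags report. A minimal sketch of the pattern (the function name `copy_dev_to_dev` is illustrative, not from the diff):

```cpp
// Sketch only: illustrates the pattern applied throughout this diff.
#include <c10/cuda/CUDAException.h> // provides C10_CUDA_CHECK
#include <cuda_runtime_api.h>

void copy_dev_to_dev(void* trg, const void* src, size_t size,
                     cudaStream_t stream) {
  // Before: the cudaError_t returned by cudaMemcpyAsync is discarded,
  // which the enabled warning flags report as an unused value.
  //   cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream);

  // After: C10_CUDA_CHECK evaluates the call, inspects the returned
  // cudaError_t, and throws a c10::Error carrying the CUDA error string
  // if the status is not cudaSuccess.
  C10_CUDA_CHECK(
      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream));
}
```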
@@ -8,6 +8,7 @@
 
 #ifdef WITH_CUDA
 #include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAException.h>
 #include <cuda_runtime_api.h>
 #endif
 #include <torch/extension.h>
@@ -33,13 +34,13 @@ torch::Tensor sphere_ids_from_result_info_nograd(
           .contiguous();
   if (forw_info.device().type() == c10::DeviceType::CUDA) {
 #ifdef WITH_CUDA
-    cudaMemcpyAsync(
+    C10_CUDA_CHECK(cudaMemcpyAsync(
         result.data_ptr(),
         tmp.data_ptr(),
         sizeof(uint32_t) * tmp.size(0) * tmp.size(1) * tmp.size(2) *
             tmp.size(3),
         cudaMemcpyDeviceToDevice,
-        at::cuda::getCurrentCUDAStream());
+        at::cuda::getCurrentCUDAStream()));
 #else
     throw std::runtime_error(
         "Copy on CUDA device initiated but built "
@@ -7,6 +7,7 @@
  */
 
 #ifdef WITH_CUDA
+#include <c10/cuda/CUDAException.h>
 #include <cuda_runtime_api.h>
 
 namespace pulsar {
@@ -17,7 +18,8 @@ void cudaDevToDev(
     const void* src,
     const int& size,
     const cudaStream_t& stream) {
-  cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream);
+  C10_CUDA_CHECK(
+      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream));
 }
 
 void cudaDevToHost(
@@ -25,7 +27,8 @@ void cudaDevToHost(
     const void* src,
     const int& size,
     const cudaStream_t& stream) {
-  cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream);
+  C10_CUDA_CHECK(
+      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream));
 }
 
 } // namespace pytorch
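One caveat worth noting (general CUDA behavior, not something this diff changes): `C10_CUDA_CHECK` validates only the status returned when the asynchronous copy is *enqueued* (bad arguments, an invalid stream, and the like). Errors that occur while the copy actually executes surface on a later runtime call, so code that needs to observe them must also check a synchronization point, as in this hypothetical sketch:

```cpp
#include <c10/cuda/CUDAException.h>
#include <cuda_runtime_api.h>

void copy_and_wait(void* trg, const void* src, size_t size,
                   cudaStream_t stream) {
  // Catches errors reported at enqueue time.
  C10_CUDA_CHECK(
      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream));
  // Catches errors that only materialize while the stream's queued
  // asynchronous work (including this copy) runs to completion.
  C10_CUDA_CHECK(cudaStreamSynchronize(stream));
}
```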