From 9397cd872d85766e0fa00d732d3734257aecc6c7 Mon Sep 17 00:00:00 2001
From: Jeremy Reizenstein
Date: Fri, 17 Apr 2020 10:37:10 -0700
Subject: [PATCH] torch C API warnings

Summary: This is mostly replacing the old PackedTensorAccessor with the new PackedTensorAccessor64.

Reviewed By: gkioxari

Differential Revision: D21088773

fbshipit-source-id: 5973e5a29d934eafb7c70ec5ec154ca076b64d27
---
 pytorch3d/csrc/compositing/alpha_composite.cu | 40 +++++++++----------
 .../csrc/compositing/norm_weighted_sum.cu     | 40 +++++++++----------
 pytorch3d/csrc/compositing/weighted_sum.cu    | 40 +++++++++----------
 .../face_areas_normals_cpu.cpp                |  1 -
 .../packed_to_padded_tensor_cpu.cpp           |  1 -
 .../rasterize_meshes/rasterize_meshes_cpu.cpp |  1 -
 6 files changed, 60 insertions(+), 63 deletions(-)

diff --git a/pytorch3d/csrc/compositing/alpha_composite.cu b/pytorch3d/csrc/compositing/alpha_composite.cu
index f4d741e7..6cc86f88 100644
--- a/pytorch3d/csrc/compositing/alpha_composite.cu
+++ b/pytorch3d/csrc/compositing/alpha_composite.cu
@@ -12,10 +12,10 @@
 // Currently, support is for floats only.
 __global__ void alphaCompositeCudaForwardKernel(
     // clang-format off
-    torch::PackedTensorAccessor result,
-    const torch::PackedTensorAccessor features,
-    const torch::PackedTensorAccessor alphas,
-    const torch::PackedTensorAccessor points_idx) {
+    torch::PackedTensorAccessor64 result,
+    const torch::PackedTensorAccessor64 features,
+    const torch::PackedTensorAccessor64 alphas,
+    const torch::PackedTensorAccessor64 points_idx) {
   // clang-format on
   const int64_t batch_size = result.size(0);
   const int64_t C = features.size(0);
@@ -61,12 +61,12 @@ __global__ void alphaCompositeCudaForwardKernel(
 // Currently, support is for floats only.
 __global__ void alphaCompositeCudaBackwardKernel(
     // clang-format off
-    torch::PackedTensorAccessor grad_features,
-    torch::PackedTensorAccessor grad_alphas,
-    const torch::PackedTensorAccessor grad_outputs,
-    const torch::PackedTensorAccessor features,
-    const torch::PackedTensorAccessor alphas,
-    const torch::PackedTensorAccessor points_idx) {
+    torch::PackedTensorAccessor64 grad_features,
+    torch::PackedTensorAccessor64 grad_alphas,
+    const torch::PackedTensorAccessor64 grad_outputs,
+    const torch::PackedTensorAccessor64 features,
+    const torch::PackedTensorAccessor64 alphas,
+    const torch::PackedTensorAccessor64 points_idx) {
   // clang-format on
   const int64_t batch_size = points_idx.size(0);
   const int64_t C = features.size(0);
@@ -149,10 +149,10 @@ torch::Tensor alphaCompositeCudaForward(
   // doubles. Currently, support is for floats only.
   alphaCompositeCudaForwardKernel<<>>(
       // clang-format off
-      result.packed_accessor(),
-      features.packed_accessor(),
-      alphas.packed_accessor(),
-      points_idx.packed_accessor());
+      result.packed_accessor64(),
+      features.packed_accessor64(),
+      alphas.packed_accessor64(),
+      points_idx.packed_accessor64());
   // clang-format on

   return result;
@@ -175,12 +175,12 @@ std::tuple alphaCompositeCudaBackward(
   // doubles. Currently, support is for floats only.
   alphaCompositeCudaBackwardKernel<<>>(
       // clang-format off
-      grad_features.packed_accessor(),
-      grad_alphas.packed_accessor(),
-      grad_outputs.packed_accessor(),
-      features.packed_accessor(),
-      alphas.packed_accessor(),
-      points_idx.packed_accessor());
+      grad_features.packed_accessor64(),
+      grad_alphas.packed_accessor64(),
+      grad_outputs.packed_accessor64(),
+      features.packed_accessor64(),
+      alphas.packed_accessor64(),
+      points_idx.packed_accessor64());
   // clang-format on

   return std::make_tuple(grad_features, grad_alphas);
diff --git a/pytorch3d/csrc/compositing/norm_weighted_sum.cu b/pytorch3d/csrc/compositing/norm_weighted_sum.cu
index 5771c4b2..90d2ca81 100644
--- a/pytorch3d/csrc/compositing/norm_weighted_sum.cu
+++ b/pytorch3d/csrc/compositing/norm_weighted_sum.cu
@@ -14,10 +14,10 @@ __constant__ const float kEpsilon = 1e-4;
 // Currently, support is for floats only.
 __global__ void weightedSumNormCudaForwardKernel(
     // clang-format off
-    torch::PackedTensorAccessor result,
-    const torch::PackedTensorAccessor features,
-    const torch::PackedTensorAccessor alphas,
-    const torch::PackedTensorAccessor points_idx) {
+    torch::PackedTensorAccessor64 result,
+    const torch::PackedTensorAccessor64 features,
+    const torch::PackedTensorAccessor64 alphas,
+    const torch::PackedTensorAccessor64 points_idx) {
   // clang-format on
   const int64_t batch_size = result.size(0);
   const int64_t C = features.size(0);
@@ -76,12 +76,12 @@ __global__ void weightedSumNormCudaForwardKernel(
 // Currently, support is for floats only.
 __global__ void weightedSumNormCudaBackwardKernel(
     // clang-format off
-    torch::PackedTensorAccessor grad_features,
-    torch::PackedTensorAccessor grad_alphas,
-    const torch::PackedTensorAccessor grad_outputs,
-    const torch::PackedTensorAccessor features,
-    const torch::PackedTensorAccessor alphas,
-    const torch::PackedTensorAccessor points_idx) {
+    torch::PackedTensorAccessor64 grad_features,
+    torch::PackedTensorAccessor64 grad_alphas,
+    const torch::PackedTensorAccessor64 grad_outputs,
+    const torch::PackedTensorAccessor64 features,
+    const torch::PackedTensorAccessor64 alphas,
+    const torch::PackedTensorAccessor64 points_idx) {
   // clang-format on
   const int64_t batch_size = points_idx.size(0);
   const int64_t C = features.size(0);
@@ -164,10 +164,10 @@ torch::Tensor weightedSumNormCudaForward(
   // doubles. Currently, support is for floats only.
   // clang-format off
   weightedSumNormCudaForwardKernel<<>>(
-      result.packed_accessor(),
-      features.packed_accessor(),
-      alphas.packed_accessor(),
-      points_idx.packed_accessor());
+      result.packed_accessor64(),
+      features.packed_accessor64(),
+      alphas.packed_accessor64(),
+      points_idx.packed_accessor64());
   // clang-format on

   return result;
@@ -190,12 +190,12 @@ std::tuple weightedSumNormCudaBackward(
   // doubles. Currently, support is for floats only.
   weightedSumNormCudaBackwardKernel<<>>(
       // clang-format off
-      grad_features.packed_accessor(),
-      grad_alphas.packed_accessor(),
-      grad_outputs.packed_accessor(),
-      features.packed_accessor(),
-      alphas.packed_accessor(),
-      points_idx.packed_accessor());
+      grad_features.packed_accessor64(),
+      grad_alphas.packed_accessor64(),
+      grad_outputs.packed_accessor64(),
+      features.packed_accessor64(),
+      alphas.packed_accessor64(),
+      points_idx.packed_accessor64());
   // clang-format on

   return std::make_tuple(grad_features, grad_alphas);
diff --git a/pytorch3d/csrc/compositing/weighted_sum.cu b/pytorch3d/csrc/compositing/weighted_sum.cu
index 8b15a497..1bc72f23 100644
--- a/pytorch3d/csrc/compositing/weighted_sum.cu
+++ b/pytorch3d/csrc/compositing/weighted_sum.cu
@@ -12,10 +12,10 @@
 // Currently, support is for floats only.
 __global__ void weightedSumCudaForwardKernel(
     // clang-format off
-    torch::PackedTensorAccessor result,
-    const torch::PackedTensorAccessor features,
-    const torch::PackedTensorAccessor alphas,
-    const torch::PackedTensorAccessor points_idx) {
+    torch::PackedTensorAccessor64 result,
+    const torch::PackedTensorAccessor64 features,
+    const torch::PackedTensorAccessor64 alphas,
+    const torch::PackedTensorAccessor64 points_idx) {
   // clang-format on
   const int64_t batch_size = result.size(0);
   const int64_t C = features.size(0);
@@ -58,12 +58,12 @@ __global__ void weightedSumCudaForwardKernel(
 // Currently, support is for floats only.
 __global__ void weightedSumCudaBackwardKernel(
     // clang-format off
-    torch::PackedTensorAccessor grad_features,
-    torch::PackedTensorAccessor grad_alphas,
-    const torch::PackedTensorAccessor grad_outputs,
-    const torch::PackedTensorAccessor features,
-    const torch::PackedTensorAccessor alphas,
-    const torch::PackedTensorAccessor points_idx) {
+    torch::PackedTensorAccessor64 grad_features,
+    torch::PackedTensorAccessor64 grad_alphas,
+    const torch::PackedTensorAccessor64 grad_outputs,
+    const torch::PackedTensorAccessor64 features,
+    const torch::PackedTensorAccessor64 alphas,
+    const torch::PackedTensorAccessor64 points_idx) {
   // clang-format on
   const int64_t batch_size = points_idx.size(0);
   const int64_t C = features.size(0);
@@ -123,10 +123,10 @@ torch::Tensor weightedSumCudaForward(
   // doubles. Currently, support is for floats only.
   weightedSumCudaForwardKernel<<>>(
       // clang-format off
-      result.packed_accessor(),
-      features.packed_accessor(),
-      alphas.packed_accessor(),
-      points_idx.packed_accessor());
+      result.packed_accessor64(),
+      features.packed_accessor64(),
+      alphas.packed_accessor64(),
+      points_idx.packed_accessor64());
   // clang-format on

   return result;
@@ -149,12 +149,12 @@ std::tuple weightedSumCudaBackward(
   // doubles. Currently, support is for floats only.
   weightedSumCudaBackwardKernel<<>>(
       // clang-format off
-      grad_features.packed_accessor(),
-      grad_alphas.packed_accessor(),
-      grad_outputs.packed_accessor(),
-      features.packed_accessor(),
-      alphas.packed_accessor(),
-      points_idx.packed_accessor());
+      grad_features.packed_accessor64(),
+      grad_alphas.packed_accessor64(),
+      grad_outputs.packed_accessor64(),
+      features.packed_accessor64(),
+      alphas.packed_accessor64(),
+      points_idx.packed_accessor64());
   // clang-format on

   return std::make_tuple(grad_features, grad_alphas);
diff --git a/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp b/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp
index 09535947..4fa8ec55 100644
--- a/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp
+++ b/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp
@@ -6,7 +6,6 @@
 std::tuple FaceAreasNormalsForwardCpu(
     const at::Tensor verts,
     const at::Tensor faces) {
-  const int V = verts.size(0);
   const int F = faces.size(0);

   at::Tensor areas = at::empty({F}, verts.options());
diff --git a/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp b/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp
index dd872b78..283c54db 100644
--- a/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp
+++ b/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp
@@ -38,7 +38,6 @@ at::Tensor PaddedToPackedCpu(
     const at::Tensor first_idxs,
     const int64_t num_inputs) {
   const int64_t batch_size = inputs_padded.size(0);
-  const int64_t max_size = inputs_padded.size(1);

   AT_ASSERTM(
       inputs_padded.dim() == 3, "inputs_padded must be a 3-dimensional tensor");
diff --git a/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp b/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp
index 65573633..90ccfec4 100644
--- a/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp
+++ b/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp
@@ -415,7 +415,6 @@ torch::Tensor RasterizeMeshesCoarseCpu(
   auto opts = face_verts.options().dtype(torch::kInt32);
   torch::Tensor faces_per_bin = torch::zeros({N, BH, BW}, opts);
   torch::Tensor bin_faces = torch::full({N, BH, BW, M}, -1, opts);
-  auto faces_per_bin_a = faces_per_bin.accessor();
   auto bin_faces_a = bin_faces.accessor();

   // Precompute all face bounding boxes.
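Note appended after the patch (not part of the commit): the change above is mechanical. Every use of the deprecated torch C++ API, i.e. Tensor::packed_accessor() calls and torch::PackedTensorAccessor kernel parameters, is swapped for the 64-bit-indexed packed_accessor64() / torch::PackedTensorAccessor64, which is what quiets the build warnings the commit title refers to (PackedTensorAccessor32 / packed_accessor32 are the 32-bit-indexed counterparts). The toy kernel below is only a sketch of that pattern: scaleKernel, scaleCuda, and the 2-D float tensors are invented for illustration and do not appear in PyTorch3D, and the template arguments (dtype, rank, pointer traits) are assumptions, since the patch text does not preserve the ones actually used.

#include <torch/extension.h>

// Deprecated torch C++ API (what the "-" lines in the hunks above remove):
//   torch::PackedTensorAccessor parameters and Tensor::packed_accessor() calls.
// Replacement (what the "+" lines add):
//   torch::PackedTensorAccessor64 parameters and Tensor::packed_accessor64() calls.
__global__ void scaleKernel(
    torch::PackedTensorAccessor64<float, 2, torch::RestrictPtrTraits> out,
    const torch::PackedTensorAccessor64<float, 2, torch::RestrictPtrTraits> in,
    const float scale) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  const int64_t j = blockIdx.y * blockDim.y + threadIdx.y;
  // The accessor carries sizes and strides, so bounds checks and indexing
  // need no raw pointers or manual stride arithmetic.
  if (i < out.size(0) && j < out.size(1)) {
    out[i][j] = in[i][j] * scale;
  }
}

torch::Tensor scaleCuda(const torch::Tensor& in, float scale) {
  TORCH_CHECK(
      in.is_cuda() && in.dim() == 2 && in.scalar_type() == torch::kFloat,
      "expected a 2-D float CUDA tensor");
  torch::Tensor out = torch::empty_like(in);
  const dim3 threads(16, 16);
  const dim3 blocks(
      (in.size(0) + threads.x - 1) / threads.x,
      (in.size(1) + threads.y - 1) / threads.y);
  // packed_accessor64() replaces the deprecated packed_accessor() calls
  // removed in the launch sites above.
  scaleKernel<<<blocks, threads>>>(
      out.packed_accessor64<float, 2, torch::RestrictPtrTraits>(),
      in.packed_accessor64<float, 2, torch::RestrictPtrTraits>(),
      scale);
  return out;
}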