diff --git a/pytorch3d/csrc/compositing/alpha_composite.h b/pytorch3d/csrc/compositing/alpha_composite.h
index 7ec7115c..f5643538 100644
--- a/pytorch3d/csrc/compositing/alpha_composite.h
+++ b/pytorch3d/csrc/compositing/alpha_composite.h
@@ -58,7 +58,7 @@ torch::Tensor alphaCompositeForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -86,7 +86,7 @@ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);
diff --git a/pytorch3d/csrc/compositing/norm_weighted_sum.h b/pytorch3d/csrc/compositing/norm_weighted_sum.h
index 339b56fa..0e10aa97 100644
--- a/pytorch3d/csrc/compositing/norm_weighted_sum.h
+++ b/pytorch3d/csrc/compositing/norm_weighted_sum.h
@@ -56,7 +56,7 @@ torch::Tensor weightedSumNormForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -85,7 +85,7 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumNormBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);
diff --git a/pytorch3d/csrc/compositing/weighted_sum.h b/pytorch3d/csrc/compositing/weighted_sum.h
index 9c96e38c..368c8f80 100644
--- a/pytorch3d/csrc/compositing/weighted_sum.h
+++ b/pytorch3d/csrc/compositing/weighted_sum.h
@@ -56,7 +56,7 @@ torch::Tensor weightedSumForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -84,7 +84,7 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);
diff --git a/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu b/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu
index 6b5c44de..c500eb12 100644
--- a/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu
+++ b/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu
@@ -219,7 +219,7 @@ std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCuda(
   const int blocks = 64;
   const int threads = 512;
   AT_DISPATCH_FLOATING_TYPES(
-      verts.type(), "face_areas_normals_forward_cuda", ([&] {
+      verts.scalar_type(), "face_areas_normals_forward_cuda", ([&] {
        FaceAreasNormalsForwardKernel<scalar_t><<<blocks, threads>>>(
            verts.data_ptr<scalar_t>(),
            faces.data_ptr<int64_t>(),
diff --git a/pytorch3d/csrc/face_areas_normals/face_areas_normals.h b/pytorch3d/csrc/face_areas_normals/face_areas_normals.h
index 0617368e..ad5d5065 100644
--- a/pytorch3d/csrc/face_areas_normals/face_areas_normals.h
+++ b/pytorch3d/csrc/face_areas_normals/face_areas_normals.h
@@ -44,7 +44,7 @@ at::Tensor FaceAreasNormalsBackwardCuda(
 std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForward(
     const at::Tensor verts,
     const at::Tensor faces) {
-  if (verts.type().is_cuda() && faces.type().is_cuda()) {
+  if (verts.is_cuda() && faces.is_cuda()) {
 #ifdef WITH_CUDA
     return FaceAreasNormalsForwardCuda(verts, faces);
 #else
@@ -60,7 +60,7 @@ at::Tensor FaceAreasNormalsBackward(
     const at::Tensor grad_normals,
     const at::Tensor verts,
     const at::Tensor faces) {
-  if (verts.type().is_cuda() && faces.type().is_cuda()) {
+  if (verts.is_cuda() && faces.is_cuda()) {
 #ifdef WITH_CUDA
     return FaceAreasNormalsBackwardCuda(grad_areas, grad_normals, verts, faces);
 #else
diff --git a/pytorch3d/csrc/gather_scatter/gather_scatter.h b/pytorch3d/csrc/gather_scatter/gather_scatter.h
index 6b88e38a..e5199c71 100644
--- a/pytorch3d/csrc/gather_scatter/gather_scatter.h
+++ b/pytorch3d/csrc/gather_scatter/gather_scatter.h
@@ -32,7 +32,7 @@ at::Tensor gather_scatter(
     const at::Tensor edges,
     bool directed,
     bool backward) {
-  if (input.type().is_cuda() && edges.type().is_cuda()) {
+  if (input.is_cuda() && edges.is_cuda()) {
 #ifdef WITH_CUDA
     return gather_scatter_cuda(input, edges, directed, backward);
 #else
diff --git a/pytorch3d/csrc/nearest_neighbor_points/nearest_neighbor_points.cu b/pytorch3d/csrc/nearest_neighbor_points/nearest_neighbor_points.cu
index d5bebb66..ca9ac1f3 100644
--- a/pytorch3d/csrc/nearest_neighbor_points/nearest_neighbor_points.cu
+++ b/pytorch3d/csrc/nearest_neighbor_points/nearest_neighbor_points.cu
@@ -228,22 +228,22 @@ at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2) {
 
   if (D == 3) {
     // Use the specialized kernel for D=3.
-    AT_DISPATCH_FLOATING_TYPES(p1.type(), "nearest_neighbor_v3_cuda", ([&] {
-      size_t shared_size = threads * sizeof(size_t) +
-          threads * sizeof(int64_t);
-      NearestNeighborKernelD3<scalar_t>
-          <<<blocks, threads, shared_size>>>(
-              p1.data_ptr<scalar_t>(),
-              p2.data_ptr<scalar_t>(),
-              idx.data_ptr<int64_t>(),
-              N,
-              P1,
-              P2);
-    }));
+    AT_DISPATCH_FLOATING_TYPES(
+        p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
+          size_t shared_size =
+              threads * sizeof(size_t) + threads * sizeof(int64_t);
+          NearestNeighborKernelD3<scalar_t><<<blocks, threads, shared_size>>>(
+              p1.data_ptr<scalar_t>(),
+              p2.data_ptr<scalar_t>(),
+              idx.data_ptr<int64_t>(),
+              N,
+              P1,
+              P2);
+        }));
   } else {
     // Use the general kernel for all other D.
     AT_DISPATCH_FLOATING_TYPES(
-        p1.type(), "nearest_neighbor_v3_cuda", ([&] {
+        p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
           // To avoid misaligned memory access, the size of shared buffers
           // need to be rounded to the next even size.
           size_t D_2 = D + (D % 2);
diff --git a/pytorch3d/csrc/nearest_neighbor_points/nearest_neighbor_points.h b/pytorch3d/csrc/nearest_neighbor_points/nearest_neighbor_points.h
index 99f3a944..7b447233 100644
--- a/pytorch3d/csrc/nearest_neighbor_points/nearest_neighbor_points.h
+++ b/pytorch3d/csrc/nearest_neighbor_points/nearest_neighbor_points.h
@@ -29,7 +29,7 @@ at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2);
 
 // Implementation which is exposed.
 at::Tensor NearestNeighborIdx(at::Tensor p1, at::Tensor p2) {
-  if (p1.type().is_cuda() && p2.type().is_cuda()) {
+  if (p1.is_cuda() && p2.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(p1);
     CHECK_CONTIGUOUS_CUDA(p2);
diff --git a/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu b/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu
index e4fb881e..92447f8d 100644
--- a/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu
+++ b/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu
@@ -128,7 +128,7 @@ at::Tensor PackedToPaddedCuda(
   const int blocks = batch_size;
   if (D == 1) {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_packed.type(), "packed_to_padded_d1_kernel", ([&] {
+        inputs_packed.scalar_type(), "packed_to_padded_d1_kernel", ([&] {
           PackedToPaddedKernelD1<scalar_t><<<blocks, threads>>>(
               inputs_packed.data_ptr<scalar_t>(),
               first_idxs.data_ptr<int64_t>(),
@@ -139,7 +139,7 @@ at::Tensor PackedToPaddedCuda(
         }));
   } else {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_packed.type(), "packed_to_padded_kernel", ([&] {
+        inputs_packed.scalar_type(), "packed_to_padded_kernel", ([&] {
           PackedToPaddedKernel<scalar_t><<<blocks, threads>>>(
               inputs_packed.data_ptr<scalar_t>(),
               first_idxs.data_ptr<int64_t>(),
@@ -175,7 +175,7 @@ at::Tensor PaddedToPackedCuda(
 
   if (D == 1) {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_padded.type(), "padded_to_packed_d1_kernel", ([&] {
+        inputs_padded.scalar_type(), "padded_to_packed_d1_kernel", ([&] {
           PaddedToPackedKernelD1<scalar_t><<<blocks, threads>>>(
               inputs_padded.data_ptr<scalar_t>(),
               first_idxs.data_ptr<int64_t>(),
@@ -186,7 +186,7 @@ at::Tensor PaddedToPackedCuda(
         }));
   } else {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_padded.type(), "padded_to_packed_kernel", ([&] {
+        inputs_padded.scalar_type(), "padded_to_packed_kernel", ([&] {
           PaddedToPackedKernel<scalar_t><<<blocks, threads>>>(
               inputs_padded.data_ptr<scalar_t>(),
               first_idxs.data_ptr<int64_t>(),
diff --git a/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.h b/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.h
index f9ef6ed1..c272bb3e 100644
--- a/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.h
+++ b/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.h
@@ -72,7 +72,7 @@ at::Tensor PackedToPadded(
     const at::Tensor inputs_packed,
     const at::Tensor first_idxs,
     const int64_t max_size) {
-  if (inputs_packed.type().is_cuda()) {
+  if (inputs_packed.is_cuda()) {
 #ifdef WITH_CUDA
     return PackedToPaddedCuda(inputs_packed, first_idxs, max_size);
 #else
@@ -87,7 +87,7 @@ at::Tensor PaddedToPacked(
     const at::Tensor inputs_padded,
     const at::Tensor first_idxs,
     const int64_t num_inputs) {
-  if (inputs_padded.type().is_cuda()) {
+  if (inputs_padded.is_cuda()) {
 #ifdef WITH_CUDA
     return PaddedToPackedCuda(inputs_padded, first_idxs, num_inputs);
 #else
diff --git a/pytorch3d/csrc/pytorch3d_cutils.h b/pytorch3d/csrc/pytorch3d_cutils.h
index 585de032..c8d2853e 100644
--- a/pytorch3d/csrc/pytorch3d_cutils.h
+++ b/pytorch3d/csrc/pytorch3d_cutils.h
@@ -3,8 +3,7 @@
 #pragma once
 #include <torch/extension.h>
 
-#define CHECK_CUDA(x) \
-  AT_ASSERTM(x.type().is_cuda(), #x "must be a CUDA tensor.")
+#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x "must be a CUDA tensor.")
 #define CHECK_CONTIGUOUS(x) \
   AT_ASSERTM(x.is_contiguous(), #x "must be contiguous.")
 #define CHECK_CONTIGUOUS_CUDA(x) \
diff --git a/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h b/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h
index 34173019..1131d986 100644
--- a/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h
+++ b/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h
@@ -82,7 +82,7 @@ RasterizeMeshesNaive(
     const int faces_per_pixel,
     const bool perspective_correct) {
   // TODO: Better type checking.
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesNaiveCuda(
         face_verts,
@@ -160,7 +160,7 @@ torch::Tensor RasterizeMeshesBackward(
     const torch::Tensor& grad_bary,
     const torch::Tensor& grad_dists,
     const bool perspective_correct) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesBackwardCuda(
         face_verts,
@@ -236,7 +236,7 @@ torch::Tensor RasterizeMeshesCoarse(
     const float blur_radius,
     const int bin_size,
     const int max_faces_per_bin) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesCoarseCuda(
         face_verts,
@@ -322,7 +322,7 @@ RasterizeMeshesFine(
     const int bin_size,
     const int faces_per_pixel,
     const bool perspective_correct) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesFineCuda(
         face_verts,
diff --git a/pytorch3d/csrc/rasterize_points/rasterize_points.h b/pytorch3d/csrc/rasterize_points/rasterize_points.h
index e171db4b..ea59732f 100644
--- a/pytorch3d/csrc/rasterize_points/rasterize_points.h
+++ b/pytorch3d/csrc/rasterize_points/rasterize_points.h
@@ -63,8 +63,8 @@ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaive(
     const int image_size,
     const float radius,
     const int points_per_pixel) {
-  if (points.type().is_cuda() && cloud_to_packed_first_idx.type().is_cuda() &&
-      num_points_per_cloud.type().is_cuda()) {
+  if (points.is_cuda() && cloud_to_packed_first_idx.is_cuda() &&
+      num_points_per_cloud.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsNaiveCuda(
         points,
@@ -137,8 +137,8 @@ torch::Tensor RasterizePointsCoarse(
     const float radius,
     const int bin_size,
     const int max_points_per_bin) {
-  if (points.type().is_cuda() && cloud_to_packed_first_idx.type().is_cuda() &&
-      num_points_per_cloud.type().is_cuda()) {
+  if (points.is_cuda() && cloud_to_packed_first_idx.is_cuda() &&
+      num_points_per_cloud.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsCoarseCuda(
         points,
@@ -206,7 +206,7 @@ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFine(
     const float radius,
     const int bin_size,
     const int points_per_pixel) {
-  if (points.type().is_cuda()) {
+  if (points.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsFineCuda(
         points, bin_points, image_size, radius, bin_size, points_per_pixel);
@@ -255,7 +255,7 @@ torch::Tensor RasterizePointsBackward(
     const torch::Tensor& idxs,
     const torch::Tensor& grad_zbuf,
     const torch::Tensor& grad_dists) {
-  if (points.type().is_cuda()) {
+  if (points.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsBackwardCuda(points, idxs, grad_zbuf, grad_dists);
 #else
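
Note: every hunk above applies the same migration away from the deprecated at::Tensor::type() accessor: t.type().is_cuda() becomes t.is_cuda(), and the first argument of AT_DISPATCH_FLOATING_TYPES becomes t.scalar_type(). The sketch below is not part of the patch; it is a minimal, hypothetical operator (ScaleOp, with a made-up CUDA entry point) that shows the new-style calls as they are used in the files above.

#include <torch/extension.h>

// Hypothetical op, used only to illustrate the modern tensor accessors.
at::Tensor ScaleOp(const at::Tensor& input, double factor) {
  if (input.is_cuda()) { // new style; previously input.type().is_cuda()
#ifdef WITH_CUDA
    // A real extension would dispatch to a CUDA implementation here,
    // e.g. return ScaleOpCuda(input, factor); (hypothetical).
    AT_ERROR("ScaleOpCuda is hypothetical and not implemented.");
#else
    AT_ERROR("Not compiled with GPU support.");
#endif
  }
  at::Tensor in = input.contiguous();
  at::Tensor out = at::empty_like(in);
  // New style: dispatch on scalar_type(); previously in.type() was passed.
  AT_DISPATCH_FLOATING_TYPES(in.scalar_type(), "scale_op_cpu", ([&] {
    const scalar_t* src = in.data_ptr<scalar_t>();
    scalar_t* dst = out.data_ptr<scalar_t>();
    for (int64_t i = 0; i < in.numel(); ++i) {
      dst[i] = src[i] * static_cast<scalar_t>(factor);
    }
  }));
  return out;
}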