type() deprecated

Summary:
Replace `tensor.type().is_cuda()` with the preferred `tensor.is_cuda()`.
Replace `AT_DISPATCH_FLOATING_TYPES(tensor.type(), ...` with `AT_DISPATCH_FLOATING_TYPES(tensor.scalar_type(), ...`.
These changes avoid deprecation warnings with future PyTorch versions.
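
For context, the pattern applied throughout the diff below is illustrated by this minimal sketch of a custom ATen op. The `scale_cpu` function is hypothetical and not part of this change; the comments mark the deprecated calls and their replacements.

```cpp
// Hypothetical example for illustration only; not part of this commit.
#include <torch/extension.h>

at::Tensor scale_cpu(const at::Tensor input, double factor) {
  // Deprecated: input.type().is_cuda()  ->  Preferred: input.is_cuda()
  if (input.is_cuda()) {
    AT_ERROR("scale_cpu is a CPU-only example.");
  }
  const at::Tensor in = input.contiguous();
  at::Tensor out = at::empty_like(in);
  // Deprecated: AT_DISPATCH_FLOATING_TYPES(in.type(), ...)
  // Preferred:  AT_DISPATCH_FLOATING_TYPES(in.scalar_type(), ...)
  AT_DISPATCH_FLOATING_TYPES(in.scalar_type(), "scale_cpu", ([&] {
    const scalar_t* in_ptr = in.data_ptr<scalar_t>();
    scalar_t* out_ptr = out.data_ptr<scalar_t>();
    for (int64_t i = 0; i < in.numel(); ++i) {
      out_ptr[i] = in_ptr[i] * static_cast<scalar_t>(factor);
    }
  }));
  return out;
}
```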

Reviewed By: nikhilaravi

Differential Revision: D20646565

fbshipit-source-id: 1a0c15978c871af816b1dd7d4a7ea78242abd95e
Jeremy Reizenstein 2020-03-26 03:59:14 -07:00 committed by Facebook GitHub Bot
parent e22d431e5b
commit 81a4aa18ad
13 changed files with 41 additions and 42 deletions

@@ -58,7 +58,7 @@ torch::Tensor alphaCompositeForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -86,7 +86,7 @@ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);

@@ -56,7 +56,7 @@ torch::Tensor weightedSumNormForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -85,7 +85,7 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumNormBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);

@@ -56,7 +56,7 @@ torch::Tensor weightedSumForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -84,7 +84,7 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);

@@ -219,7 +219,7 @@ std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCuda(
   const int blocks = 64;
   const int threads = 512;
   AT_DISPATCH_FLOATING_TYPES(
-      verts.type(), "face_areas_normals_forward_cuda", ([&] {
+      verts.scalar_type(), "face_areas_normals_forward_cuda", ([&] {
        FaceAreasNormalsForwardKernel<scalar_t><<<blocks, threads>>>(
            verts.data_ptr<scalar_t>(),
            faces.data_ptr<int64_t>(),

@@ -44,7 +44,7 @@ at::Tensor FaceAreasNormalsBackwardCuda(
 std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForward(
     const at::Tensor verts,
     const at::Tensor faces) {
-  if (verts.type().is_cuda() && faces.type().is_cuda()) {
+  if (verts.is_cuda() && faces.is_cuda()) {
 #ifdef WITH_CUDA
     return FaceAreasNormalsForwardCuda(verts, faces);
 #else
@@ -60,7 +60,7 @@ at::Tensor FaceAreasNormalsBackward(
     const at::Tensor grad_normals,
     const at::Tensor verts,
     const at::Tensor faces) {
-  if (verts.type().is_cuda() && faces.type().is_cuda()) {
+  if (verts.is_cuda() && faces.is_cuda()) {
 #ifdef WITH_CUDA
     return FaceAreasNormalsBackwardCuda(grad_areas, grad_normals, verts, faces);
 #else

@@ -32,7 +32,7 @@ at::Tensor gather_scatter(
     const at::Tensor edges,
     bool directed,
     bool backward) {
-  if (input.type().is_cuda() && edges.type().is_cuda()) {
+  if (input.is_cuda() && edges.is_cuda()) {
 #ifdef WITH_CUDA
     return gather_scatter_cuda(input, edges, directed, backward);
 #else

@@ -228,11 +228,11 @@ at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2) {
   if (D == 3) {
     // Use the specialized kernel for D=3.
-    AT_DISPATCH_FLOATING_TYPES(p1.type(), "nearest_neighbor_v3_cuda", ([&] {
-      size_t shared_size = threads * sizeof(size_t) +
-                           threads * sizeof(int64_t);
-      NearestNeighborKernelD3<scalar_t>
-          <<<blocks, threads, shared_size>>>(
+    AT_DISPATCH_FLOATING_TYPES(
+        p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
+          size_t shared_size =
+              threads * sizeof(size_t) + threads * sizeof(int64_t);
+          NearestNeighborKernelD3<scalar_t><<<blocks, threads, shared_size>>>(
               p1.data_ptr<scalar_t>(),
               p2.data_ptr<scalar_t>(),
               idx.data_ptr<int64_t>(),
@@ -243,7 +243,7 @@ at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2) {
   } else {
     // Use the general kernel for all other D.
     AT_DISPATCH_FLOATING_TYPES(
-        p1.type(), "nearest_neighbor_v3_cuda", ([&] {
+        p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
          // To avoid misaligned memory access, the size of shared buffers
          // need to be rounded to the next even size.
          size_t D_2 = D + (D % 2);

@@ -29,7 +29,7 @@ at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2);
 // Implementation which is exposed.
 at::Tensor NearestNeighborIdx(at::Tensor p1, at::Tensor p2) {
-  if (p1.type().is_cuda() && p2.type().is_cuda()) {
+  if (p1.is_cuda() && p2.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(p1);
     CHECK_CONTIGUOUS_CUDA(p2);

@@ -128,7 +128,7 @@ at::Tensor PackedToPaddedCuda(
   const int blocks = batch_size;
   if (D == 1) {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_packed.type(), "packed_to_padded_d1_kernel", ([&] {
+        inputs_packed.scalar_type(), "packed_to_padded_d1_kernel", ([&] {
          PackedToPaddedKernelD1<scalar_t><<<blocks, threads>>>(
              inputs_packed.data_ptr<scalar_t>(),
              first_idxs.data_ptr<int64_t>(),
@@ -139,7 +139,7 @@ at::Tensor PackedToPaddedCuda(
         }));
   } else {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_packed.type(), "packed_to_padded_kernel", ([&] {
+        inputs_packed.scalar_type(), "packed_to_padded_kernel", ([&] {
          PackedToPaddedKernel<scalar_t><<<blocks, threads>>>(
              inputs_packed.data_ptr<scalar_t>(),
              first_idxs.data_ptr<int64_t>(),
@@ -175,7 +175,7 @@ at::Tensor PaddedToPackedCuda(
   if (D == 1) {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_padded.type(), "padded_to_packed_d1_kernel", ([&] {
+        inputs_padded.scalar_type(), "padded_to_packed_d1_kernel", ([&] {
          PaddedToPackedKernelD1<scalar_t><<<blocks, threads>>>(
              inputs_padded.data_ptr<scalar_t>(),
              first_idxs.data_ptr<int64_t>(),
@@ -186,7 +186,7 @@ at::Tensor PaddedToPackedCuda(
         }));
   } else {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_padded.type(), "padded_to_packed_kernel", ([&] {
+        inputs_padded.scalar_type(), "padded_to_packed_kernel", ([&] {
          PaddedToPackedKernel<scalar_t><<<blocks, threads>>>(
              inputs_padded.data_ptr<scalar_t>(),
              first_idxs.data_ptr<int64_t>(),

@@ -72,7 +72,7 @@ at::Tensor PackedToPadded(
     const at::Tensor inputs_packed,
     const at::Tensor first_idxs,
     const int64_t max_size) {
-  if (inputs_packed.type().is_cuda()) {
+  if (inputs_packed.is_cuda()) {
 #ifdef WITH_CUDA
     return PackedToPaddedCuda(inputs_packed, first_idxs, max_size);
 #else
@@ -87,7 +87,7 @@ at::Tensor PaddedToPacked(
     const at::Tensor inputs_padded,
     const at::Tensor first_idxs,
     const int64_t num_inputs) {
-  if (inputs_padded.type().is_cuda()) {
+  if (inputs_padded.is_cuda()) {
 #ifdef WITH_CUDA
     return PaddedToPackedCuda(inputs_padded, first_idxs, num_inputs);
 #else

@@ -3,8 +3,7 @@
 #pragma once
 #include <torch/extension.h>
-#define CHECK_CUDA(x) \
-  AT_ASSERTM(x.type().is_cuda(), #x "must be a CUDA tensor.")
+#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x "must be a CUDA tensor.")
 #define CHECK_CONTIGUOUS(x) \
   AT_ASSERTM(x.is_contiguous(), #x "must be contiguous.")
 #define CHECK_CONTIGUOUS_CUDA(x) \

@@ -82,7 +82,7 @@ RasterizeMeshesNaive(
     const int faces_per_pixel,
     const bool perspective_correct) {
   // TODO: Better type checking.
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesNaiveCuda(
         face_verts,
@@ -160,7 +160,7 @@ torch::Tensor RasterizeMeshesBackward(
     const torch::Tensor& grad_bary,
     const torch::Tensor& grad_dists,
     const bool perspective_correct) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesBackwardCuda(
         face_verts,
@@ -236,7 +236,7 @@ torch::Tensor RasterizeMeshesCoarse(
     const float blur_radius,
     const int bin_size,
     const int max_faces_per_bin) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesCoarseCuda(
         face_verts,
@@ -322,7 +322,7 @@ RasterizeMeshesFine(
     const int bin_size,
     const int faces_per_pixel,
     const bool perspective_correct) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesFineCuda(
         face_verts,

@@ -63,8 +63,8 @@ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaive(
     const int image_size,
     const float radius,
     const int points_per_pixel) {
-  if (points.type().is_cuda() && cloud_to_packed_first_idx.type().is_cuda() &&
-      num_points_per_cloud.type().is_cuda()) {
+  if (points.is_cuda() && cloud_to_packed_first_idx.is_cuda() &&
+      num_points_per_cloud.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsNaiveCuda(
         points,
@@ -137,8 +137,8 @@ torch::Tensor RasterizePointsCoarse(
     const float radius,
     const int bin_size,
     const int max_points_per_bin) {
-  if (points.type().is_cuda() && cloud_to_packed_first_idx.type().is_cuda() &&
-      num_points_per_cloud.type().is_cuda()) {
+  if (points.is_cuda() && cloud_to_packed_first_idx.is_cuda() &&
+      num_points_per_cloud.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsCoarseCuda(
         points,
@@ -206,7 +206,7 @@ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFine(
     const float radius,
     const int bin_size,
     const int points_per_pixel) {
-  if (points.type().is_cuda()) {
+  if (points.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsFineCuda(
         points, bin_points, image_size, radius, bin_size, points_per_pixel);
@@ -255,7 +255,7 @@ torch::Tensor RasterizePointsBackward(
     const torch::Tensor& idxs,
     const torch::Tensor& grad_zbuf,
     const torch::Tensor& grad_dists) {
-  if (points.type().is_cuda()) {
+  if (points.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsBackwardCuda(points, idxs, grad_zbuf, grad_dists);
 #else