type() deprecated

Summary:
Replace `tensor.type().is_cuda()` with the preferred `tensor.is_cuda()`.
Replace `AT_DISPATCH_FLOATING_TYPES(tensor.type(), ...` with `AT_DISPATCH_FLOATING_TYPES(tensor.scalar_type(), ...`.
These changes avoid deprecation warnings with future PyTorch versions.
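For reference, a minimal before/after sketch of both replacements (the ExampleScale function, its input tensor, and the doubling loop are illustrative only, not part of this commit). `tensor.type()` returns the deprecated `DeprecatedTypeProperties` object, whereas `is_cuda()` and `scalar_type()` query the device and dtype directly, which is what the dispatch macros expect.

// Sketch only; names here are hypothetical and not from this diff.
#include <torch/extension.h>

at::Tensor ExampleScale(const at::Tensor& input) {
  // Previously: if (input.type().is_cuda()) { ... }
  if (input.is_cuda()) {
    AT_ERROR("ExampleScale: CUDA path not implemented in this sketch.");
  }
  at::Tensor in = input.contiguous();
  at::Tensor out = at::empty_like(in);
  // Previously: AT_DISPATCH_FLOATING_TYPES(input.type(), "example_scale", ...)
  AT_DISPATCH_FLOATING_TYPES(in.scalar_type(), "example_scale", ([&] {
    const scalar_t* src = in.data_ptr<scalar_t>();
    scalar_t* dst = out.data_ptr<scalar_t>();
    // Simple CPU loop standing in for a real kernel.
    for (int64_t i = 0; i < in.numel(); ++i) {
      dst[i] = src[i] * static_cast<scalar_t>(2);
    }
  }));
  return out;
}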

Reviewed By: nikhilaravi

Differential Revision: D20646565

fbshipit-source-id: 1a0c15978c871af816b1dd7d4a7ea78242abd95e
Author: Jeremy Reizenstein (2020-03-26 03:59:14 -07:00)
Committed by: Facebook GitHub Bot
Parent: e22d431e5b
Commit: 81a4aa18ad
13 changed files with 41 additions and 42 deletions

@@ -58,7 +58,7 @@ torch::Tensor alphaCompositeForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -86,7 +86,7 @@ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);

@@ -56,7 +56,7 @@ torch::Tensor weightedSumNormForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -85,7 +85,7 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumNormBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);

@@ -56,7 +56,7 @@ torch::Tensor weightedSumForward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (features.type().is_cuda()) {
+  if (features.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
@@ -84,7 +84,7 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumBackward(
   alphas = alphas.contiguous();
   points_idx = points_idx.contiguous();
 
-  if (grad_outputs.type().is_cuda()) {
+  if (grad_outputs.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(grad_outputs);
     CHECK_CONTIGUOUS_CUDA(features);

@@ -219,7 +219,7 @@ std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCuda(
   const int blocks = 64;
   const int threads = 512;
   AT_DISPATCH_FLOATING_TYPES(
-      verts.type(), "face_areas_normals_forward_cuda", ([&] {
+      verts.scalar_type(), "face_areas_normals_forward_cuda", ([&] {
         FaceAreasNormalsForwardKernel<scalar_t><<<blocks, threads>>>(
             verts.data_ptr<scalar_t>(),
             faces.data_ptr<int64_t>(),

@@ -44,7 +44,7 @@ at::Tensor FaceAreasNormalsBackwardCuda(
 std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForward(
     const at::Tensor verts,
     const at::Tensor faces) {
-  if (verts.type().is_cuda() && faces.type().is_cuda()) {
+  if (verts.is_cuda() && faces.is_cuda()) {
 #ifdef WITH_CUDA
     return FaceAreasNormalsForwardCuda(verts, faces);
 #else
@@ -60,7 +60,7 @@ at::Tensor FaceAreasNormalsBackward(
     const at::Tensor grad_normals,
     const at::Tensor verts,
     const at::Tensor faces) {
-  if (verts.type().is_cuda() && faces.type().is_cuda()) {
+  if (verts.is_cuda() && faces.is_cuda()) {
 #ifdef WITH_CUDA
     return FaceAreasNormalsBackwardCuda(grad_areas, grad_normals, verts, faces);
 #else

@@ -32,7 +32,7 @@ at::Tensor gather_scatter(
     const at::Tensor edges,
     bool directed,
     bool backward) {
-  if (input.type().is_cuda() && edges.type().is_cuda()) {
+  if (input.is_cuda() && edges.is_cuda()) {
 #ifdef WITH_CUDA
     return gather_scatter_cuda(input, edges, directed, backward);
 #else

@@ -228,22 +228,22 @@ at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2) {
 
   if (D == 3) {
     // Use the specialized kernel for D=3.
-    AT_DISPATCH_FLOATING_TYPES(p1.type(), "nearest_neighbor_v3_cuda", ([&] {
-      size_t shared_size = threads * sizeof(size_t) +
-                           threads * sizeof(int64_t);
-      NearestNeighborKernelD3<scalar_t>
-          <<<blocks, threads, shared_size>>>(
-              p1.data_ptr<scalar_t>(),
-              p2.data_ptr<scalar_t>(),
-              idx.data_ptr<int64_t>(),
-              N,
-              P1,
-              P2);
-    }));
+    AT_DISPATCH_FLOATING_TYPES(
+        p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
+          size_t shared_size =
+              threads * sizeof(size_t) + threads * sizeof(int64_t);
+          NearestNeighborKernelD3<scalar_t><<<blocks, threads, shared_size>>>(
+              p1.data_ptr<scalar_t>(),
+              p2.data_ptr<scalar_t>(),
+              idx.data_ptr<int64_t>(),
+              N,
+              P1,
+              P2);
+        }));
   } else {
     // Use the general kernel for all other D.
     AT_DISPATCH_FLOATING_TYPES(
-        p1.type(), "nearest_neighbor_v3_cuda", ([&] {
+        p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
           // To avoid misaligned memory access, the size of shared buffers
           // need to be rounded to the next even size.
           size_t D_2 = D + (D % 2);

@@ -29,7 +29,7 @@ at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2);
 
 // Implementation which is exposed.
 at::Tensor NearestNeighborIdx(at::Tensor p1, at::Tensor p2) {
-  if (p1.type().is_cuda() && p2.type().is_cuda()) {
+  if (p1.is_cuda() && p2.is_cuda()) {
 #ifdef WITH_CUDA
     CHECK_CONTIGUOUS_CUDA(p1);
     CHECK_CONTIGUOUS_CUDA(p2);

@@ -128,7 +128,7 @@ at::Tensor PackedToPaddedCuda(
   const int blocks = batch_size;
   if (D == 1) {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_packed.type(), "packed_to_padded_d1_kernel", ([&] {
+        inputs_packed.scalar_type(), "packed_to_padded_d1_kernel", ([&] {
          PackedToPaddedKernelD1<scalar_t><<<blocks, threads>>>(
              inputs_packed.data_ptr<scalar_t>(),
              first_idxs.data_ptr<int64_t>(),
@@ -139,7 +139,7 @@ at::Tensor PackedToPaddedCuda(
         }));
   } else {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_packed.type(), "packed_to_padded_kernel", ([&] {
+        inputs_packed.scalar_type(), "packed_to_padded_kernel", ([&] {
          PackedToPaddedKernel<scalar_t><<<blocks, threads>>>(
              inputs_packed.data_ptr<scalar_t>(),
              first_idxs.data_ptr<int64_t>(),
@@ -175,7 +175,7 @@ at::Tensor PaddedToPackedCuda(
 
   if (D == 1) {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_padded.type(), "padded_to_packed_d1_kernel", ([&] {
+        inputs_padded.scalar_type(), "padded_to_packed_d1_kernel", ([&] {
          PaddedToPackedKernelD1<scalar_t><<<blocks, threads>>>(
              inputs_padded.data_ptr<scalar_t>(),
              first_idxs.data_ptr<int64_t>(),
@@ -186,7 +186,7 @@ at::Tensor PaddedToPackedCuda(
         }));
   } else {
     AT_DISPATCH_FLOATING_TYPES(
-        inputs_padded.type(), "padded_to_packed_kernel", ([&] {
+        inputs_padded.scalar_type(), "padded_to_packed_kernel", ([&] {
          PaddedToPackedKernel<scalar_t><<<blocks, threads>>>(
              inputs_padded.data_ptr<scalar_t>(),
              first_idxs.data_ptr<int64_t>(),

@@ -72,7 +72,7 @@ at::Tensor PackedToPadded(
     const at::Tensor inputs_packed,
     const at::Tensor first_idxs,
     const int64_t max_size) {
-  if (inputs_packed.type().is_cuda()) {
+  if (inputs_packed.is_cuda()) {
 #ifdef WITH_CUDA
     return PackedToPaddedCuda(inputs_packed, first_idxs, max_size);
 #else
@@ -87,7 +87,7 @@ at::Tensor PaddedToPacked(
     const at::Tensor inputs_padded,
     const at::Tensor first_idxs,
     const int64_t num_inputs) {
-  if (inputs_padded.type().is_cuda()) {
+  if (inputs_padded.is_cuda()) {
 #ifdef WITH_CUDA
     return PaddedToPackedCuda(inputs_padded, first_idxs, num_inputs);
 #else

@@ -3,8 +3,7 @@
 #pragma once
 #include <torch/extension.h>
 
-#define CHECK_CUDA(x) \
-  AT_ASSERTM(x.type().is_cuda(), #x "must be a CUDA tensor.")
+#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x "must be a CUDA tensor.")
 #define CHECK_CONTIGUOUS(x) \
   AT_ASSERTM(x.is_contiguous(), #x "must be contiguous.")
 #define CHECK_CONTIGUOUS_CUDA(x) \

@@ -82,7 +82,7 @@ RasterizeMeshesNaive(
     const int faces_per_pixel,
     const bool perspective_correct) {
   // TODO: Better type checking.
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesNaiveCuda(
         face_verts,
@@ -160,7 +160,7 @@ torch::Tensor RasterizeMeshesBackward(
     const torch::Tensor& grad_bary,
     const torch::Tensor& grad_dists,
     const bool perspective_correct) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesBackwardCuda(
         face_verts,
@@ -236,7 +236,7 @@ torch::Tensor RasterizeMeshesCoarse(
     const float blur_radius,
     const int bin_size,
     const int max_faces_per_bin) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesCoarseCuda(
         face_verts,
@@ -322,7 +322,7 @@ RasterizeMeshesFine(
     const int bin_size,
     const int faces_per_pixel,
     const bool perspective_correct) {
-  if (face_verts.type().is_cuda()) {
+  if (face_verts.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizeMeshesFineCuda(
         face_verts,

@@ -63,8 +63,8 @@ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaive(
     const int image_size,
     const float radius,
     const int points_per_pixel) {
-  if (points.type().is_cuda() && cloud_to_packed_first_idx.type().is_cuda() &&
-      num_points_per_cloud.type().is_cuda()) {
+  if (points.is_cuda() && cloud_to_packed_first_idx.is_cuda() &&
+      num_points_per_cloud.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsNaiveCuda(
         points,
@@ -137,8 +137,8 @@ torch::Tensor RasterizePointsCoarse(
     const float radius,
     const int bin_size,
     const int max_points_per_bin) {
-  if (points.type().is_cuda() && cloud_to_packed_first_idx.type().is_cuda() &&
-      num_points_per_cloud.type().is_cuda()) {
+  if (points.is_cuda() && cloud_to_packed_first_idx.is_cuda() &&
+      num_points_per_cloud.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsCoarseCuda(
         points,
@@ -206,7 +206,7 @@ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFine(
     const float radius,
     const int bin_size,
     const int points_per_pixel) {
-  if (points.type().is_cuda()) {
+  if (points.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsFineCuda(
         points, bin_points, image_size, radius, bin_size, points_per_pixel);
@@ -255,7 +255,7 @@ torch::Tensor RasterizePointsBackward(
     const torch::Tensor& idxs,
     const torch::Tensor& grad_zbuf,
     const torch::Tensor& grad_dists) {
-  if (points.type().is_cuda()) {
+  if (points.is_cuda()) {
 #ifdef WITH_CUDA
     return RasterizePointsBackwardCuda(points, idxs, grad_zbuf, grad_dists);
 #else