Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-11-04 18:02:14 +08:00)
	Error instead of crash for tensors on exotic devices (#1986)
Summary:
Pull Request resolved: https://github.com/facebookresearch/pytorch3d/pull/1986

Adds device checks to prevent crashes on unsupported devices in PyTorch3D. Updates `pytorch3d_cutils.h` to include a new macro, CHECK_CPU, which checks that a tensor is on the CPU device. The macro is then used in the directories from `ball_query` to `face_areas_normals` to ensure that tensors are not on unsupported devices such as MPS.

Note that this is the first part of a larger change; to keep diffs better organized, subsequent diffs will update the remaining directories.

Reviewed By: bottler

Differential Revision: D77473296

fbshipit-source-id: 13dc84620dee667bddebad1dade2d2cb5a59c737
parent 71db7a0ea2
commit 177eec6378
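Every touched entry point follows the same shape: the host-side dispatch function branches on `is_cuda()`, and the new `CHECK_CPU` call guards the CPU fallback so a tensor on an unsupported backend such as MPS fails with a readable error instead of crashing inside the CPU kernel. A minimal sketch of that pattern, assuming hypothetical `ExampleOp*` names in place of the real functions in the hunks below (the `CHECK_CPU` definition matches the one added to `pytorch3d_cutils.h` in this commit):

    // Sketch only: ExampleOp, ExampleOpCpu and ExampleOpCuda are illustrative names.
    #include <torch/extension.h>

    #define CHECK_CPU(x)                    \
      TORCH_CHECK(                          \
          x.device().type() == torch::kCPU, \
          "Cannot use CPU implementation: " #x " not on CPU.")

    at::Tensor ExampleOpCuda(const at::Tensor& input); // provided by the CUDA build only

    at::Tensor ExampleOpCpu(const at::Tensor& input) {
      return input * 2; // stand-in for a real CPU kernel
    }

    at::Tensor ExampleOp(const at::Tensor& input) {
      if (input.is_cuda()) {
    #ifdef WITH_CUDA
        return ExampleOpCuda(input.contiguous());
    #else
        AT_ERROR("Not compiled with GPU support.");
    #endif
      }
      // New in this commit: a tensor on MPS (or any other non-CPU, non-CUDA
      // device) fails this check with a clear error instead of being handed
      // to the CPU kernel.
      CHECK_CPU(input);
      return ExampleOpCpu(input.contiguous());
    }
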
@@ -81,6 +81,8 @@ inline std::tuple<at::Tensor, at::Tensor> BallQuery(
     AT_ERROR("Not compiled with GPU support.");
 #endif
   }
+  CHECK_CPU(p1);
+  CHECK_CPU(p2);
   return BallQueryCpu(
       p1.contiguous(),
       p2.contiguous(),
@@ -98,6 +98,11 @@ at::Tensor SigmoidAlphaBlendBackward(
     AT_ERROR("Not compiled with GPU support.");
 #endif
   }
+  CHECK_CPU(distances);
+  CHECK_CPU(pix_to_face);
+  CHECK_CPU(alphas);
+  CHECK_CPU(grad_alphas);
+
   return SigmoidAlphaBlendBackwardCpu(
       grad_alphas, alphas, distances, pix_to_face, sigma);
 }
@@ -74,6 +74,9 @@ torch::Tensor alphaCompositeForward(
     AT_ERROR("Not compiled with GPU support");
 #endif
   } else {
+    CHECK_CPU(features);
+    CHECK_CPU(alphas);
+    CHECK_CPU(points_idx);
     return alphaCompositeCpuForward(features, alphas, points_idx);
   }
 }
@@ -101,6 +104,11 @@ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
     AT_ERROR("Not compiled with GPU support");
 #endif
   } else {
+    CHECK_CPU(grad_outputs);
+    CHECK_CPU(features);
+    CHECK_CPU(alphas);
+    CHECK_CPU(points_idx);
+
     return alphaCompositeCpuBackward(
         grad_outputs, features, alphas, points_idx);
   }
@@ -73,6 +73,10 @@ torch::Tensor weightedSumNormForward(
     AT_ERROR("Not compiled with GPU support");
 #endif
   } else {
+    CHECK_CPU(features);
+    CHECK_CPU(alphas);
+    CHECK_CPU(points_idx);
+
     return weightedSumNormCpuForward(features, alphas, points_idx);
   }
 }
@@ -100,6 +104,11 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumNormBackward(
     AT_ERROR("Not compiled with GPU support");
 #endif
   } else {
+    CHECK_CPU(grad_outputs);
+    CHECK_CPU(features);
+    CHECK_CPU(alphas);
+    CHECK_CPU(points_idx);
+
     return weightedSumNormCpuBackward(
         grad_outputs, features, alphas, points_idx);
   }
@@ -72,6 +72,9 @@ torch::Tensor weightedSumForward(
     AT_ERROR("Not compiled with GPU support");
 #endif
   } else {
+    CHECK_CPU(features);
+    CHECK_CPU(alphas);
+    CHECK_CPU(points_idx);
     return weightedSumCpuForward(features, alphas, points_idx);
   }
 }
@@ -98,6 +101,11 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumBackward(
     AT_ERROR("Not compiled with GPU support");
 #endif
   } else {
+    CHECK_CPU(grad_outputs);
+    CHECK_CPU(features);
+    CHECK_CPU(alphas);
+    CHECK_CPU(points_idx);
+
     return weightedSumCpuBackward(grad_outputs, features, alphas, points_idx);
   }
 }
@@ -60,6 +60,8 @@ std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForward(
     AT_ERROR("Not compiled with GPU support.");
 #endif
   }
+  CHECK_CPU(verts);
+  CHECK_CPU(faces);
   return FaceAreasNormalsForwardCpu(verts, faces);
 }
 
@@ -80,5 +82,9 @@ at::Tensor FaceAreasNormalsBackward(
     AT_ERROR("Not compiled with GPU support.");
 #endif
   }
+  CHECK_CPU(grad_areas);
+  CHECK_CPU(grad_normals);
+  CHECK_CPU(verts);
+  CHECK_CPU(faces);
   return FaceAreasNormalsBackwardCpu(grad_areas, grad_normals, verts, faces);
 }
@@ -15,3 +15,7 @@
 #define CHECK_CONTIGUOUS_CUDA(x) \
   CHECK_CUDA(x);                 \
   CHECK_CONTIGUOUS(x)
+#define CHECK_CPU(x)                    \
+  TORCH_CHECK(                          \
+      x.device().type() == torch::kCPU, \
+      "Cannot use CPU implementation: " #x " not on CPU.")
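For reference, `TORCH_CHECK` throws a `c10::Error` (surfaced in Python as a `RuntimeError`) when its condition is false, and the `#x` in the macro stringizes the checked argument into the message. A hypothetical call site, repeating the macro from the hunk above so the snippet stands alone (`RequireCpu` and `verts` are illustrative names, not part of the diff):

    #include <torch/extension.h>

    // Copied from the pytorch3d_cutils.h hunk above for self-containment.
    #define CHECK_CPU(x)                    \
      TORCH_CHECK(                          \
          x.device().type() == torch::kCPU, \
          "Cannot use CPU implementation: " #x " not on CPU.")

    void RequireCpu(const at::Tensor& verts) {
      // If verts lives on MPS, this throws a c10::Error whose message
      // includes: "Cannot use CPU implementation: verts not on CPU."
      CHECK_CPU(verts);
    }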