From d689baac5ede7be237645518d1b0575f93ac1ceb Mon Sep 17 00:00:00 2001 From: Georgia Gkioxari Date: Wed, 20 May 2020 09:25:44 -0700 Subject: [PATCH] fix alpha compositing Summary: Fix division by zero when alpha is 1.0 In this case, the numerator is already 0 and we need to make sure division with 0 does not occur which would produce nans Reviewed By: nikhilaravi Differential Revision: D21650478 fbshipit-source-id: bc457105b3050fef1c8bd4e58e7d6d15c0c81ffd --- pytorch3d/csrc/compositing/alpha_composite.cu | 4 +++- pytorch3d/csrc/compositing/alpha_composite_cpu.cpp | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/pytorch3d/csrc/compositing/alpha_composite.cu b/pytorch3d/csrc/compositing/alpha_composite.cu index d9c33bee..389b95f8 100644 --- a/pytorch3d/csrc/compositing/alpha_composite.cu +++ b/pytorch3d/csrc/compositing/alpha_composite.cu @@ -11,6 +11,8 @@ #include #include +__constant__ const float kEpsilon = 1e-9; + // TODO(gkioxari) support all data types once AtomicAdd supports doubles. // Currently, support is for floats only. 
__global__ void alphaCompositeCudaForwardKernel( @@ -126,7 +128,7 @@ __global__ void alphaCompositeCudaBackwardKernel( atomicAdd( &grad_alphas[batch][t][j][i], -grad_outputs[batch][ch][j][i] * features[ch][n_idx] * cum_alpha * - alpha / (1 - alpha_tvalue)); + alpha / (1 - alpha_tvalue + kEpsilon)); } cum_alpha = cum_alpha * (1 - alphas[batch][k][j][i]); diff --git a/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp b/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp index cc500c53..a10e0faa 100644 --- a/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp +++ b/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp @@ -5,6 +5,9 @@ #include #include +// Epsilon float +const float kEps = 1e-9; + torch::Tensor alphaCompositeCpuForward( const torch::Tensor& features, const torch::Tensor& alphas, @@ -101,7 +104,8 @@ std::tuple alphaCompositeCpuBackward( } float alpha_tvalue = alphas_a[b][t][j][i]; grad_alphas_a[b][t][j][i] -= grad_outputs_a[b][c][j][i] * - features_a[c][n_idx] * cum_alpha * alpha / (1 - alpha_tvalue); + features_a[c][n_idx] * cum_alpha * alpha / + (1 - alpha_tvalue + kEps); } cum_alpha = cum_alpha * (1 - alpha);