diff --git a/pytorch3d/csrc/compositing/alpha_composite.cu b/pytorch3d/csrc/compositing/alpha_composite.cu
index d9c33bee..389b95f8 100644
--- a/pytorch3d/csrc/compositing/alpha_composite.cu
+++ b/pytorch3d/csrc/compositing/alpha_composite.cu
@@ -11,6 +11,8 @@
 #include
 #include
 
+__constant__ const float kEpsilon = 1e-9;
+
 // TODO(gkioxari) support all data types once AtomicAdd supports doubles.
 // Currently, support is for floats only.
 __global__ void alphaCompositeCudaForwardKernel(
@@ -126,7 +128,7 @@ __global__ void alphaCompositeCudaBackwardKernel(
         atomicAdd(
             &grad_alphas[batch][t][j][i],
             -grad_outputs[batch][ch][j][i] * features[ch][n_idx] * cum_alpha *
-                alpha / (1 - alpha_tvalue));
+                alpha / (1 - alpha_tvalue + kEpsilon));
       }
 
       cum_alpha = cum_alpha * (1 - alphas[batch][k][j][i]);
diff --git a/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp b/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp
index cc500c53..a10e0faa 100644
--- a/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp
+++ b/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp
@@ -5,6 +5,9 @@
 #include
 #include
 
+// Epsilon float
+const float kEps = 1e-9;
+
 torch::Tensor alphaCompositeCpuForward(
     const torch::Tensor& features,
     const torch::Tensor& alphas,
@@ -101,7 +104,8 @@ std::tuple alphaCompositeCpuBackward(
         }
         float alpha_tvalue = alphas_a[b][t][j][i];
         grad_alphas_a[b][t][j][i] -= grad_outputs_a[b][c][j][i] *
-            features_a[c][n_idx] * cum_alpha * alpha / (1 - alpha_tvalue);
+            features_a[c][n_idx] * cum_alpha * alpha /
+            (1 - alpha_tvalue + kEps);
       }
 
       cum_alpha = cum_alpha * (1 - alpha);
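
For context on the patch above: the added kEps / kEpsilon term guards the grad_alphas denominator against division by zero when a point's alpha reaches 1 (fully opaque), which previously produced inf/nan gradients in the backward pass. The following is a minimal, self-contained C++ sketch of just the guarded term; the function alpha_grad_term and its parameter names are illustrative only (they mirror the variables in the diff but are not part of the PyTorch3D API).

#include <cstdio>

// Small constant mirroring kEps/kEpsilon from the patch.
const float kEps = 1e-9f;

// Illustrative stand-in for the patched gradient expression: the +kEps keeps
// the denominator non-zero when alpha_tvalue == 1.
float alpha_grad_term(
    float grad_out,     // upstream gradient for this channel/pixel
    float feature,      // feature value of the contributing point
    float cum_alpha,    // accumulated transmittance in front of the point
    float alpha,        // alpha of the point being composited
    float alpha_tvalue) // alpha of the point whose gradient is computed
{
  return -grad_out * feature * cum_alpha * alpha / (1 - alpha_tvalue + kEps);
}

int main() {
  // With alpha_tvalue == 1, the unpatched expression would divide by zero;
  // with the epsilon it returns a large but finite value instead.
  std::printf("%g\n", alpha_grad_term(1.0f, 0.5f, 0.8f, 1.0f, 1.0f));
  return 0;
}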