mirror of
https://github.com/facebookresearch/pytorch3d.git
synced 2026-04-07 13:05:58 +08:00
Initial commit
fbshipit-source-id: ad58e416e3ceeca85fae0583308968d04e78fe0d
This commit is contained in:
27
pytorch3d/csrc/ext.cpp
Normal file
27
pytorch3d/csrc/ext.cpp
Normal file
@@ -0,0 +1,27 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <torch/extension.h>
|
||||
#include "face_areas_normals/face_areas_normals.h"
|
||||
#include "gather_scatter/gather_scatter.h"
|
||||
#include "nearest_neighbor_points/nearest_neighbor_points.h"
|
||||
#include "packed_to_padded_tensor/packed_to_padded_tensor.h"
|
||||
#include "rasterize_meshes/rasterize_meshes.h"
|
||||
#include "rasterize_points/rasterize_points.h"
|
||||
|
||||
// Python bindings: registers each C++/CUDA operator on the extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("face_areas_normals", &face_areas_normals);
  m.def("packed_to_padded_tensor", &packed_to_padded_tensor);
  m.def("nn_points_idx", &nn_points_idx);
  m.def("gather_scatter", &gather_scatter);
  m.def("rasterize_points", &RasterizePoints);
  m.def("rasterize_points_backward", &RasterizePointsBackward);
  m.def("rasterize_meshes_backward", &RasterizeMeshesBackward);
  m.def("rasterize_meshes", &RasterizeMeshes);

  // These are only visible for testing; users should not call them directly
  m.def("_rasterize_points_coarse", &RasterizePointsCoarse);
  m.def("_rasterize_points_naive", &RasterizePointsNaive);
  m.def("_rasterize_meshes_naive", &RasterizeMeshesNaive);
  m.def("_rasterize_meshes_coarse", &RasterizeMeshesCoarse);
  m.def("_rasterize_meshes_fine", &RasterizeMeshesFine);
}
|
||||
80
pytorch3d/csrc/face_areas_normals/face_areas_normals.cu
Normal file
80
pytorch3d/csrc/face_areas_normals/face_areas_normals.cu
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <ATen/ATen.h>
|
||||
#include <tuple>
|
||||
|
||||
// Kernel computing, for every mesh face, its area and its unit normal.
//
// Args:
//   verts: flattened (V, 3) buffer of vertex positions.
//   faces: flattened (F, 3) long buffer of vertex indices per face.
//   face_areas: output buffer of F face areas.
//   face_normals: output buffer, flattened (F, 3), of unit face normals.
//   V: number of vertices (not read inside the kernel body).
//   F: number of faces.
template <typename scalar_t>
__global__ void face_areas_kernel(
    const scalar_t* __restrict__ verts,
    const long* __restrict__ faces,
    scalar_t* __restrict__ face_areas,
    scalar_t* __restrict__ face_normals,
    const size_t V,
    const size_t F) {
  // Grid-stride loop setup: tid is this thread's global index, stride the
  // total number of threads in the grid.
  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t stride = gridDim.x * blockDim.x;

  // Faces split evenly over the number of threads in the grid.
  // Each thread computes the area & normal of its respective faces and adds it
  // to the global face_areas tensor.
  for (size_t f = tid; f < F; f += stride) {
    // Indices of the three vertices of face f.
    const long i0 = faces[3 * f + 0];
    const long i1 = faces[3 * f + 1];
    const long i2 = faces[3 * f + 2];

    // Gather the three vertex positions.
    const scalar_t v0_x = verts[3 * i0 + 0];
    const scalar_t v0_y = verts[3 * i0 + 1];
    const scalar_t v0_z = verts[3 * i0 + 2];

    const scalar_t v1_x = verts[3 * i1 + 0];
    const scalar_t v1_y = verts[3 * i1 + 1];
    const scalar_t v1_z = verts[3 * i1 + 2];

    const scalar_t v2_x = verts[3 * i2 + 0];
    const scalar_t v2_y = verts[3 * i2 + 1];
    const scalar_t v2_z = verts[3 * i2 + 2];

    // Edge vectors a = v1 - v0 and b = v2 - v0.
    const scalar_t ax = v1_x - v0_x;
    const scalar_t ay = v1_y - v0_y;
    const scalar_t az = v1_z - v0_z;

    const scalar_t bx = v2_x - v0_x;
    const scalar_t by = v2_y - v0_y;
    const scalar_t bz = v2_z - v0_z;

    // Cross product c = a x b: its length is twice the triangle's area and
    // its direction is the face normal.
    const scalar_t cx = ay * bz - az * by;
    const scalar_t cy = az * bx - ax * bz;
    const scalar_t cz = ax * by - ay * bx;

    scalar_t norm = sqrt(cx * cx + cy * cy + cz * cz);
    face_areas[f] = norm / 2.0;
    // Clamp the norm from below so degenerate (zero-area) faces do not
    // divide by zero when normalizing.
    norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6)
    face_normals[3 * f + 0] = cx / norm;
    face_normals[3 * f + 1] = cy / norm;
    face_normals[3 * f + 2] = cz / norm;
  }
}
|
||||
|
||||
// Host-side launcher: allocates the outputs and dispatches face_areas_kernel
// over the floating point dtype of `verts`.
//
// Args:
//   verts: (V, 3) float tensor of vertex positions (CUDA).
//   faces: (F, 3) long tensor of vertex indices (CUDA).
//
// Returns:
//   (areas, normals): tensors of shape (F,) and (F, 3).
std::tuple<at::Tensor, at::Tensor> face_areas_cuda(
    at::Tensor verts,
    at::Tensor faces) {
  const auto V = verts.size(0);
  const auto F = faces.size(0);

  // Uninitialized outputs: the kernel writes every element it owns.
  at::Tensor areas = at::empty({F}, verts.options());
  at::Tensor normals = at::empty({F, 3}, verts.options());

  // Fixed launch configuration; the grid-stride loop inside the kernel
  // covers any F (including F == 0, where the loop body never runs).
  const int blocks = 64;
  const int threads = 512;
  AT_DISPATCH_FLOATING_TYPES(verts.type(), "face_areas_kernel", ([&] {
    face_areas_kernel<scalar_t><<<blocks, threads>>>(
        verts.data_ptr<scalar_t>(),
        faces.data_ptr<long>(),
        areas.data_ptr<scalar_t>(),
        normals.data_ptr<scalar_t>(),
        V,
        F);
  }));

  return std::make_tuple(areas, normals);
}
|
||||
36
pytorch3d/csrc/face_areas_normals/face_areas_normals.h
Normal file
36
pytorch3d/csrc/face_areas_normals/face_areas_normals.h
Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <torch/extension.h>
|
||||
#include <tuple>
|
||||
|
||||
// Compute areas of mesh faces using packed representation.
|
||||
//
|
||||
// Inputs:
|
||||
// verts: FloatTensor of shape (V, 3) giving vertex positions.
|
||||
// faces: LongTensor of shape (F, 3) giving faces.
|
||||
//
|
||||
// Returns:
|
||||
// areas: FloatTensor of shape (F,) where areas[f] is the area of faces[f].
|
||||
// normals: FloatTensor of shape (F, 3) where normals[f] is the normal of
|
||||
// faces[f]
|
||||
//
|
||||
|
||||
// Cuda implementation.
|
||||
std::tuple<at::Tensor, at::Tensor> face_areas_cuda(
|
||||
at::Tensor verts,
|
||||
at::Tensor faces);
|
||||
|
||||
// Implementation which is exposed.
|
||||
std::tuple<at::Tensor, at::Tensor> face_areas_normals(
|
||||
at::Tensor verts,
|
||||
at::Tensor faces) {
|
||||
if (verts.type().is_cuda() && faces.type().is_cuda()) {
|
||||
#ifdef WITH_CUDA
|
||||
return face_areas_cuda(verts, faces);
|
||||
#else
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
AT_ERROR("Not implemented on the CPU.");
|
||||
}
|
||||
69
pytorch3d/csrc/gather_scatter/gather_scatter.cu
Normal file
69
pytorch3d/csrc/gather_scatter/gather_scatter.cu
Normal file
@@ -0,0 +1,69 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <ATen/ATen.h>
|
||||
|
||||
// TODO(T47953967) to make this cuda kernel support all datatypes.
//
// For every edge (v0, v1), accumulates the features of v1 into the output
// row of v0 via atomicAdd (and symmetrically for undirected graphs).
//
// Args:
//   input: flattened (V, D) float feature matrix.
//   edges: flattened (E, 2) long matrix of vertex-index pairs.
//   output: flattened (V, D) float accumulator; expected zero-initialized.
//   directed: if false, also scatter v0's features into v1's row.
//   backward: if true, swap the roles of the two edge endpoints.
//   V, D, E: number of vertices, feature dimension, number of edges.
__global__ void gather_scatter_kernel(
    const float* __restrict__ input,
    const long* __restrict__ edges,
    float* __restrict__ output,
    bool directed,
    bool backward,
    const size_t V,
    const size_t D,
    const size_t E) {
  const int tid = threadIdx.x;

  // Reverse the vertex order if backward.
  const int v0_idx = backward ? 1 : 0;
  const int v1_idx = backward ? 0 : 1;

  // Edges are split evenly across the blocks: block b handles edges
  // b, b + gridDim.x, b + 2*gridDim.x, ... Every thread in a block sees the
  // same sequence of e, so the __syncthreads below is not divergent.
  for (int e = blockIdx.x; e < E; e += gridDim.x) {
    // Get indices of vertices which form the edge.
    const long v0 = edges[2 * e + v0_idx];
    const long v1 = edges[2 * e + v1_idx];

    // Split vertex features evenly across threads.
    // This implementation will be quite wasteful when D<128 since there will be
    // a lot of threads doing nothing.
    for (int d = tid; d < D; d += blockDim.x) {
      // atomicAdd: multiple edges (possibly in other blocks) may scatter
      // into the same vertex row concurrently.
      const float val = input[v1 * D + d];
      float* address = output + v0 * D + d;
      atomicAdd(address, val);
      if (!directed) {
        const float val = input[v0 * D + d];
        float* address = output + v1 * D + d;
        atomicAdd(address, val);
      }
    }
    __syncthreads();
  }
}
|
||||
|
||||
// Host-side launcher for gather_scatter_kernel.
//
// Args:
//   input: (V, D) float32 CUDA tensor of vertex features.
//   edges: (E, 2) int64 CUDA tensor of edges.
//   directed / backward: see gather_scatter_kernel.
//
// Returns:
//   output: (V, D) float32 tensor of accumulated neighbor features.
at::Tensor gather_scatter_cuda(
    const at::Tensor input,
    const at::Tensor edges,
    bool directed,
    bool backward) {
  const auto num_vertices = input.size(0);
  const auto input_feature_dim = input.size(1);
  const auto num_edges = edges.size(0);

  // Zero-initialized: the kernel only accumulates into it.
  auto output = at::zeros({num_vertices, input_feature_dim}, input.options());

  // A grid with zero blocks is an invalid CUDA launch configuration; with
  // no edges the zeroed output is already the correct answer.
  if (num_edges == 0) {
    return output;
  }

  const size_t threads = 128;
  const size_t max_blocks = 1920;
  // One block per edge, capped at max_blocks (the kernel strides over any
  // remaining edges).
  const size_t blocks = num_edges < max_blocks ? num_edges : max_blocks;

  gather_scatter_kernel<<<blocks, threads>>>(
      input.data_ptr<float>(),
      edges.data_ptr<long>(),
      output.data_ptr<float>(),
      directed,
      backward,
      num_vertices,
      input_feature_dim,
      num_edges);

  return output;
}
|
||||
43
pytorch3d/csrc/gather_scatter/gather_scatter.h
Normal file
43
pytorch3d/csrc/gather_scatter/gather_scatter.h
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <torch/extension.h>
|
||||
|
||||
// Fused gather scatter operation for aggregating features of neighbor nodes
|
||||
// in a graph. This gather scatter operation is specific to graphs as edge
|
||||
// indices are used as input.
|
||||
//
|
||||
// Args:
|
||||
// input: float32 Tensor of shape (V, D) where V is the number of vertices
|
||||
// and D is the feature dimension.
|
||||
// edges: int64 Tensor of shape (E, 2) giving the indices of the vertices that
|
||||
// make up the edge. E is the number of edges.
|
||||
// directed: Bool indicating if edges in the graph are directed. For a
|
||||
// directed graph v0 -> v1 the updated feature for v0 depends on v1.
|
||||
// backward: Bool indicating if the operation is the backward pass.
|
||||
//
|
||||
// Returns:
|
||||
// output: float32 Tensor of same shape as input.
|
||||
|
||||
// Cuda implementation.
|
||||
at::Tensor gather_scatter_cuda(
|
||||
const at::Tensor input,
|
||||
const at::Tensor edges,
|
||||
bool directed,
|
||||
bool backward);
|
||||
|
||||
// Exposed implementation.
|
||||
at::Tensor gather_scatter(
|
||||
const at::Tensor input,
|
||||
const at::Tensor edges,
|
||||
bool directed,
|
||||
bool backward) {
|
||||
if (input.type().is_cuda() && edges.type().is_cuda()) {
|
||||
#ifdef WITH_CUDA
|
||||
return gather_scatter_cuda(input, edges, directed, backward);
|
||||
#else
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
AT_ERROR("Not implemented on the CPU");
|
||||
}
|
||||
@@ -0,0 +1,265 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <ATen/ATen.h>
|
||||
#include <float.h>
|
||||
|
||||
// Final stage of a shared-memory argmin reduction: unrolls the last six
// halving steps (s = 32 ... 1), which execute within a single warp and so
// need no __syncthreads between steps.
//
// The pointers are `volatile` so every read goes back to shared memory
// rather than a stale register copy (required for this warp-synchronous
// idiom). Callers invoke it with tid < 32, and the tid + 32 access means
// both buffers must hold at least 64 entries.
//
// Args:
//   min_dists: shared buffer of candidate minimum distances.
//   min_idxs: shared buffer of the indices achieving those distances.
//   tid: thread index within the block (< 32).
template <typename scalar_t>
__device__ void warp_reduce(
    volatile scalar_t* min_dists,
    volatile long* min_idxs,
    const size_t tid) {
  // s = 32
  if (min_dists[tid] > min_dists[tid + 32]) {
    min_idxs[tid] = min_idxs[tid + 32];
    min_dists[tid] = min_dists[tid + 32];
  }
  // s = 16
  if (min_dists[tid] > min_dists[tid + 16]) {
    min_idxs[tid] = min_idxs[tid + 16];
    min_dists[tid] = min_dists[tid + 16];
  }
  // s = 8
  if (min_dists[tid] > min_dists[tid + 8]) {
    min_idxs[tid] = min_idxs[tid + 8];
    min_dists[tid] = min_dists[tid + 8];
  }
  // s = 4
  if (min_dists[tid] > min_dists[tid + 4]) {
    min_idxs[tid] = min_idxs[tid + 4];
    min_dists[tid] = min_dists[tid + 4];
  }
  // s = 2
  if (min_dists[tid] > min_dists[tid + 2]) {
    min_idxs[tid] = min_idxs[tid + 2];
    min_dists[tid] = min_dists[tid + 2];
  }
  // s = 1
  if (min_dists[tid] > min_dists[tid + 1]) {
    min_idxs[tid] = min_idxs[tid + 1];
    min_dists[tid] = min_dists[tid + 1];
  }
}
|
||||
|
||||
// CUDA kernel to compute nearest neighbors between two batches of pointclouds
// where each point is of dimension D.
//
// Args:
//   points1: First set of points, of shape (N, P1, D).
//   points2: Second set of points, of shape (N, P2, D).
//   idx: Output memory buffer of shape (N, P1).
//   N: Batch size.
//   P1: Number of points in points1.
//   P2: Number of points in points2.
//   D: Point dimension.
//   D_2: Size of the shared buffer; this is D rounded up so that memory access
//   is aligned.
//
template <typename scalar_t>
__global__ void nearest_neighbor_kernel(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    long* __restrict__ idx,
    const size_t N,
    const size_t P1,
    const size_t P2,
    const size_t D,
    const size_t D_2) {
  // Each block will compute one element of the output idx[n, i]. Within the
  // block we will use threads to compute the distances between points1[n, i]
  // and points2[n, j] for all 0 <= j < P2, then use a block reduction to
  // take an argmin of the distances.

  // Shared buffers for the threads in the block. CUDA only allows declaration
  // of a single shared buffer, so it needs to be manually sliced and cast to
  // build several logical shared buffers of different types.
  extern __shared__ char shared_buf[];
  scalar_t* x = (scalar_t*)shared_buf; // scalar_t[D_2]
  scalar_t* min_dists = &x[D_2]; // scalar_t[NUM_THREADS]
  long* min_idxs = (long*)&min_dists[blockDim.x]; // long[NUM_THREADS]

  const size_t n = blockIdx.y; // index of batch element.
  const size_t i = blockIdx.x; // index of point within batch element.
  const size_t tid = threadIdx.x;

  // Thread 0 copies points1[n, i, :] into x.
  if (tid == 0) {
    for (size_t d = 0; d < D; d++) {
      x[d] = points1[n * (P1 * D) + i * D + d];
    }
  }
  __syncthreads();

  // Compute the distances between points1[n, i] and points2[n, j] for
  // all 0 <= j < P2. Here each thread will reduce over P2 / blockDim.x
  // in serial, and store its result to shared memory
  scalar_t min_dist = FLT_MAX;
  size_t min_idx = 0;
  for (size_t j = tid; j < P2; j += blockDim.x) {
    scalar_t dist = 0;
    for (size_t d = 0; d < D; d++) {
      scalar_t x_d = x[d];
      scalar_t y_d = points2[n * (P2 * D) + j * D + d];
      scalar_t diff = x_d - y_d;
      dist += diff * diff;
    }
    // On this thread's first candidate (j == tid), take its distance
    // unconditionally; afterwards keep the running minimum. The <=
    // comparison breaks ties in favor of the larger index j.
    min_dist = (j == tid) ? dist : min_dist;
    min_idx = (dist <= min_dist) ? j : min_idx;
    min_dist = (dist <= min_dist) ? dist : min_dist;
  }
  // Threads with tid >= P2 never ran the loop and publish FLT_MAX / 0.
  min_dists[tid] = min_dist;
  min_idxs[tid] = min_idx;
  __syncthreads();

  // Perform reduction in shared memory.
  // NOTE(review): the halving reduction plus warp_reduce assumes blockDim.x
  // is a power of two and >= 64; the host launcher uses 128.
  for (int s = blockDim.x / 2; s > 32; s >>= 1) {
    if (tid < s) {
      if (min_dists[tid] > min_dists[tid + s]) {
        min_dists[tid] = min_dists[tid + s];
        min_idxs[tid] = min_idxs[tid + s];
      }
    }
    __syncthreads();
  }

  // Unroll the last 6 iterations of the loop since they will happen
  // synchronized within a single warp.
  if (tid < 32)
    warp_reduce<scalar_t>(min_dists, min_idxs, tid);

  // Finally thread 0 writes the result to the output buffer.
  if (tid == 0) {
    idx[n * P1 + i] = min_idxs[0];
  }
}
|
||||
|
||||
// CUDA kernel to compute nearest neighbors between two sets of 3-dimensional
// pointclouds. This is a specialization of the nearest_neighbor_kernel
// to the case D=3.
//
// Args:
//   points1: First set of pointclouds, of shape (N, P1, 3).
//   points2: Second set of pointclouds, of shape (N, P2, 3).
//   idx: Output memory buffer of shape (N, P1).
//   N: Batch size.
//   P1: Number of points in points1.
//   P2: Number of points in points2.
//
template <typename scalar_t>
__global__ void nearest_neighbor_kernel_D3(
    const scalar_t* __restrict__ points1,
    const scalar_t* __restrict__ points2,
    long* __restrict__ idx,
    const size_t N,
    const size_t P1,
    const size_t P2) {
  // Single shared memory buffer which is split and cast to different types.
  extern __shared__ char shared_buf[];
  scalar_t* min_dists = (scalar_t*)shared_buf; // scalar_t[NUM_THREADS]
  long* min_idxs = (long*)&min_dists[blockDim.x]; // long[NUM_THREADS]

  const size_t D = 3;
  const size_t n = blockIdx.y; // index of batch element.
  const size_t i = blockIdx.x; // index of point within batch element.
  const size_t tid = threadIdx.x;

  // Retrieve the coordinates of points1[n, i] from global memory; these
  // will be stored in registers for fast access.
  const scalar_t x = points1[n * (P1 * D) + i * D + 0];
  const scalar_t y = points1[n * (P1 * D) + i * D + 1];
  const scalar_t z = points1[n * (P1 * D) + i * D + 2];

  // Compute distances between points1[n, i] and all points2[n, j]
  // for 0 <= j < P2
  scalar_t min_dist = FLT_MAX;
  size_t min_idx = 0;

  // Distance computation for points in p2 spread across threads in the block.
  for (size_t j = tid; j < P2; j += blockDim.x) {
    scalar_t dx = x - points2[n * (P2 * D) + j * D + 0];
    scalar_t dy = y - points2[n * (P2 * D) + j * D + 1];
    scalar_t dz = z - points2[n * (P2 * D) + j * D + 2];
    scalar_t dist = dx * dx + dy * dy + dz * dz;
    // On this thread's first candidate (j == tid), take its distance
    // unconditionally; afterwards keep the running minimum.
    min_dist = (j == tid) ? dist : min_dist;
    min_idx = (dist <= min_dist) ? j : min_idx;
    min_dist = (dist <= min_dist) ? dist : min_dist;
  }
  min_dists[tid] = min_dist;
  min_idxs[tid] = min_idx;

  // Synchronize local threads writing to the shared memory buffer.
  __syncthreads();

  // Perform reduction in shared memory.
  // NOTE(review): the halving reduction plus warp_reduce assumes blockDim.x
  // is a power of two and >= 64; the host launcher uses 128.
  for (int s = blockDim.x / 2; s > 32; s >>= 1) {
    if (tid < s) {
      if (min_dists[tid] > min_dists[tid + s]) {
        min_dists[tid] = min_dists[tid + s];
        min_idxs[tid] = min_idxs[tid + s];
      }
    }

    // Synchronize local threads so that min_dists is correct.
    __syncthreads();
  }

  // Unroll the last 6 iterations of the loop since they will happen
  // synchronized within a single warp.
  if (tid < 32)
    warp_reduce<scalar_t>(min_dists, min_idxs, tid);

  // Finally thread 0 writes the result to the output buffer.
  if (tid == 0) {
    idx[n * P1 + i] = min_idxs[0];
  }
}
|
||||
|
||||
// Host-side launcher: computes, for every point in p1, the index of its
// nearest neighbor in p2 (one CUDA block per output element).
//
// Args:
//   p1: (N, P1, D) float CUDA tensor.
//   p2: (N, P2, D) float CUDA tensor; last dim must match p1.
//
// Returns:
//   idx: (N, P1) long tensor of nearest-neighbor indices into p2.
at::Tensor nn_points_idx_cuda(at::Tensor p1, at::Tensor p2) {
  const auto N = p1.size(0);
  const auto P1 = p1.size(1);
  const auto P2 = p2.size(1);
  const auto D = p1.size(2);

  AT_ASSERTM(p2.size(2) == D, "Point sets must have same last dimension.");
  auto idx = at::empty({N, P1}, p1.options().dtype(at::kLong));

  // A zero-sized grid is an invalid CUDA launch configuration; an empty
  // batch or empty p1 has nothing to compute.
  if (N == 0 || P1 == 0) {
    return idx;
  }

  // On P100 with pointclouds of size (16, 5000, 3), 128 threads per block
  // gives best results.
  const int threads = 128;
  const dim3 blocks(P1, N);

  if (D == 3) {
    // Use the specialized kernel for D=3.
    AT_DISPATCH_FLOATING_TYPES(p1.type(), "nearest_neighbor_v3_cuda", ([&] {
      // Shared memory holds min_dists (scalar_t[threads]) followed by
      // min_idxs (long[threads]). Size it with sizeof(scalar_t) — the
      // previous sizeof(size_t) over-allocated for float inputs. The long
      // block stays 8-byte aligned since threads (128) is even.
      size_t shared_size =
          threads * sizeof(scalar_t) + threads * sizeof(long);
      nearest_neighbor_kernel_D3<scalar_t>
          <<<blocks, threads, shared_size>>>(
              p1.data_ptr<scalar_t>(),
              p2.data_ptr<scalar_t>(),
              idx.data_ptr<long>(),
              N,
              P1,
              P2);
    }));
  } else {
    // Use the general kernel for all other D.
    AT_DISPATCH_FLOATING_TYPES(
        p1.type(), "nearest_neighbor_v3_cuda", ([&] {
          // To avoid misaligned access to the long buffer that follows the
          // scalar buffers, round D up to the next even size. Shared
          // memory holds x (scalar_t[D_2]), min_dists (scalar_t[threads])
          // and min_idxs (long[threads]).
          size_t D_2 = D + (D % 2);
          size_t shared_size = (D_2 + threads) * sizeof(scalar_t);
          shared_size += threads * sizeof(long);
          nearest_neighbor_kernel<scalar_t><<<blocks, threads, shared_size>>>(
              p1.data_ptr<scalar_t>(),
              p2.data_ptr<scalar_t>(),
              idx.data_ptr<long>(),
              N,
              P1,
              P2,
              D,
              D_2);
        }));
  }

  return idx;
}
|
||||
@@ -0,0 +1,37 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <torch/extension.h>
|
||||
#include "pytorch3d_cutils.h"
|
||||
|
||||
// Compute indices of nearest neighbors in pointcloud p2 to points
|
||||
// in pointcloud p1.
|
||||
//
|
||||
// Args:
|
||||
// p1: FloatTensor of shape (N, P1, D) giving a batch of pointclouds each
|
||||
// containing P1 points of dimension D.
|
||||
// p2: FloatTensor of shape (N, P2, D) giving a batch of pointclouds each
|
||||
// containing P2 points of dimension D.
|
||||
//
|
||||
// Returns:
|
||||
// p1_neighbor_idx: LongTensor of shape (N, P1), where
|
||||
// p1_neighbor_idx[n, i] = j means that the nearest neighbor
|
||||
// to p1[n, i] in the cloud p2[n] is p2[n, j].
|
||||
//
|
||||
|
||||
// Cuda implementation.
|
||||
at::Tensor nn_points_idx_cuda(at::Tensor p1, at::Tensor p2);
|
||||
|
||||
// Implementation which is exposed.
|
||||
at::Tensor nn_points_idx(at::Tensor p1, at::Tensor p2) {
|
||||
if (p1.type().is_cuda() && p2.type().is_cuda()) {
|
||||
#ifdef WITH_CUDA
|
||||
CHECK_CONTIGUOUS_CUDA(p1);
|
||||
CHECK_CONTIGUOUS_CUDA(p2);
|
||||
return nn_points_idx_cuda(p1, p2);
|
||||
#else
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
AT_ERROR("Not implemented on the CPU.");
|
||||
};
|
||||
@@ -0,0 +1,52 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <ATen/ATen.h>
|
||||
|
||||
// Copies one batch element's packed values into its row of the padded
// output: block b handles batch element b, and the threads of the block
// stride over that element's values.
//
// Args:
//   inputs: packed values for the whole batch, length num_inputs.
//   first_idxs: per-element start offsets into `inputs` (length batch_size).
//   inputs_padded: flattened (batch_size, max_size) output; expected to be
//     zero-initialized so the tail of each row stays zero.
//   batch_size: number of batch elements (== gridDim.x at launch).
//   max_size: padded row length.
//   num_inputs: total number of packed values.
template <typename scalar_t>
__global__ void packed_to_padded_tensor_kernel(
    const scalar_t* __restrict__ inputs,
    const long* __restrict__ first_idxs,
    scalar_t* __restrict__ inputs_padded,
    const size_t batch_size,
    const size_t max_size,
    const size_t num_inputs) {
  // Batch elements split evenly across blocks (num blocks = batch_size) and
  // values for each element split across threads in the block. Each thread adds
  // the values of its respective input elements to the global inputs_padded
  // tensor.
  const size_t tid = threadIdx.x;
  const size_t batch_idx = blockIdx.x;

  const long start = first_idxs[batch_idx];
  // The final element's range ends at num_inputs.
  const long end =
      batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
  // NOTE(review): signed int compared against size_t `f` below; this
  // assumes first_idxs is non-decreasing (end >= start) — a negative count
  // would wrap to a huge unsigned bound. Confirm against callers.
  const int num_faces = end - start;
  for (size_t f = tid; f < num_faces; f += blockDim.x) {
    inputs_padded[batch_idx * max_size + f] = inputs[start + f];
  }
}
|
||||
|
||||
// Host-side launcher: allocates the zero-padded output and dispatches
// packed_to_padded_tensor_kernel with one block per batch element.
//
// Args:
//   inputs: (F,) packed float CUDA tensor.
//   first_idxs: (N,) long tensor of per-element start offsets into inputs.
//   max_size: padded row length.
//
// Returns:
//   inputs_padded: (N, max_size) tensor, zero-padded past each row's data.
at::Tensor packed_to_padded_tensor_cuda(
    at::Tensor inputs,
    at::Tensor first_idxs,
    const long max_size) {
  const auto num_inputs = inputs.size(0);
  const auto batch_size = first_idxs.size(0);
  // Zero-initialized so rows shorter than max_size stay zero-padded.
  at::Tensor inputs_padded =
      at::zeros({batch_size, max_size}, inputs.options());

  // One block per batch element; launching zero blocks is an invalid CUDA
  // configuration, and an empty batch needs no copying.
  if (batch_size == 0) {
    return inputs_padded;
  }

  const int threads = 512;
  const int blocks = batch_size;
  AT_DISPATCH_FLOATING_TYPES(
      inputs.type(), "packed_to_padded_tensor_kernel", ([&] {
        packed_to_padded_tensor_kernel<scalar_t><<<blocks, threads>>>(
            inputs.data_ptr<scalar_t>(),
            first_idxs.data_ptr<long>(),
            inputs_padded.data_ptr<scalar_t>(),
            batch_size,
            max_size,
            num_inputs);
      }));

  return inputs_padded;
}
|
||||
@@ -0,0 +1,44 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <torch/extension.h>
|
||||
|
||||
// Converts a packed tensor into a padded tensor, restoring the batch dimension.
|
||||
// Refer to pytorch3d/structures/meshes.py for details on packed/padded tensors.
|
||||
//
|
||||
// Inputs:
|
||||
// inputs: FloatTensor of shape (F,), representing the packed batch tensor.
|
||||
// e.g. areas for faces in a batch of meshes.
|
||||
// first_idxs: LongTensor of shape (N,) where N is the number of
|
||||
// elements in the batch and `packed_first_idxs[i] = f`
|
||||
// means that the inputs for batch element i begin at
|
||||
// `inputs[f]`.
|
||||
// max_size: Max length of an element in the batch.
|
||||
// Returns:
|
||||
// inputs_padded: FloatTensor of shape (N, max_size) where max_size is max
|
||||
// of `sizes`. The values for batch element i which start at
|
||||
// `inputs[packed_first_idxs[i]]` will be copied to
|
||||
// `inputs_padded[i, :]``, with zeros padding out the extra
|
||||
// inputs.
|
||||
//
|
||||
|
||||
// Cuda implementation.
|
||||
at::Tensor packed_to_padded_tensor_cuda(
|
||||
at::Tensor inputs,
|
||||
at::Tensor first_idxs,
|
||||
const long max_size);
|
||||
|
||||
// Implementation which is exposed.
|
||||
at::Tensor packed_to_padded_tensor(
|
||||
at::Tensor inputs,
|
||||
at::Tensor first_idxs,
|
||||
const long max_size) {
|
||||
if (inputs.type().is_cuda()) {
|
||||
#ifdef WITH_CUDA
|
||||
return packed_to_padded_tensor_cuda(inputs, first_idxs, max_size);
|
||||
#else
|
||||
AT_ERROR("Not compiled with GPU support.");
|
||||
#endif
|
||||
}
|
||||
AT_ERROR("Not implemented on the CPU.");
|
||||
}
|
||||
12
pytorch3d/csrc/pytorch3d_cutils.h
Normal file
12
pytorch3d/csrc/pytorch3d_cutils.h
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <torch/extension.h>
|
||||
|
||||
// Assertion helpers shared by the extension's operators.
// A space is required after #x so the stringized tensor name does not run
// into the message ("p must be..." rather than "pmust be...").
#define CHECK_CUDA(x) \
  AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor.")
#define CHECK_CONTIGUOUS(x) \
  AT_ASSERTM(x.is_contiguous(), #x " must be contiguous.")
// Wrapped in do/while(0) so the macro expands to a single statement and is
// safe inside an unbraced if/else.
#define CHECK_CONTIGUOUS_CUDA(x) \
  do {                           \
    CHECK_CUDA(x);               \
    CHECK_CONTIGUOUS(x);         \
  } while (0)
|
||||
86
pytorch3d/csrc/rasterize_meshes/float_math.cuh
Normal file
86
pytorch3d/csrc/rasterize_meshes/float_math.cuh
Normal file
@@ -0,0 +1,86 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <thrust/tuple.h>
|
||||
|
||||
// Common functions and operators for float2.
|
||||
|
||||
__device__ inline float2 operator-(const float2& a, const float2& b) {
|
||||
return make_float2(a.x - b.x, a.y - b.y);
|
||||
}
|
||||
|
||||
__device__ inline float2 operator+(const float2& a, const float2& b) {
|
||||
return make_float2(a.x + b.x, a.y + b.y);
|
||||
}
|
||||
|
||||
__device__ inline float2 operator/(const float2& a, const float2& b) {
|
||||
return make_float2(a.x / b.x, a.y / b.y);
|
||||
}
|
||||
|
||||
__device__ inline float2 operator/(const float2& a, const float b) {
|
||||
return make_float2(a.x / b, a.y / b);
|
||||
}
|
||||
|
||||
__device__ inline float2 operator*(const float2& a, const float2& b) {
|
||||
return make_float2(a.x * b.x, a.y * b.y);
|
||||
}
|
||||
|
||||
__device__ inline float2 operator*(const float a, const float2& b) {
|
||||
return make_float2(a * b.x, a * b.y);
|
||||
}
|
||||
|
||||
__device__ inline float dot(const float2& a, const float2& b) {
|
||||
return a.x * b.x + a.y * b.y;
|
||||
}
|
||||
|
||||
// Backward pass for the dot product.
|
||||
// Args:
|
||||
// a, b: Coordinates of two points.
|
||||
// grad_dot: Upstream gradient for the output.
|
||||
//
|
||||
// Returns:
|
||||
// tuple of gradients for each of the input points:
|
||||
// (float2 grad_a, float2 grad_b)
|
||||
//
|
||||
__device__ inline thrust::tuple<float2, float2>
|
||||
DotBackward(const float2& a, const float2& b, const float& grad_dot) {
|
||||
return thrust::make_tuple(grad_dot * b, grad_dot * a);
|
||||
}
|
||||
|
||||
__device__ inline float sum(const float2& a) {
|
||||
return a.x + a.y;
|
||||
}
|
||||
|
||||
// Common functions and operators for float3.
|
||||
|
||||
__device__ inline float3 operator-(const float3& a, const float3& b) {
|
||||
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
|
||||
}
|
||||
|
||||
__device__ inline float3 operator+(const float3& a, const float3& b) {
|
||||
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
|
||||
}
|
||||
|
||||
__device__ inline float3 operator/(const float3& a, const float3& b) {
|
||||
return make_float3(a.x / b.x, a.y / b.y, a.z / b.z);
|
||||
}
|
||||
|
||||
__device__ inline float3 operator/(const float3& a, const float b) {
|
||||
return make_float3(a.x / b, a.y / b, a.z / b);
|
||||
}
|
||||
|
||||
__device__ inline float3 operator*(const float3& a, const float3& b) {
|
||||
return make_float3(a.x * b.x, a.y * b.y, a.z * b.z);
|
||||
}
|
||||
|
||||
__device__ inline float3 operator*(const float a, const float3& b) {
|
||||
return make_float3(a * b.x, a * b.y, a * b.z);
|
||||
}
|
||||
|
||||
__device__ inline float dot(const float3& a, const float3& b) {
|
||||
return a.x * b.x + a.y * b.y + a.z * b.z;
|
||||
}
|
||||
|
||||
__device__ inline float sum(const float3& a) {
|
||||
return a.x + a.y + a.z;
|
||||
}
|
||||
350
pytorch3d/csrc/rasterize_meshes/geometry_utils.cuh
Normal file
350
pytorch3d/csrc/rasterize_meshes/geometry_utils.cuh
Normal file
@@ -0,0 +1,350 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <float.h>
|
||||
#include <math.h>
|
||||
#include <torch/extension.h>
|
||||
#include <cstdio>
|
||||
#include "float_math.cuh"
|
||||
|
||||
// Set epsilon for preventing floating point errors and division by 0.
// NOTE(review): `auto` with the 1e-30 literal makes this a double; adding
// it to float expressions (e.g. in BarycentricCoordsForward) promotes the
// addition to double before truncating back to float — confirm intended.
const auto kEpsilon = 1e-30;
|
||||
|
||||
// Determines whether a point p is on the right side of a 2D line segment
|
||||
// given by the end points v0, v1.
|
||||
//
|
||||
// Args:
|
||||
// p: vec2 Coordinates of a point.
|
||||
// v0, v1: vec2 Coordinates of the end points of the edge.
|
||||
//
|
||||
// Returns:
|
||||
// area: The signed area of the parallelogram given by the vectors
|
||||
// A = p - v0
|
||||
// B = v1 - v0
|
||||
//
|
||||
__device__ inline float
|
||||
EdgeFunctionForward(const float2& p, const float2& v0, const float2& v1) {
|
||||
return (p.x - v0.x) * (v1.y - v0.y) - (p.y - v0.y) * (v1.x - v0.x);
|
||||
}
|
||||
|
||||
// Backward pass for the edge function returning partial derivatives for each
// of the input points.
//
// Args:
//   p: vec2 Coordinates of a point.
//   v0, v1: vec2 Coordinates of the end points of the edge.
//   grad_edge: Upstream gradient for output from edge function.
//
// Returns:
//   tuple of gradients for each of the input points:
//   (float2 d_edge_dp, float2 d_edge_dv0, float2 d_edge_dv1)
//
__device__ inline thrust::tuple<float2, float2, float2> EdgeFunctionBackward(
    const float2& p,
    const float2& v0,
    const float2& v1,
    const float& grad_edge) {
  // Analytic partials of
  //   E = (p.x - v0.x)(v1.y - v0.y) - (p.y - v0.y)(v1.x - v0.x)
  // with respect to p, v0 and v1; each is scaled by the upstream gradient
  // in the returned tuple.
  const float2 dedge_dp = make_float2(v1.y - v0.y, v0.x - v1.x);
  const float2 dedge_dv0 = make_float2(p.y - v1.y, v1.x - p.x);
  const float2 dedge_dv1 = make_float2(v0.y - p.y, p.x - v0.x);
  return thrust::make_tuple(
      grad_edge * dedge_dp, grad_edge * dedge_dv0, grad_edge * dedge_dv1);
}
|
||||
|
||||
// The forward pass for computing the barycentric coordinates of a point
|
||||
// relative to a triangle.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1, v2: Coordinates of the triangle vertices.
|
||||
//
|
||||
// Returns
|
||||
// bary: (w0, w1, w2) barycentric coordinates in the range [0, 1].
|
||||
//
|
||||
__device__ inline float3 BarycentricCoordsForward(
|
||||
const float2& p,
|
||||
const float2& v0,
|
||||
const float2& v1,
|
||||
const float2& v2) {
|
||||
const float area = EdgeFunctionForward(v2, v0, v1) + kEpsilon;
|
||||
const float w0 = EdgeFunctionForward(p, v1, v2) / area;
|
||||
const float w1 = EdgeFunctionForward(p, v2, v0) / area;
|
||||
const float w2 = EdgeFunctionForward(p, v0, v1) / area;
|
||||
return make_float3(w0, w1, w2);
|
||||
}
|
||||
|
||||
// The backward pass for computing the barycentric coordinates of a point
|
||||
// relative to a triangle.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1, v2: (x, y) coordinates of the triangle vertices.
|
||||
// grad_bary_upstream: vec3<T> Upstream gradient for each of the
|
||||
// barycentric coordaintes [grad_w0, grad_w1, grad_w2].
|
||||
//
|
||||
// Returns
|
||||
// tuple of gradients for each of the triangle vertices:
|
||||
// (float2 grad_v0, float2 grad_v1, float2 grad_v2)
|
||||
//
|
||||
__device__ inline thrust::tuple<float2, float2, float2, float2>
BarycentricCoordsBackward(
    const float2& p,
    const float2& v0,
    const float2& v1,
    const float2& v2,
    const float3& grad_bary_upstream) {
  const float area = EdgeFunctionForward(v2, v0, v1) + kEpsilon;
  // area * area instead of pow(area, 2.0): the double literal exponent forces
  // a double-precision pow() on the device; a plain product is exact and fast.
  const float area2 = area * area;
  const float e0 = EdgeFunctionForward(p, v1, v2);
  const float e1 = EdgeFunctionForward(p, v2, v0);
  const float e2 = EdgeFunctionForward(p, v0, v1);

  const float grad_w0 = grad_bary_upstream.x;
  const float grad_w1 = grad_bary_upstream.y;
  const float grad_w2 = grad_bary_upstream.z;

  // Calculate component of the gradient from each of w0, w1 and w2.
  // e.g. for w0:
  // dloss/dw0_v = dl/dw0 * dw0/dw0_top * dw0_top/dv
  //            + dl/dw0 * dw0/dw0_bot * dw0_bot/dv
  const float dw0_darea = -e0 / (area2);
  const float dw0_e0 = 1 / area;
  const float dloss_d_w0area = grad_w0 * dw0_darea;
  const float dloss_e0 = grad_w0 * dw0_e0;
  auto de0_dv = EdgeFunctionBackward(p, v1, v2, dloss_e0);
  auto dw0area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w0area);
  // Numerator e0 = EdgeFunction(p, v1, v2): its args map to (p, v1, v2);
  // denominator area = EdgeFunction(v2, v0, v1): its args map to (v2, v0, v1).
  const float2 dw0_p = thrust::get<0>(de0_dv);
  const float2 dw0_dv0 = thrust::get<1>(dw0area_dv);
  const float2 dw0_dv1 = thrust::get<1>(de0_dv) + thrust::get<2>(dw0area_dv);
  const float2 dw0_dv2 = thrust::get<2>(de0_dv) + thrust::get<0>(dw0area_dv);

  const float dw1_darea = -e1 / (area2);
  const float dw1_e1 = 1 / area;
  const float dloss_d_w1area = grad_w1 * dw1_darea;
  const float dloss_e1 = grad_w1 * dw1_e1;
  auto de1_dv = EdgeFunctionBackward(p, v2, v0, dloss_e1);
  auto dw1area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w1area);
  const float2 dw1_p = thrust::get<0>(de1_dv);
  const float2 dw1_dv0 = thrust::get<2>(de1_dv) + thrust::get<1>(dw1area_dv);
  const float2 dw1_dv1 = thrust::get<2>(dw1area_dv);
  const float2 dw1_dv2 = thrust::get<1>(de1_dv) + thrust::get<0>(dw1area_dv);

  const float dw2_darea = -e2 / (area2);
  const float dw2_e2 = 1 / area;
  const float dloss_d_w2area = grad_w2 * dw2_darea;
  const float dloss_e2 = grad_w2 * dw2_e2;
  auto de2_dv = EdgeFunctionBackward(p, v0, v1, dloss_e2);
  auto dw2area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w2area);
  const float2 dw2_p = thrust::get<0>(de2_dv);
  const float2 dw2_dv0 = thrust::get<1>(de2_dv) + thrust::get<1>(dw2area_dv);
  const float2 dw2_dv1 = thrust::get<2>(de2_dv) + thrust::get<2>(dw2area_dv);
  const float2 dw2_dv2 = thrust::get<0>(dw2area_dv);

  // Sum the three contributions for each input point.
  const float2 dbary_p = dw0_p + dw1_p + dw2_p;
  const float2 dbary_dv0 = dw0_dv0 + dw1_dv0 + dw2_dv0;
  const float2 dbary_dv1 = dw0_dv1 + dw1_dv1 + dw2_dv1;
  const float2 dbary_dv2 = dw0_dv2 + dw1_dv2 + dw2_dv2;

  return thrust::make_tuple(dbary_p, dbary_dv0, dbary_dv1, dbary_dv2);
}
|
||||
|
||||
// Forward pass for applying perspective correction to barycentric coordinates.
|
||||
//
|
||||
// Args:
|
||||
// bary: Screen-space barycentric coordinates for a point
|
||||
// z0, z1, z2: Camera-space z-coordinates of the triangle vertices
|
||||
//
|
||||
// Returns
|
||||
// World-space barycentric coordinates
|
||||
//
|
||||
__device__ inline float3 BarycentricPerspectiveCorrectionForward(
    const float3& bary,
    const float z0,
    const float z1,
    const float z2) {
  // Weight each screen-space coordinate by the product of the other two
  // vertices' camera-space depths, then renormalize so the corrected
  // weights sum to one.
  const float n0 = bary.x * z1 * z2;
  const float n1 = z0 * bary.y * z2;
  const float n2 = z0 * z1 * bary.z;
  const float norm = n0 + n1 + n2;
  return make_float3(n0 / norm, n1 / norm, n2 / norm);
}
|
||||
|
||||
// Backward pass for applying perspective correction to barycentric coordinates.
|
||||
//
|
||||
// Args:
|
||||
// bary: Screen-space barycentric coordinates for a point
|
||||
// z0, z1, z2: Camera-space z-coordinates of the triangle vertices
|
||||
// grad_out: Upstream gradient of the loss with respect to the corrected
|
||||
// barycentric coordinates.
|
||||
//
|
||||
// Returns a tuple of:
|
||||
// grad_bary: Downstream gradient of the loss with respect to the the
|
||||
// uncorrected barycentric coordinates.
|
||||
// grad_z0, grad_z1, grad_z2: Downstream gradient of the loss with respect
|
||||
// to the z-coordinates of the triangle verts
|
||||
__device__ inline thrust::tuple<float3, float, float, float>
BarycentricPerspectiveCorrectionBackward(
    const float3& bary,
    const float z0,
    const float z1,
    const float z2,
    const float3& grad_out) {
  // Recompute forward pass
  const float w0_top = bary.x * z1 * z2;
  const float w1_top = z0 * bary.y * z2;
  const float w2_top = z0 * z1 * bary.z;
  const float denom = w0_top + w1_top + w2_top;

  // Now do backward pass
  // All three outputs w_i = w_i_top / denom share the denominator, so its
  // gradient collects a term from each: d(w_i)/d(denom) = -w_i_top / denom^2.
  const float grad_denom_top =
      -w0_top * grad_out.x - w1_top * grad_out.y - w2_top * grad_out.z;
  const float grad_denom = grad_denom_top / (denom * denom);
  // Each numerator w_i_top gets a direct term (1/denom) plus the shared
  // denominator term (denom itself depends on every w_i_top).
  const float grad_w0_top = grad_denom + grad_out.x / denom;
  const float grad_w1_top = grad_denom + grad_out.y / denom;
  const float grad_w2_top = grad_denom + grad_out.z / denom;
  // w0_top = bary.x * z1 * z2, so d(w0_top)/d(bary.x) = z1 * z2, etc.
  const float grad_bary_x = grad_w0_top * z1 * z2;
  const float grad_bary_y = grad_w1_top * z0 * z2;
  const float grad_bary_z = grad_w2_top * z0 * z1;
  const float3 grad_bary = make_float3(grad_bary_x, grad_bary_y, grad_bary_z);
  // Each z_j appears in the two numerators of the *other* coordinates.
  const float grad_z0 = grad_w1_top * bary.y * z2 + grad_w2_top * bary.z * z1;
  const float grad_z1 = grad_w0_top * bary.x * z2 + grad_w2_top * bary.z * z0;
  const float grad_z2 = grad_w0_top * bary.x * z1 + grad_w1_top * bary.y * z0;
  return thrust::make_tuple(grad_bary, grad_z0, grad_z1, grad_z2);
}
|
||||
|
||||
// Return minimum distance between line segment (v1 - v0) and point p.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1: Coordinates of the end points of the line segment.
|
||||
//
|
||||
// Returns:
|
||||
// non-square distance to the boundary of the triangle.
|
||||
//
|
||||
// Note: despite the comment above, this returns the *squared* distance to
// the segment (both branches), matching the templated CPU version.
__device__ inline float
PointLineDistanceForward(const float2& p, const float2& v0, const float2& v1) {
  const float2 ba = v1 - v0;
  const float l2 = dot(ba, ba);
  // Degenerate segment (end points coincide): squared distance to v1.
  // Check *before* dividing so we never divide by a vanishing length.
  if (l2 <= kEpsilon) {
    return dot(p - v1, p - v1);
  }
  // Parameter of the projection of p onto the line v0 + t * (v1 - v0).
  float t = dot(ba, p - v0) / l2;
  t = __saturatef(t); // clamp to the interval [+0.0, 1.0]
  const float2 p_proj = v0 + t * ba;
  const float2 d = (p_proj - p);
  return dot(d, d); // squared distance
}
|
||||
|
||||
// Backward pass for point to line distance in 2D.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1: Coordinates of the end points of the line segment.
|
||||
// grad_dist: Upstream gradient for the distance.
|
||||
//
|
||||
// Returns:
|
||||
// tuple of gradients for each of the input points:
|
||||
// (float2 grad_p, float2 grad_v0, float2 grad_v1)
|
||||
//
|
||||
__device__ inline thrust::tuple<float2, float2, float2>
PointLineDistanceBackward(
    const float2& p,
    const float2& v0,
    const float2& v1,
    const float& grad_dist) {
  // Redo some of the forward pass calculations.
  const float2 v1v0 = v1 - v0;
  const float2 pv0 = p - v0;
  const float t_bot = dot(v1v0, v1v0);
  const float t_top = dot(v1v0, pv0);
  float tt = t_top / t_bot;
  tt = __saturatef(tt); // clamp to [0, 1]
  const float2 p_proj = (1.0f - tt) * v0 + tt * v1;
  // (Removed an unused `dist = sqrt(...)` computed here in the original —
  // dead work on every call.)

  // d(squared dist)/d(p_proj) = 2 * (p_proj - p); the projection distributes
  // onto v0/v1 through its barycentric weights (1 - tt) and tt.
  const float2 grad_p = -1.0f * grad_dist * 2.0f * (p_proj - p);
  const float2 grad_v0 = grad_dist * (1.0f - tt) * 2.0f * (p_proj - p);
  const float2 grad_v1 = grad_dist * tt * 2.0f * (p_proj - p);

  return thrust::make_tuple(grad_p, grad_v0, grad_v1);
}
|
||||
|
||||
// The forward pass for calculating the shortest distance between a point
|
||||
// and a triangle.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1, v2: Coordinates of the three triangle vertices.
|
||||
//
|
||||
// Returns:
|
||||
// shortest absolute distance from a point to a triangle.
|
||||
//
|
||||
__device__ inline float PointTriangleDistanceForward(
    const float2& p,
    const float2& v0,
    const float2& v1,
    const float2& v2) {
  // The distance of a point to a triangle's boundary is the smallest of its
  // distances to the three edge segments.
  const float d01 = PointLineDistanceForward(p, v0, v1);
  const float d02 = PointLineDistanceForward(p, v0, v2);
  const float d12 = PointLineDistanceForward(p, v1, v2);
  return fminf(fminf(d01, d02), d12);
}
|
||||
|
||||
// Backward pass for point triangle distance.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1, v2: Coordinates of the three triangle vertices.
|
||||
// grad_dist: Upstream gradient for the distance.
|
||||
//
|
||||
// Returns:
|
||||
// tuple of gradients for each of the triangle vertices:
|
||||
// (float2 grad_v0, float2 grad_v1, float2 grad_v2)
|
||||
//
|
||||
__device__ inline thrust::tuple<float2, float2, float2, float2>
PointTriangleDistanceBackward(
    const float2& p,
    const float2& v0,
    const float2& v1,
    const float2& v2,
    const float& grad_dist) {
  // Compute distance to all 3 edges of the triangle.
  const float e01_dist = PointLineDistanceForward(p, v0, v1);
  const float e02_dist = PointLineDistanceForward(p, v0, v2);
  const float e12_dist = PointLineDistanceForward(p, v1, v2);

  // Initialize output tensors.
  // Only the two vertices of the closest edge receive gradient; the third
  // vertex keeps a zero gradient.
  float2 grad_v0 = make_float2(0.0f, 0.0f);
  float2 grad_v1 = make_float2(0.0f, 0.0f);
  float2 grad_v2 = make_float2(0.0f, 0.0f);
  float2 grad_p = make_float2(0.0f, 0.0f);

  // Find which edge is the closest and return PointLineDistanceBackward for
  // that edge. Ties are broken in favor of the first matching branch
  // (e01, then e02, then e12).
  if (e01_dist <= e02_dist && e01_dist <= e12_dist) {
    // Closest edge is v1 - v0.
    auto grad_e01 = PointLineDistanceBackward(p, v0, v1, grad_dist);
    grad_p = thrust::get<0>(grad_e01);
    grad_v0 = thrust::get<1>(grad_e01);
    grad_v1 = thrust::get<2>(grad_e01);
  } else if (e02_dist <= e01_dist && e02_dist <= e12_dist) {
    // Closest edge is v2 - v0.
    auto grad_e02 = PointLineDistanceBackward(p, v0, v2, grad_dist);
    grad_p = thrust::get<0>(grad_e02);
    grad_v0 = thrust::get<1>(grad_e02);
    grad_v2 = thrust::get<2>(grad_e02);
  } else if (e12_dist <= e01_dist && e12_dist <= e02_dist) {
    // Closest edge is v2 - v1.
    auto grad_e12 = PointLineDistanceBackward(p, v1, v2, grad_dist);
    grad_p = thrust::get<0>(grad_e12);
    grad_v1 = thrust::get<1>(grad_e12);
    grad_v2 = thrust::get<2>(grad_e12);
  }

  return thrust::make_tuple(grad_p, grad_v0, grad_v1, grad_v2);
}
|
||||
397
pytorch3d/csrc/rasterize_meshes/geometry_utils.h
Normal file
397
pytorch3d/csrc/rasterize_meshes/geometry_utils.h
Normal file
@@ -0,0 +1,397 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <ATen/ATen.h>
|
||||
#include <algorithm>
|
||||
#include <type_traits>
|
||||
#include "vec2.h"
|
||||
#include "vec3.h"
|
||||
|
||||
// Set epsilon for preventing floating point errors and division by 0.
|
||||
const auto kEpsilon = 1e-30;
|
||||
|
||||
// Determines whether a point p is on the right side of a 2D line segment
|
||||
// given by the end points v0, v1.
|
||||
//
|
||||
// Args:
|
||||
// p: vec2 Coordinates of a point.
|
||||
// v0, v1: vec2 Coordinates of the end points of the edge.
|
||||
//
|
||||
// Returns:
|
||||
// area: The signed area of the parallelogram given by the vectors
|
||||
// A = p - v0
|
||||
// B = v1 - v0
|
||||
//
|
||||
// v1 ________
|
||||
// /\ /
|
||||
// A / \ /
|
||||
// / \ /
|
||||
// v0 /______\/
|
||||
// B p
|
||||
//
|
||||
// The area can also be interpreted as the cross product A x B.
|
||||
// If the sign of the area is positive, the point p is on the
|
||||
// right side of the edge. Negative area indicates the point is on
|
||||
// the left side of the edge. i.e. for an edge v1 - v0:
|
||||
//
|
||||
// v1
|
||||
// /
|
||||
// /
|
||||
// - / +
|
||||
// /
|
||||
// /
|
||||
// v0
|
||||
//
|
||||
template <typename T>
|
||||
T EdgeFunctionForward(const vec2<T>& p, const vec2<T>& v0, const vec2<T>& v1) {
|
||||
const T edge = (p.x - v0.x) * (v1.y - v0.y) - (p.y - v0.y) * (v1.x - v0.x);
|
||||
return edge;
|
||||
}
|
||||
|
||||
// Backward pass for the edge function returning partial dervivatives for each
|
||||
// of the input points.
|
||||
//
|
||||
// Args:
|
||||
// p: vec2 Coordinates of a point.
|
||||
// v0, v1: vec2 Coordinates of the end points of the edge.
|
||||
// grad_edge: Upstream gradient for output from edge function.
|
||||
//
|
||||
// Returns:
|
||||
// tuple of gradients for each of the input points:
|
||||
// (vec2<T> d_edge_dp, vec2<T> d_edge_dv0, vec2<T> d_edge_dv1)
|
||||
//
|
||||
template <typename T>
|
||||
inline std::tuple<vec2<T>, vec2<T>, vec2<T>> EdgeFunctionBackward(
|
||||
const vec2<T>& p,
|
||||
const vec2<T>& v0,
|
||||
const vec2<T>& v1,
|
||||
const T grad_edge) {
|
||||
const vec2<T> dedge_dp(v1.y - v0.y, v0.x - v1.x);
|
||||
const vec2<T> dedge_dv0(p.y - v1.y, v1.x - p.x);
|
||||
const vec2<T> dedge_dv1(v0.y - p.y, p.x - v0.x);
|
||||
return std::make_tuple(
|
||||
grad_edge * dedge_dp, grad_edge * dedge_dv0, grad_edge * dedge_dv1);
|
||||
}
|
||||
|
||||
// The forward pass for computing the barycentric coordinates of a point
|
||||
// relative to a triangle.
|
||||
// Ref:
|
||||
// https://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/barycentric-coordinates
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1, v2: Coordinates of the triangle vertices.
|
||||
//
|
||||
// Returns
|
||||
// bary: (w0, w1, w2) barycentric coordinates in the range [0, 1].
|
||||
//
|
||||
template <typename T>
|
||||
vec3<T> BarycentricCoordinatesForward(
|
||||
const vec2<T>& p,
|
||||
const vec2<T>& v0,
|
||||
const vec2<T>& v1,
|
||||
const vec2<T>& v2) {
|
||||
const T area = EdgeFunctionForward(v2, v0, v1) + kEpsilon;
|
||||
const T w0 = EdgeFunctionForward(p, v1, v2) / area;
|
||||
const T w1 = EdgeFunctionForward(p, v2, v0) / area;
|
||||
const T w2 = EdgeFunctionForward(p, v0, v1) / area;
|
||||
return vec3<T>(w0, w1, w2);
|
||||
}
|
||||
|
||||
// The backward pass for computing the barycentric coordinates of a point
|
||||
// relative to a triangle.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1, v2: (x, y) coordinates of the triangle vertices.
|
||||
// grad_bary_upstream: vec3<T> Upstream gradient for each of the
|
||||
// barycentric coordaintes [grad_w0, grad_w1, grad_w2].
|
||||
//
|
||||
// Returns
|
||||
// tuple of gradients for each of the triangle vertices:
|
||||
// (vec2<T> grad_v0, vec2<T> grad_v1, vec2<T> grad_v2)
|
||||
//
|
||||
template <typename T>
|
||||
inline std::tuple<vec2<T>, vec2<T>, vec2<T>, vec2<T>> BarycentricCoordsBackward(
|
||||
const vec2<T>& p,
|
||||
const vec2<T>& v0,
|
||||
const vec2<T>& v1,
|
||||
const vec2<T>& v2,
|
||||
const vec3<T>& grad_bary_upstream) {
|
||||
const T area = EdgeFunctionForward(v2, v0, v1) + kEpsilon;
|
||||
const T area2 = pow(area, 2.0f);
|
||||
const T area_inv = 1.0f / area;
|
||||
const T e0 = EdgeFunctionForward(p, v1, v2);
|
||||
const T e1 = EdgeFunctionForward(p, v2, v0);
|
||||
const T e2 = EdgeFunctionForward(p, v0, v1);
|
||||
|
||||
const T grad_w0 = grad_bary_upstream.x;
|
||||
const T grad_w1 = grad_bary_upstream.y;
|
||||
const T grad_w2 = grad_bary_upstream.z;
|
||||
|
||||
// Calculate component of the gradient from each of w0, w1 and w2.
|
||||
// e.g. for w0:
|
||||
// dloss/dw0_v = dl/dw0 * dw0/dw0_top * dw0_top/dv
|
||||
// + dl/dw0 * dw0/dw0_bot * dw0_bot/dv
|
||||
const T dw0_darea = -e0 / (area2);
|
||||
const T dw0_e0 = area_inv;
|
||||
const T dloss_d_w0area = grad_w0 * dw0_darea;
|
||||
const T dloss_e0 = grad_w0 * dw0_e0;
|
||||
auto de0_dv = EdgeFunctionBackward(p, v1, v2, dloss_e0);
|
||||
auto dw0area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w0area);
|
||||
const vec2<T> dw0_p = std::get<0>(de0_dv);
|
||||
const vec2<T> dw0_dv0 = std::get<1>(dw0area_dv);
|
||||
const vec2<T> dw0_dv1 = std::get<1>(de0_dv) + std::get<2>(dw0area_dv);
|
||||
const vec2<T> dw0_dv2 = std::get<2>(de0_dv) + std::get<0>(dw0area_dv);
|
||||
|
||||
const T dw1_darea = -e1 / (area2);
|
||||
const T dw1_e1 = area_inv;
|
||||
const T dloss_d_w1area = grad_w1 * dw1_darea;
|
||||
const T dloss_e1 = grad_w1 * dw1_e1;
|
||||
auto de1_dv = EdgeFunctionBackward(p, v2, v0, dloss_e1);
|
||||
auto dw1area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w1area);
|
||||
const vec2<T> dw1_p = std::get<0>(de1_dv);
|
||||
const vec2<T> dw1_dv0 = std::get<2>(de1_dv) + std::get<1>(dw1area_dv);
|
||||
const vec2<T> dw1_dv1 = std::get<2>(dw1area_dv);
|
||||
const vec2<T> dw1_dv2 = std::get<1>(de1_dv) + std::get<0>(dw1area_dv);
|
||||
|
||||
const T dw2_darea = -e2 / (area2);
|
||||
const T dw2_e2 = area_inv;
|
||||
const T dloss_d_w2area = grad_w2 * dw2_darea;
|
||||
const T dloss_e2 = grad_w2 * dw2_e2;
|
||||
auto de2_dv = EdgeFunctionBackward(p, v0, v1, dloss_e2);
|
||||
auto dw2area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w2area);
|
||||
const vec2<T> dw2_p = std::get<0>(de2_dv);
|
||||
const vec2<T> dw2_dv0 = std::get<1>(de2_dv) + std::get<1>(dw2area_dv);
|
||||
const vec2<T> dw2_dv1 = std::get<2>(de2_dv) + std::get<2>(dw2area_dv);
|
||||
const vec2<T> dw2_dv2 = std::get<0>(dw2area_dv);
|
||||
|
||||
const vec2<T> dbary_p = dw0_p + dw1_p + dw2_p;
|
||||
const vec2<T> dbary_dv0 = dw0_dv0 + dw1_dv0 + dw2_dv0;
|
||||
const vec2<T> dbary_dv1 = dw0_dv1 + dw1_dv1 + dw2_dv1;
|
||||
const vec2<T> dbary_dv2 = dw0_dv2 + dw1_dv2 + dw2_dv2;
|
||||
|
||||
return std::make_tuple(dbary_p, dbary_dv0, dbary_dv1, dbary_dv2);
|
||||
}
|
||||
|
||||
// Forward pass for applying perspective correction to barycentric coordinates.
|
||||
//
|
||||
// Args:
|
||||
// bary: Screen-space barycentric coordinates for a point
|
||||
// z0, z1, z2: Camera-space z-coordinates of the triangle vertices
|
||||
//
|
||||
// Returns
|
||||
// World-space barycentric coordinates
|
||||
//
|
||||
template <typename T>
|
||||
inline vec3<T> BarycentricPerspectiveCorrectionForward(
|
||||
const vec3<T>& bary,
|
||||
const T z0,
|
||||
const T z1,
|
||||
const T z2) {
|
||||
const T w0_top = bary.x * z1 * z2;
|
||||
const T w1_top = bary.y * z0 * z2;
|
||||
const T w2_top = bary.z * z0 * z1;
|
||||
const T denom = w0_top + w1_top + w2_top;
|
||||
const T w0 = w0_top / denom;
|
||||
const T w1 = w1_top / denom;
|
||||
const T w2 = w2_top / denom;
|
||||
return vec3<T>(w0, w1, w2);
|
||||
}
|
||||
|
||||
// Backward pass for applying perspective correction to barycentric coordinates.
|
||||
//
|
||||
// Args:
|
||||
// bary: Screen-space barycentric coordinates for a point
|
||||
// z0, z1, z2: Camera-space z-coordinates of the triangle vertices
|
||||
// grad_out: Upstream gradient of the loss with respect to the corrected
|
||||
// barycentric coordinates.
|
||||
//
|
||||
// Returns a tuple of:
|
||||
// grad_bary: Downstream gradient of the loss with respect to the the
|
||||
// uncorrected barycentric coordinates.
|
||||
// grad_z0, grad_z1, grad_z2: Downstream gradient of the loss with respect
|
||||
// to the z-coordinates of the triangle verts
|
||||
template <typename T>
inline std::tuple<vec3<T>, T, T, T> BarycentricPerspectiveCorrectionBackward(
    const vec3<T>& bary,
    const T z0,
    const T z1,
    const T z2,
    const vec3<T>& grad_out) {
  // Recompute forward pass
  const T w0_top = bary.x * z1 * z2;
  const T w1_top = bary.y * z0 * z2;
  const T w2_top = bary.z * z0 * z1;
  const T denom = w0_top + w1_top + w2_top;

  // Now do backward pass
  // All three outputs w_i = w_i_top / denom share the denominator, so its
  // gradient collects a term from each: d(w_i)/d(denom) = -w_i_top / denom^2.
  const T grad_denom_top =
      -w0_top * grad_out.x - w1_top * grad_out.y - w2_top * grad_out.z;
  const T grad_denom = grad_denom_top / (denom * denom);
  // Each numerator w_i_top gets a direct term (1/denom) plus the shared
  // denominator term (denom itself depends on every w_i_top).
  const T grad_w0_top = grad_denom + grad_out.x / denom;
  const T grad_w1_top = grad_denom + grad_out.y / denom;
  const T grad_w2_top = grad_denom + grad_out.z / denom;
  // w0_top = bary.x * z1 * z2, so d(w0_top)/d(bary.x) = z1 * z2, etc.
  const T grad_bary_x = grad_w0_top * z1 * z2;
  const T grad_bary_y = grad_w1_top * z0 * z2;
  const T grad_bary_z = grad_w2_top * z0 * z1;
  const vec3<T> grad_bary(grad_bary_x, grad_bary_y, grad_bary_z);
  // Each z_j appears in the two numerators of the *other* coordinates.
  const T grad_z0 = grad_w1_top * bary.y * z2 + grad_w2_top * bary.z * z1;
  const T grad_z1 = grad_w0_top * bary.x * z2 + grad_w2_top * bary.z * z0;
  const T grad_z2 = grad_w0_top * bary.x * z1 + grad_w1_top * bary.y * z0;
  return std::make_tuple(grad_bary, grad_z0, grad_z1, grad_z2);
}
|
||||
|
||||
// Calculate minimum distance between a line segment (v1 - v0) and point p.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1: Coordinates of the end points of the line segment.
|
||||
//
|
||||
// Returns:
|
||||
// non-square distance of the point to the line.
|
||||
//
|
||||
// Consider the line extending the segment - this can be parameterized as:
|
||||
// v0 + t (v1 - v0).
|
||||
//
|
||||
// First find the projection of point p onto the line. It falls where:
|
||||
// t = [(p - v0) . (v1 - v0)] / |v1 - v0|^2
|
||||
// where . is the dot product.
|
||||
//
|
||||
// The parameter t is clamped from [0, 1] to handle points outside the
|
||||
// segment (v1 - v0).
|
||||
//
|
||||
// Once the projection of the point on the segment is known, the distance from
|
||||
// p to the projection gives the minimum distance to the segment.
|
||||
//
|
||||
template <typename T>
|
||||
T PointLineDistanceForward(
|
||||
const vec2<T>& p,
|
||||
const vec2<T>& v0,
|
||||
const vec2<T>& v1) {
|
||||
const vec2<T> v1v0 = v1 - v0;
|
||||
const T l2 = dot(v1v0, v1v0);
|
||||
if (l2 <= kEpsilon) {
|
||||
return sqrt(dot(p - v1, p - v1));
|
||||
}
|
||||
|
||||
const T t = dot(v1v0, p - v0) / l2;
|
||||
const T tt = std::min(std::max(t, 0.00f), 1.00f);
|
||||
const vec2<T> p_proj = v0 + tt * v1v0;
|
||||
return dot(p - p_proj, p - p_proj);
|
||||
}
|
||||
|
||||
// Backward pass for point to line distance in 2D.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1: Coordinates of the end points of the line segment.
|
||||
// grad_dist: Upstream gradient for the distance.
|
||||
//
|
||||
// Returns:
|
||||
// tuple of gradients for each of the input points:
|
||||
// (vec2<T> grad_p, vec2<T> grad_v0, vec2<T> grad_v1)
|
||||
//
|
||||
template <typename T>
|
||||
inline std::tuple<vec2<T>, vec2<T>, vec2<T>> PointLineDistanceBackward(
|
||||
const vec2<T>& p,
|
||||
const vec2<T>& v0,
|
||||
const vec2<T>& v1,
|
||||
const T& grad_dist) {
|
||||
// Redo some of the forward pass calculations.
|
||||
const vec2<T> v1v0 = v1 - v0;
|
||||
const vec2<T> pv0 = p - v0;
|
||||
const T t_bot = dot(v1v0, v1v0);
|
||||
const T t_top = dot(v1v0, pv0);
|
||||
const T t = t_top / t_bot;
|
||||
const T tt = std::min(std::max(t, 0.00f), 1.00f);
|
||||
const vec2<T> p_proj = (1.0f - tt) * v0 + tt * v1;
|
||||
|
||||
const vec2<T> grad_v0 = grad_dist * (1.0f - tt) * 2.0f * (p_proj - p);
|
||||
const vec2<T> grad_v1 = grad_dist * tt * 2.0f * (p_proj - p);
|
||||
const vec2<T> grad_p = -1.0f * grad_dist * 2.0f * (p_proj - p);
|
||||
|
||||
return std::make_tuple(grad_p, grad_v0, grad_v1);
|
||||
}
|
||||
|
||||
// The forward pass for calculating the shortest distance between a point
|
||||
// and a triangle.
|
||||
// Ref: https://www.randygaul.net/2014/07/23/distance-point-to-line-segment/
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1, v2: Coordinates of the three triangle vertices.
|
||||
//
|
||||
// Returns:
|
||||
// shortest absolute distance from a point to a triangle.
|
||||
//
|
||||
//
|
||||
template <typename T>
|
||||
T PointTriangleDistanceForward(
|
||||
const vec2<T>& p,
|
||||
const vec2<T>& v0,
|
||||
const vec2<T>& v1,
|
||||
const vec2<T>& v2) {
|
||||
// Compute distance of point to 3 edges of the triangle and return the
|
||||
// minimum value.
|
||||
const T e01_dist = PointLineDistanceForward(p, v0, v1);
|
||||
const T e02_dist = PointLineDistanceForward(p, v0, v2);
|
||||
const T e12_dist = PointLineDistanceForward(p, v1, v2);
|
||||
const T edge_dist = std::min(std::min(e01_dist, e02_dist), e12_dist);
|
||||
|
||||
return edge_dist;
|
||||
}
|
||||
|
||||
// Backward pass for point triangle distance.
|
||||
//
|
||||
// Args:
|
||||
// p: Coordinates of a point.
|
||||
// v0, v1, v2: Coordinates of the three triangle vertices.
|
||||
// grad_dist: Upstream gradient for the distance.
|
||||
//
|
||||
// Returns:
|
||||
// tuple of gradients for each of the triangle vertices:
|
||||
// (vec2<T> grad_v0, vec2<T> grad_v1, vec2<T> grad_v2)
|
||||
//
|
||||
template <typename T>
inline std::tuple<vec2<T>, vec2<T>, vec2<T>, vec2<T>>
PointTriangleDistanceBackward(
    const vec2<T>& p,
    const vec2<T>& v0,
    const vec2<T>& v1,
    const vec2<T>& v2,
    const T& grad_dist) {
  // Compute distance to all 3 edges of the triangle.
  const T e01_dist = PointLineDistanceForward(p, v0, v1);
  const T e02_dist = PointLineDistanceForward(p, v0, v2);
  const T e12_dist = PointLineDistanceForward(p, v1, v2);

  // Initialize output tensors.
  // Only the two vertices of the closest edge receive gradient; the third
  // vertex keeps a zero gradient.
  vec2<T> grad_v0(0.0f, 0.0f);
  vec2<T> grad_v1(0.0f, 0.0f);
  vec2<T> grad_v2(0.0f, 0.0f);
  vec2<T> grad_p(0.0f, 0.0f);

  // Find which edge is the closest and return PointLineDistanceBackward for
  // that edge. Ties are broken in favor of the first matching branch
  // (e01, then e02, then e12).
  if (e01_dist <= e02_dist && e01_dist <= e12_dist) {
    // Closest edge is v1 - v0.
    auto grad_e01 = PointLineDistanceBackward(p, v0, v1, grad_dist);
    grad_p = std::get<0>(grad_e01);
    grad_v0 = std::get<1>(grad_e01);
    grad_v1 = std::get<2>(grad_e01);
  } else if (e02_dist <= e01_dist && e02_dist <= e12_dist) {
    // Closest edge is v2 - v0.
    auto grad_e02 = PointLineDistanceBackward(p, v0, v2, grad_dist);
    grad_p = std::get<0>(grad_e02);
    grad_v0 = std::get<1>(grad_e02);
    grad_v2 = std::get<2>(grad_e02);
  } else if (e12_dist <= e01_dist && e12_dist <= e02_dist) {
    // Closest edge is v2 - v1.
    auto grad_e12 = PointLineDistanceBackward(p, v1, v2, grad_dist);
    grad_p = std::get<0>(grad_e12);
    grad_v1 = std::get<1>(grad_e12);
    grad_v2 = std::get<2>(grad_e12);
  }

  return std::make_tuple(grad_p, grad_v0, grad_v1, grad_v2);
}
|
||||
803
pytorch3d/csrc/rasterize_meshes/rasterize_meshes.cu
Normal file
803
pytorch3d/csrc/rasterize_meshes/rasterize_meshes.cu
Normal file
@@ -0,0 +1,803 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <float.h>
|
||||
#include <math.h>
|
||||
#include <thrust/tuple.h>
|
||||
#include <torch/extension.h>
|
||||
#include <cstdio>
|
||||
#include <tuple>
|
||||
#include "float_math.cuh"
|
||||
#include "geometry_utils.cuh"
|
||||
#include "rasterize_points/bitmask.cuh"
|
||||
#include "rasterize_points/rasterization_utils.cuh"
|
||||
|
||||
namespace {
|
||||
// A structure for holding details about a pixel.
|
||||
struct Pixel {
  float z; // Depth of the face at this pixel; pixels sort by this (operator<).
  int64_t idx; // Index of the face covering this pixel — presumably into
               // face_verts; TODO confirm against CheckPixelInsideFace.
  float dist; // Distance from the pixel to the face (set by the rasterizer
              // kernels — assignment site not visible here).
  float3 bary; // Barycentric coordinates of the pixel w.r.t. the face.
};
|
||||
|
||||
// Order pixels by increasing depth so the closest face compares smallest.
__device__ bool operator<(const Pixel& a, const Pixel& b) {
  return a.z < b.z;
}
|
||||
|
||||
// Minimum of three floats (fminf ignores NaN operands, so the result is the
// smallest non-NaN value regardless of grouping).
__device__ float FloatMin3(const float p1, const float p2, const float p3) {
  return fminf(fminf(p1, p2), p3);
}
|
||||
|
||||
// Maximum of three floats (fmaxf ignores NaN operands, so the result is the
// largest non-NaN value regardless of grouping).
__device__ float FloatMax3(const float p1, const float p2, const float p3) {
  return fmaxf(fmaxf(p1, p2), p3);
}
|
||||
|
||||
// Get the xyz coordinates of the three vertices for the face given by the
|
||||
// index face_idx into face_verts.
|
||||
__device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts(
    const float* face_verts,
    int face_idx) {
  // Each face stores 3 vertices * 3 coordinates = 9 contiguous floats.
  const float* f = face_verts + face_idx * 9;
  const float3 v0xyz = make_float3(f[0], f[1], f[2]);
  const float3 v1xyz = make_float3(f[3], f[4], f[5]);
  const float3 v2xyz = make_float3(f[6], f[7], f[8]);
  return thrust::make_tuple(v0xyz, v1xyz, v2xyz);
}
|
||||
|
||||
// Get the min/max x/y/z values for the face given by vertices v0, v1, v2.
|
||||
__device__ thrust::tuple<float2, float2, float2>
GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) {
  // Pack the (min, max) extents of each axis into one float2 apiece.
  const float2 xlims =
      make_float2(FloatMin3(v0.x, v1.x, v2.x), FloatMax3(v0.x, v1.x, v2.x));
  const float2 ylims =
      make_float2(FloatMin3(v0.y, v1.y, v2.y), FloatMax3(v0.y, v1.y, v2.y));
  const float2 zlims =
      make_float2(FloatMin3(v0.z, v1.z, v2.z), FloatMax3(v0.z, v1.z, v2.z));
  return thrust::make_tuple(xlims, ylims, zlims);
}
|
||||
|
||||
// Check if the point (px, py) lies outside the face bounding box face_bbox.
|
||||
// Return true if the point is outside.
|
||||
__device__ bool CheckPointOutsideBoundingBox(
    float3 v0,
    float3 v1,
    float3 v2,
    float blur_radius,
    float2 pxy) {
  const auto bbox = GetFaceBoundingBox(v0, v1, v2);
  const float2 xlims = thrust::get<0>(bbox);
  const float2 ylims = thrust::get<1>(bbox);
  // Note: the z limits (thrust::get<2>) are intentionally not used — only the
  // 2D screen-space extent matters here (an unused zlims local was removed).

  // Expand the box by blur_radius on every side so faces whose blurred
  // region could still cover the pixel are not rejected.
  const float x_min = xlims.x - blur_radius;
  const float y_min = ylims.x - blur_radius;
  const float x_max = xlims.y + blur_radius;
  const float y_max = ylims.y + blur_radius;

  // Check if the current point is outside the triangle bounding box.
  return (pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min);
}
|
||||
|
||||
// This function checks if a pixel given by xy location pxy lies within the
// face with index face_idx in face_verts. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the faces which intersect
// with this pixel sorted by closest z distance. If the point pxy lies in the
// face, the list (q) is updated and re-orderered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizeMeshesNaiveCudaKernel and
// RasterizeMeshesFineCudaKernel.
template <typename FaceQ>
__device__ void CheckPixelInsideFace(
    const float* face_verts, // (N, P, 3)
    int face_idx, // Index of the candidate face in face_verts.
    int& q_size, // In/out: current number of entries in q (<= K).
    float& q_max_z, // In/out: z of the farthest face currently in q.
    int& q_max_idx, // In/out: index within q of that farthest face.
    FaceQ& q, // In/out: fixed-size array holding the K closest faces so far.
    float blur_radius,
    float2 pxy, // Coordinates of the pixel
    int K, // Maximum number of faces to keep per pixel.
    bool perspective_correct) {
  const auto v012 = GetSingleFaceVerts(face_verts, face_idx);
  const float3 v0 = thrust::get<0>(v012);
  const float3 v1 = thrust::get<1>(v012);
  const float3 v2 = thrust::get<2>(v012);

  // Only need xy for barycentric coordinates and distance calculations.
  const float2 v0xy = make_float2(v0.x, v0.y);
  const float2 v1xy = make_float2(v1.x, v1.y);
  const float2 v2xy = make_float2(v2.x, v2.y);

  // Perform checks and skip if:
  // 1. the face is behind the camera
  // 2. the face has very small face area
  // 3. the pixel is outside the face bbox
  const float zmax = FloatMax3(v0.z, v1.z, v2.z);
  const bool outside_bbox = CheckPointOutsideBoundingBox(
      v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox
  const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy);
  // Treat near-degenerate faces (|area| <= kEpsilon) as zero area.
  const bool zero_face_area =
      (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);

  if (zmax < 0 || outside_bbox || zero_face_area) {
    return;
  }

  // Calculate barycentric coords and euclidean dist to triangle.
  const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
  const float3 p_bary = !perspective_correct
      ? p_bary0
      : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z);

  // Interpolate the pixel depth from the vertex depths.
  const float pz = p_bary.x * v0.z + p_bary.y * v1.z + p_bary.z * v2.z;
  if (pz < 0) {
    return; // Face is behind the image plane.
  }

  // Get abs squared distance
  const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy);

  // Use the bary coordinates to determine if the point is inside the face.
  const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f;
  // Convention: negative distance for pixels covered by the face.
  const float signed_dist = inside ? -dist : dist;

  // Check if pixel is outside blur region
  if (!inside && dist >= blur_radius) {
    return;
  }

  if (q_size < K) {
    // Just insert it.
    q[q_size] = {pz, face_idx, signed_dist, p_bary};
    if (pz > q_max_z) {
      q_max_z = pz;
      q_max_idx = q_size;
    }
    q_size++;
  } else if (pz < q_max_z) {
    // Overwrite the old max, and find the new max.
    q[q_max_idx] = {pz, face_idx, signed_dist, p_bary};
    q_max_z = pz;
    for (int i = 0; i < K; i++) {
      if (q[i].z > q_max_z) {
        q_max_z = q[i].z;
        q_max_idx = i;
      }
    }
  }
}
|
||||
} // namespace
|
||||
|
||||
// ****************************************************************************
// *                          NAIVE RASTERIZATION                             *
// ****************************************************************************

// One thread per output pixel: each thread loops over every face of its
// mesh and keeps the K closest candidate faces for that pixel.
__global__ void RasterizeMeshesNaiveCudaKernel(
    const float* face_verts, // (F, 3, 3) packed face vertex positions.
    const int64_t* mesh_to_face_first_idx, // (N) first face index per mesh.
    const int64_t* num_faces_per_mesh, // (N) face count per mesh.
    float blur_radius,
    bool perspective_correct,
    int N, // Batch size.
    int H, // Image height in pixels.
    int W, // Image width in pixels.
    int K, // Number of closest faces to keep per pixel.
    int64_t* face_idxs, // (N, H, W, K) output face indices, -1 = empty slot.
    float* zbuf, // (N, H, W, K) output depths.
    float* pix_dists, // (N, H, W, K) output signed distances.
    float* bary) { // (N, H, W, K, 3) output barycentric coordinates.
  // Simple version: One thread per output pixel
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockDim.x * blockIdx.x + threadIdx.x;

  for (int i = tid; i < N * H * W; i += num_threads) {
    // Convert linear index to 3D index
    const int n = i / (H * W); // batch index.
    const int pix_idx = i % (H * W);
    // NOTE(review): yi is computed with / H and the flat output index below
    // uses a stride of yi * H * K where W-based strides might be expected.
    // This is only consistent because the host wrapper assumes square images
    // (H == W); confirm before supporting non-square images.
    const int yi = pix_idx / H;
    const int xi = pix_idx % W;

    // screen coordinates to ndc coordinates of pixel.
    const float xf = PixToNdc(xi, W);
    const float yf = PixToNdc(yi, H);
    const float2 pxy = make_float2(xf, yf);

    // For keeping track of the K closest points we want a data structure
    // that (1) gives O(1) access to the closest point for easy comparisons,
    // and (2) allows insertion of new elements. In the CPU version we use
    // std::priority_queue; then (2) is O(log K). We can't use STL
    // containers in CUDA; we could roll our own max heap in an array, but
    // that would likely have a lot of warp divergence so we do something
    // simpler instead: keep the elements in an unsorted array, but keep
    // track of the max value and the index of the max value. Then (1) is
    // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
    // this should be fast enough for our purposes.
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;

    // Using the batch index of the thread get the start and stop
    // indices for the faces.
    const int64_t face_start_idx = mesh_to_face_first_idx[n];
    const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n];

    // Loop through the faces in the mesh.
    for (int f = face_start_idx; f < face_stop_idx; ++f) {
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct);
    }

    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    int idx = n * H * W * K + yi * H * K + xi * K;
    for (int k = 0; k < q_size; ++k) {
      face_idxs[idx + k] = q[k].idx;
      zbuf[idx + k] = q[k].z;
      pix_dists[idx + k] = q[k].dist;
      bary[(idx + k) * 3 + 0] = q[k].bary.x;
      bary[(idx + k) * 3 + 1] = q[k].bary.y;
      bary[(idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
|
||||
|
||||
// Host entry point for naive mesh rasterization on the GPU.
//
// Args:
//   face_verts: (F, 3, 3) float tensor of packed face vertex positions.
//   mesh_to_faces_packed_first_idx: (N) long tensor; for each mesh, the index
//       into face_verts of its first face.
//   num_faces_per_mesh: (N) long tensor; number of faces per mesh.
//   image_size: output image height and width (square images only).
//   blur_radius: NDC distance used to expand face bounding boxes.
//   num_closest: number of closest faces to record per pixel (K).
//   perspective_correct: whether to perspective-correct barycentrics.
//
// Returns a tuple of (face_idxs, zbuf, bary, pix_dists); all outputs are
// padded with -1 where a pixel is covered by fewer than K faces.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
RasterizeMeshesNaiveCuda(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_faces_packed_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    const int image_size,
    const float blur_radius,
    const int num_closest,
    bool perspective_correct) {
  if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 ||
      face_verts.size(2) != 3) {
    AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)");
  }
  if (num_faces_per_mesh.size(0) != mesh_to_faces_packed_first_idx.size(0)) {
    // Fixed typo in the error message ("save size" -> "the same size").
    AT_ERROR(
        "num_faces_per_mesh must have the same size first dimension as mesh_to_faces_packed_first_idx");
  }

  if (num_closest > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }

  const int N = num_faces_per_mesh.size(0); // batch size.
  const int H = image_size; // Assume square images.
  const int W = image_size;
  const int K = num_closest;

  auto long_opts = face_verts.options().dtype(torch::kInt64);
  auto float_opts = face_verts.options().dtype(torch::kFloat32);

  // Outputs are padded with -1 for pixels hit by fewer than K faces.
  torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts);
  torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts);
  torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts);
  torch::Tensor bary = torch::full({N, H, W, K, 3}, -1, float_opts);

  const size_t blocks = 1024;
  const size_t threads = 64;

  RasterizeMeshesNaiveCudaKernel<<<blocks, threads>>>(
      face_verts.contiguous().data<float>(),
      mesh_to_faces_packed_first_idx.contiguous().data<int64_t>(),
      num_faces_per_mesh.contiguous().data<int64_t>(),
      blur_radius,
      perspective_correct,
      N,
      H,
      W,
      K,
      face_idxs.contiguous().data<int64_t>(),
      zbuf.contiguous().data<float>(),
      pix_dists.contiguous().data<float>(),
      bary.contiguous().data<float>());

  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
|
||||
|
||||
// ****************************************************************************
// *                            BACKWARD PASS                                 *
// ****************************************************************************
// TODO: benchmark parallelizing over faces_verts instead of over pixels.

// One thread per pixel: each thread scatters gradient contributions for its
// K candidate faces into grad_face_verts via atomicAdd (multiple pixels may
// contribute to the same face).
__global__ void RasterizeMeshesBackwardCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int64_t* pix_to_face, // (N, H, W, K)
    bool perspective_correct,
    int N,
    int F,
    int H,
    int W,
    int K,
    const float* grad_zbuf, // (N, H, W, K)
    const float* grad_bary, // (N, H, W, K, 3)
    const float* grad_dists, // (N, H, W, K)
    float* grad_face_verts) { // (F, 3, 3)

  // Parallelize over each pixel in images of
  // size H * W, for each image in the batch of size N.
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;

  for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
    // Convert linear index to 3D index
    const int n = t_i / (H * W); // batch index.
    const int pix_idx = t_i % (H * W);
    // NOTE(review): yi uses / H and the flat index below uses a yi * H * K
    // stride; consistent only because images are assumed square (H == W)
    // elsewhere in this file — confirm before supporting non-square images.
    const int yi = pix_idx / H;
    const int xi = pix_idx % W;
    const float xf = PixToNdc(xi, W);
    const float yf = PixToNdc(yi, H);
    const float2 pxy = make_float2(xf, yf);

    // Loop over all the faces for this pixel.
    for (int k = 0; k < K; k++) {
      // Index into (N, H, W, K, :) grad tensors
      const int i =
          n * H * W * K + yi * H * K + xi * K + k; // pixel index + face index

      const int f = pix_to_face[i];
      if (f < 0) {
        continue; // padded face.
      }
      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);

      // Only need xy for barycentric coordinate and distance calculations.
      const float2 v0xy = make_float2(v0.x, v0.y);
      const float2 v1xy = make_float2(v1.x, v1.y);
      const float2 v2xy = make_float2(v2.x, v2.y);

      // Get upstream gradients for the face.
      const float grad_dist_upstream = grad_dists[i];
      const float grad_zbuf_upstream = grad_zbuf[i];
      const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0];
      const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1];
      const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2];
      const float3 grad_bary_upstream = make_float3(
          grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2);

      const float3 bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
      const float3 bary = !perspective_correct
          ? bary0
          : BarycentricPerspectiveCorrectionForward(bary0, v0.z, v1.z, v2.z);
      const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f;
      // Distances are signed: negative inside the face, positive outside.
      const float sign = inside ? -1.0f : 1.0f;

      // TODO(T52813608) Add support for non-square images.
      auto grad_dist_f = PointTriangleDistanceBackward(
          pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
      const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f);
      const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f);
      const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f);

      // Upstream gradient for barycentric coords from zbuf calculation:
      // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
      // Therefore
      // d_zbuf/d_bary_w0 = z0
      // d_zbuf/d_bary_w1 = z1
      // d_zbuf/d_bary_w2 = z2
      const float3 d_zbuf_d_bary = make_float3(v0.z, v1.z, v2.z);

      // Total upstream barycentric gradients are the sum of
      // external upstream gradients and contribution from zbuf.
      const float3 grad_bary_f_sum =
          (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bary);
      float3 grad_bary0 = grad_bary_f_sum;
      float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f;
      if (perspective_correct) {
        // Backprop through the perspective correction of the barycentrics;
        // this also yields gradients w.r.t. the vertex z values.
        auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
            bary0, v0.z, v1.z, v2.z, grad_bary_f_sum);
        grad_bary0 = thrust::get<0>(perspective_grads);
        dz0_persp = thrust::get<1>(perspective_grads);
        dz1_persp = thrust::get<2>(perspective_grads);
        dz2_persp = thrust::get<3>(perspective_grads);
      }
      auto grad_bary_f =
          BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
      const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f);
      const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f);
      const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f);

      // Accumulate into the packed (F, 3, 3) gradient tensor. atomicAdd is
      // required because multiple pixels (threads) can hit the same face.
      atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x);
      atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y);
      atomicAdd(
          grad_face_verts + f * 9 + 2, grad_zbuf_upstream * bary.x + dz0_persp);
      atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x);
      atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y);
      atomicAdd(
          grad_face_verts + f * 9 + 5, grad_zbuf_upstream * bary.y + dz1_persp);
      atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x);
      atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y);
      atomicAdd(
          grad_face_verts + f * 9 + 8, grad_zbuf_upstream * bary.z + dz2_persp);
    }
  }
}
|
||||
|
||||
// Host entry point for the mesh rasterization backward pass on the GPU.
// Accumulates gradients w.r.t. the packed face vertices from the upstream
// gradients of the depth buffer, barycentric coords and pixel distances.
//
// NOTE(review): the declaration of this function in rasterize_meshes.h lists
// grad_bary before grad_zbuf, the opposite of this definition's order —
// confirm that callers pass arguments in this definition's order.
torch::Tensor RasterizeMeshesBackwardCuda(
    const torch::Tensor& face_verts, // (F, 3, 3)
    const torch::Tensor& pix_to_face, // (N, H, W, K)
    const torch::Tensor& grad_zbuf, // (N, H, W, K)
    const torch::Tensor& grad_bary, // (N, H, W, K, 3)
    const torch::Tensor& grad_dists, // (N, H, W, K)
    bool perspective_correct) {
  // Output image dimensions come from the pix_to_face map.
  const int N = pix_to_face.size(0);
  const int H = pix_to_face.size(1);
  const int W = pix_to_face.size(2);
  const int K = pix_to_face.size(3);
  const int F = face_verts.size(0);

  // Gradients are accumulated into a zero-initialized (F, 3, 3) tensor.
  torch::Tensor grad_face_verts = torch::zeros({F, 3, 3}, face_verts.options());

  const size_t num_blocks = 1024;
  const size_t threads_per_block = 64;
  RasterizeMeshesBackwardCudaKernel<<<num_blocks, threads_per_block>>>(
      face_verts.contiguous().data<float>(),
      pix_to_face.contiguous().data<int64_t>(),
      perspective_correct,
      N,
      F,
      H,
      W,
      K,
      grad_zbuf.contiguous().data<float>(),
      grad_bary.contiguous().data<float>(),
      grad_dists.contiguous().data<float>(),
      grad_face_verts.contiguous().data<float>());
  return grad_face_verts;
}
|
||||
|
||||
// ****************************************************************************
// *                          COARSE RASTERIZATION                            *
// ****************************************************************************

// Bins faces into a coarse grid over the image. Each block processes one
// chunk of faces for one mesh: threads first mark, in a shared-memory bit
// mask, which bins each face's (blur-expanded) bounding box overlaps, then
// cooperatively flush the per-bin face lists out to global memory.
__global__ void RasterizeMeshesCoarseCudaKernel(
    const float* face_verts, // (F, 3, 3) packed face vertex positions.
    const int64_t* mesh_to_face_first_idx, // (N) first face index per mesh.
    const int64_t* num_faces_per_mesh, // (N) face count per mesh.
    const float blur_radius,
    const int N,
    const int F,
    const int H,
    const int W,
    const int bin_size, // Bin side length in pixels.
    const int chunk_size, // Faces handled per block iteration.
    const int max_faces_per_bin,
    int* faces_per_bin, // (N, B, B) running per-bin face counts.
    int* bin_faces) { // (N, B, B, M) output face indices per bin.
  extern __shared__ char sbuf[];
  const int M = max_faces_per_bin;
  const int num_bins = 1 + (W - 1) / bin_size; // Integer divide round up
  const float half_pix = 1.0f / W; // Size of half a pixel in NDC units
  // This is a boolean array of shape (num_bins, num_bins, chunk_size)
  // stored in shared memory that will track whether each point in the chunk
  // falls into each bin of the image.
  BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size);

  // Have each block handle a chunk of faces
  const int chunks_per_batch = 1 + (F - 1) / chunk_size;
  const int num_chunks = N * chunks_per_batch;
  for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
    const int batch_idx = chunk / chunks_per_batch; // batch index
    const int chunk_idx = chunk % chunks_per_batch;
    const int face_start_idx = chunk_idx * chunk_size;

    binmask.block_clear();
    const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx];
    const int64_t mesh_face_stop_idx =
        mesh_face_start_idx + num_faces_per_mesh[batch_idx];

    // Have each thread handle a different face within the chunk
    for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) {
      const int f_idx = face_start_idx + f;

      // Check if face index corresponds to the mesh in the batch given by
      // batch_idx
      if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) {
        continue;
      }

      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f_idx);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);

      // Compute screen-space bbox for the triangle expanded by blur.
      float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius);
      float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius);
      float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius);
      float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius);
      float zmax = FloatMax3(v0.z, v1.z, v2.z);

      if (zmax < 0) {
        continue; // Face is behind the camera.
      }

      // Brute-force search over all bins; TODO(T54294966) something smarter.
      for (int by = 0; by < num_bins; ++by) {
        // Y coordinate of the top and bottom of the bin.
        // PixToNdc gives the location of the center of each pixel, so we
        // need to add/subtract a half pixel to get the true extent of the bin.
        const float bin_y_min = PixToNdc(by * bin_size, H) - half_pix;
        const float bin_y_max = PixToNdc((by + 1) * bin_size - 1, H) + half_pix;
        const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax);

        for (int bx = 0; bx < num_bins; ++bx) {
          // X coordinate of the left and right of the bin.
          const float bin_x_min = PixToNdc(bx * bin_size, W) - half_pix;
          const float bin_x_max =
              PixToNdc((bx + 1) * bin_size - 1, W) + half_pix;
          const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax);

          if (y_overlap && x_overlap) {
            binmask.set(by, bx, f);
          }
        }
      }
    }
    __syncthreads();
    // Now we have processed every face in the current chunk. We need to
    // count the number of faces in each bin so we can write the indices
    // out to global memory. We have each thread handle a different bin.
    for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) {
      const int by = byx / num_bins;
      const int bx = byx % num_bins;
      const int count = binmask.count(by, bx);
      const int faces_per_bin_idx =
          batch_idx * num_bins * num_bins + by * num_bins + bx;

      // This atomically increments the (global) number of faces found
      // in the current bin, and gets the previous value of the counter;
      // this effectively allocates space in the bin_faces array for the
      // faces in the current chunk that fall into this bin.
      const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count);

      // Now loop over the binmask and write the active bits for this bin
      // out to bin_faces.
      int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M +
          bx * M + start;
      for (int f = 0; f < chunk_size; ++f) {
        if (binmask.get(by, bx, f)) {
          // TODO(T54296346) find the correct method for handling errors in
          // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin.
          // Either decrease bin size or increase max_faces_per_bin
          bin_faces[next_idx] = face_start_idx + f;
          next_idx++;
        }
      }
    }
    __syncthreads();
  }
}
|
||||
|
||||
// Host entry point for the coarse rasterization stage on the GPU. Bins the
// faces of each mesh into a coarse grid over the image so that the fine
// stage only needs to consider the faces overlapping each bin. Returns an
// int32 tensor of shape (N, num_bins, num_bins, M) of face indices per bin,
// padded with -1.
torch::Tensor RasterizeMeshesCoarseCuda(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_face_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    const int image_size,
    const float blur_radius,
    const int bin_size,
    const int max_faces_per_bin) {
  const bool verts_ok = face_verts.ndimension() == 3 &&
      face_verts.size(1) == 3 && face_verts.size(2) == 3;
  if (!verts_ok) {
    AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)");
  }

  const int W = image_size;
  const int H = image_size;
  const int F = face_verts.size(0);
  const int N = num_faces_per_mesh.size(0);
  const int M = max_faces_per_bin;
  // Integer divide, rounding up.
  const int num_bins = 1 + (image_size - 1) / bin_size;

  if (num_bins >= 22) {
    // Presumably this bound keeps the shared-memory bit mask used by the
    // kernel within the hardware limit — confirm before raising it.
    std::stringstream ss;
    ss << "Got " << num_bins << "; that's too many!";
    AT_ERROR(ss.str());
  }

  auto int_opts = face_verts.options().dtype(torch::kInt32);
  torch::Tensor faces_per_bin = torch::zeros({N, num_bins, num_bins}, int_opts);
  torch::Tensor bin_faces = torch::full({N, num_bins, num_bins, M}, -1, int_opts);

  // Each block processes chunk_size faces at a time; the shared-memory bit
  // mask holds one bit per (bin_y, bin_x, face-in-chunk).
  const int chunk_size = 512;
  const size_t shared_size = num_bins * num_bins * chunk_size / 8;
  const size_t blocks = 64;
  const size_t threads = 512;

  RasterizeMeshesCoarseCudaKernel<<<blocks, threads, shared_size>>>(
      face_verts.contiguous().data<float>(),
      mesh_to_face_first_idx.contiguous().data<int64_t>(),
      num_faces_per_mesh.contiguous().data<int64_t>(),
      blur_radius,
      N,
      F,
      H,
      W,
      bin_size,
      chunk_size,
      M,
      faces_per_bin.contiguous().data<int32_t>(),
      bin_faces.contiguous().data<int32_t>());
  return bin_faces;
}
|
||||
|
||||
// ****************************************************************************
// *                            FINE RASTERIZATION                            *
// ****************************************************************************

// One thread per output pixel; unlike the naive kernel, each pixel only
// considers the faces that the coarse stage assigned to its bin.
__global__ void RasterizeMeshesFineCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int32_t* bin_faces, // (N, B, B, T)
    const float blur_radius,
    const int bin_size,
    const bool perspective_correct,
    const int N, // Batch size.
    const int F, // Total number of faces.
    const int B, // Number of bins along each image axis.
    const int M, // Max faces per bin.
    const int H, // Image height in pixels.
    const int W, // Image width in pixels.
    const int K, // Number of closest faces to keep per pixel.
    int64_t* face_idxs, // (N, S, S, K)
    float* zbuf, // (N, S, S, K)
    float* pix_dists, // (N, S, S, K)
    float* bary // (N, S, S, K, 3)
) {
  // This can be more than S^2 if S % bin_size != 0
  int num_pixels = N * B * B * bin_size * bin_size;
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;

  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Convert linear index into bin and pixel indices. We make the within
    // block pixel ids move the fastest, so that adjacent threads will fall
    // into the same bin; this should give them coalesced memory reads when
    // they read from faces and bin_faces.
    int i = pid;
    const int n = i / (B * B * bin_size * bin_size);
    i %= B * B * bin_size * bin_size;
    const int by = i / (B * bin_size * bin_size);
    i %= B * bin_size * bin_size;
    const int bx = i / (bin_size * bin_size);
    i %= bin_size * bin_size;
    const int yi = i / bin_size + by * bin_size;
    const int xi = i % bin_size + bx * bin_size;

    // Skip padding pixels when the image size is not a multiple of bin_size.
    if (yi >= H || xi >= W)
      continue;
    const float xf = PixToNdc(xi, W);
    const float yf = PixToNdc(yi, H);
    const float2 pxy = make_float2(xf, yf);

    // This part looks like the naive rasterization kernel, except we use
    // bin_faces to only look at a subset of faces already known to fall
    // in this bin. TODO abstract out this logic into some data structure
    // that is shared by both kernels?
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int m = 0; m < M; m++) {
      const int f = bin_faces[n * B * B * M + by * B * M + bx * M + m];
      if (f < 0) {
        continue; // bin_faces uses -1 as a sentinel value.
      }
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct);
    }

    // Now we've looked at all the faces for this bin, so we can write
    // output for the current pixel.
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    // NOTE(review): the yi * H * K stride matches the expected W-based
    // stride only because images are assumed square (H == W) — confirm
    // before supporting non-square images.
    const int pix_idx = n * H * W * K + yi * H * K + xi * K;
    for (int k = 0; k < q_size; k++) {
      face_idxs[pix_idx + k] = q[k].idx;
      zbuf[pix_idx + k] = q[k].z;
      pix_dists[pix_idx + k] = q[k].dist;
      bary[(pix_idx + k) * 3 + 0] = q[k].bary.x;
      bary[(pix_idx + k) * 3 + 1] = q[k].bary.y;
      bary[(pix_idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
|
||||
|
||||
// Host entry point for the fine rasterization stage on the GPU. Consumes
// the per-bin face lists produced by RasterizeMeshesCoarseCuda and produces
// per-pixel outputs in the same format as the naive version: a tuple of
// (face_idxs, zbuf, bary, pix_dists), each padded with -1 where a pixel is
// covered by fewer than K faces.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
RasterizeMeshesFineCuda(
    const torch::Tensor& face_verts,
    const torch::Tensor& bin_faces,
    const int image_size,
    const float blur_radius,
    const int bin_size,
    const int faces_per_pixel,
    bool perspective_correct) {
  if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 ||
      face_verts.size(2) != 3) {
    AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)");
  }
  if (bin_faces.ndimension() != 4) {
    AT_ERROR("bin_faces must have 4 dimensions");
  }
  const int F = face_verts.size(0);
  const int N = bin_faces.size(0);
  const int B = bin_faces.size(1);
  const int M = bin_faces.size(3);
  const int K = faces_per_pixel;
  const int H = image_size; // Assume square images only.
  const int W = image_size;

  if (K > kMaxPointsPerPixel) {
    // Report the actual limit instead of a hard-coded "8" so the message
    // stays correct if kMaxPointsPerPixel ever changes, matching the style
    // of RasterizeMeshesNaiveCuda.
    std::stringstream ss;
    ss << "Must have faces_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }
  auto long_opts = face_verts.options().dtype(torch::kInt64);
  auto float_opts = face_verts.options().dtype(torch::kFloat32);

  // Outputs are padded with -1 for pixels hit by fewer than K faces.
  torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts);
  torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts);
  torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts);
  torch::Tensor bary = torch::full({N, H, W, K, 3}, -1, float_opts);

  const size_t blocks = 1024;
  const size_t threads = 64;

  RasterizeMeshesFineCudaKernel<<<blocks, threads>>>(
      face_verts.contiguous().data<float>(),
      bin_faces.contiguous().data<int32_t>(),
      blur_radius,
      bin_size,
      perspective_correct,
      N,
      F,
      B,
      M,
      H,
      W,
      K,
      face_idxs.contiguous().data<int64_t>(),
      zbuf.contiguous().data<float>(),
      pix_dists.contiguous().data<float>(),
      bary.contiguous().data<float>());

  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
|
||||
411
pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h
Normal file
411
pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h
Normal file
@@ -0,0 +1,411 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <torch/extension.h>
|
||||
#include <cstdio>
|
||||
#include <tuple>
|
||||
|
||||
// ****************************************************************************
|
||||
// * FORWARD PASS *
|
||||
// ****************************************************************************
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
|
||||
RasterizeMeshesNaiveCpu(
|
||||
const torch::Tensor& face_verts,
|
||||
const torch::Tensor& mesh_to_face_first_idx,
|
||||
const torch::Tensor& num_faces_per_mesh,
|
||||
int image_size,
|
||||
float blur_radius,
|
||||
int faces_per_pixel,
|
||||
bool perspective_correct);
|
||||
|
||||
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
|
||||
RasterizeMeshesNaiveCuda(
|
||||
const at::Tensor& face_verts,
|
||||
const at::Tensor& mesh_to_face_first_idx,
|
||||
const at::Tensor& num_faces_per_mesh,
|
||||
int image_size,
|
||||
float blur_radius,
|
||||
int num_closest,
|
||||
bool perspective_correct);
|
||||
|
||||
// Forward pass for rasterizing a batch of meshes.
|
||||
//
|
||||
// Args:
|
||||
// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for
|
||||
// faces in all the meshes in the batch. Concretely,
|
||||
// face_verts[f, i] = [x, y, z] gives the coordinates for the
|
||||
// ith vertex of the fth face. These vertices are expected to be
|
||||
// in NDC coordinates in the range [-1, 1].
|
||||
// mesh_to_face_first_idx: LongTensor of shape (N) giving the index in
|
||||
// faces_verts of the first face in each mesh in
|
||||
// the batch where N is the batch size.
|
||||
// num_faces_per_mesh: LongTensor of shape (N) giving the number of faces
|
||||
// for each mesh in the batch.
|
||||
// image_size: Size in pixels of the output image to be rasterized.
|
||||
// Assume square images only.
|
||||
//  blur_radius: float distance in NDC coordinates used to expand the face
|
||||
// bounding boxes for the rasterization. Set to 0.0 if no blur
|
||||
// is required.
|
||||
//  faces_per_pixel: the number of closest faces to rasterize per pixel.
|
||||
// perspective_correct: Whether to apply perspective correction when
|
||||
// computing barycentric coordinates. If this is True,
|
||||
// then this function returns world-space barycentric
|
||||
// coordinates for each pixel; if this is False then
|
||||
// this function instead returns screen-space
|
||||
// barycentric coordinates for each pixel.
|
||||
//
|
||||
// Returns:
|
||||
// A 4 element tuple of:
|
||||
// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of
|
||||
// each of the closest faces to the pixel in the rasterized
|
||||
// image, or -1 for pixels that are not covered by any face.
|
||||
// zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of
|
||||
// the closest faces for each pixel.
|
||||
// barycentric_coords: float tensor of shape (N, H, W, K, 3) giving
|
||||
// barycentric coordinates of the pixel with respect to
|
||||
// each of the closest faces along the z axis, padded
|
||||
// with -1 for pixels hit by fewer than
|
||||
// faces_per_pixel faces.
|
||||
// dists: float tensor of shape (N, H, W, K) giving the euclidean distance
|
||||
// in the (NDC) x/y plane between each pixel and its K closest
|
||||
// faces along the z axis padded with -1 for pixels hit by fewer than
|
||||
// faces_per_pixel faces.
|
||||
inline std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
|
||||
RasterizeMeshesNaive(
|
||||
const torch::Tensor& face_verts,
|
||||
const torch::Tensor& mesh_to_face_first_idx,
|
||||
const torch::Tensor& num_faces_per_mesh,
|
||||
int image_size,
|
||||
float blur_radius,
|
||||
int faces_per_pixel,
|
||||
bool perspective_correct) {
|
||||
// TODO: Better type checking.
|
||||
if (face_verts.type().is_cuda()) {
|
||||
return RasterizeMeshesNaiveCuda(
|
||||
face_verts,
|
||||
mesh_to_face_first_idx,
|
||||
num_faces_per_mesh,
|
||||
image_size,
|
||||
blur_radius,
|
||||
faces_per_pixel,
|
||||
perspective_correct);
|
||||
} else {
|
||||
return RasterizeMeshesNaiveCpu(
|
||||
face_verts,
|
||||
mesh_to_face_first_idx,
|
||||
num_faces_per_mesh,
|
||||
image_size,
|
||||
blur_radius,
|
||||
faces_per_pixel,
|
||||
perspective_correct);
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * BACKWARD PASS *
|
||||
// ****************************************************************************
|
||||
|
||||
// Forward declarations of the backward-pass implementations.
// NOTE: the parameter order below (grad_zbuf before grad_bary) matches both
// the definitions and the positional call sites in RasterizeMeshesBackward;
// the previous declaration named these two parameters in the opposite order,
// which was misleading (though harmless, since calls are positional).
torch::Tensor RasterizeMeshesBackwardCpu(
    const torch::Tensor& face_verts,
    const torch::Tensor& pix_to_face,
    const torch::Tensor& grad_zbuf,
    const torch::Tensor& grad_bary,
    const torch::Tensor& grad_dists,
    bool perspective_correct);

torch::Tensor RasterizeMeshesBackwardCuda(
    const torch::Tensor& face_verts,
    const torch::Tensor& pix_to_face,
    const torch::Tensor& grad_zbuf,
    const torch::Tensor& grad_bary,
    const torch::Tensor& grad_dists,
    bool perspective_correct);
|
||||
|
||||
// Args:
|
||||
// face_verts: float32 Tensor of shape (F, 3, 3) (from forward pass) giving
|
||||
// (packed) vertex positions for faces in all the meshes in
|
||||
// the batch.
|
||||
// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of
|
||||
// each of the closest faces to the pixel in the rasterized
|
||||
// image, or -1 for pixels that are not covered by any face.
|
||||
// grad_zbuf: Tensor of shape (N, H, W, K) giving upstream gradients
|
||||
// d(loss)/d(zbuf) of the zbuf tensor from the forward pass.
|
||||
// grad_bary: Tensor of shape (N, H, W, K, 3) giving upstream gradients
|
||||
// d(loss)/d(bary) of the barycentric_coords tensor returned by
|
||||
// the forward pass.
|
||||
// grad_dists: Tensor of shape (N, H, W, K) giving upstream gradients
|
||||
// d(loss)/d(dists) of the dists tensor from the forward pass.
|
||||
// perspective_correct: Whether to apply perspective correction when
|
||||
// computing barycentric coordinates. If this is True,
|
||||
// then this function returns world-space barycentric
|
||||
// coordinates for each pixel; if this is False then
|
||||
// this function instead returns screen-space
|
||||
// barycentric coordinates for each pixel.
|
||||
//
|
||||
// Returns:
|
||||
// grad_face_verts: float32 Tensor of shape (F, 3, 3) giving downstream
|
||||
// gradients for the face vertices.
|
||||
// Backward-pass entry point: dispatches to the CUDA or CPU implementation
// depending on the device of face_verts and returns d(loss)/d(face_verts).
torch::Tensor RasterizeMeshesBackward(
    const torch::Tensor& face_verts,
    const torch::Tensor& pix_to_face,
    const torch::Tensor& grad_zbuf,
    const torch::Tensor& grad_bary,
    const torch::Tensor& grad_dists,
    bool perspective_correct) {
  if (face_verts.type().is_cuda()) {
    return RasterizeMeshesBackwardCuda(
        face_verts,
        pix_to_face,
        grad_zbuf,
        grad_bary,
        grad_dists,
        perspective_correct);
  }
  return RasterizeMeshesBackwardCpu(
      face_verts,
      pix_to_face,
      grad_zbuf,
      grad_bary,
      grad_dists,
      perspective_correct);
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * COARSE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
// Forward declarations of the coarse-rasterization implementations.
// Uses torch::Tensor throughout (the CPU declaration previously mixed in
// at::Tensor, which is the same type but inconsistent with the file).
torch::Tensor RasterizeMeshesCoarseCpu(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_face_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    int image_size,
    float blur_radius,
    int bin_size,
    int max_faces_per_bin);

torch::Tensor RasterizeMeshesCoarseCuda(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_face_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    int image_size,
    float blur_radius,
    int bin_size,
    int max_faces_per_bin);
|
||||
|
||||
// Args:
|
||||
// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for
|
||||
// faces in all the meshes in the batch. Concretely,
|
||||
// face_verts[f, i] = [x, y, z] gives the coordinates for the
|
||||
// ith vertex of the fth face. These vertices are expected to be
|
||||
// in NDC coordinates in the range [-1, 1].
|
||||
// mesh_to_face_first_idx: LongTensor of shape (N) giving the index in
|
||||
// faces_verts of the first face in each mesh in
|
||||
// the batch where N is the batch size.
|
||||
// num_faces_per_mesh: LongTensor of shape (N) giving the number of faces
|
||||
// for each mesh in the batch.
|
||||
// image_size: Size in pixels of the output image to be rasterized.
|
||||
// blur_radius: float distance in NDC coordinates used to expand the face
|
||||
// bounding boxes for the rasterization. Set to 0.0 if no blur
|
||||
// is required.
|
||||
// bin_size: Size of each bin within the image (in pixels)
|
||||
// max_faces_per_bin: Maximum number of faces to count in each bin.
|
||||
//
|
||||
// Returns:
|
||||
// bin_face_idxs: Tensor of shape (N, num_bins, num_bins, K) giving the
|
||||
// indices of faces that fall into each bin.
|
||||
|
||||
// Coarse rasterization entry point: bins faces by image region, dispatching
// to the CUDA or CPU implementation based on the device of face_verts.
torch::Tensor RasterizeMeshesCoarse(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_face_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    int image_size,
    float blur_radius,
    int bin_size,
    int max_faces_per_bin) {
  if (face_verts.type().is_cuda()) {
    return RasterizeMeshesCoarseCuda(
        face_verts,
        mesh_to_face_first_idx,
        num_faces_per_mesh,
        image_size,
        blur_radius,
        bin_size,
        max_faces_per_bin);
  }
  return RasterizeMeshesCoarseCpu(
      face_verts,
      mesh_to_face_first_idx,
      num_faces_per_mesh,
      image_size,
      blur_radius,
      bin_size,
      max_faces_per_bin);
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * FINE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
|
||||
RasterizeMeshesFineCuda(
|
||||
const torch::Tensor& face_verts,
|
||||
const torch::Tensor& bin_faces,
|
||||
int image_size,
|
||||
float blur_radius,
|
||||
int bin_size,
|
||||
int faces_per_pixel,
|
||||
bool perspective_correct);
|
||||
|
||||
// Args:
|
||||
// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for
|
||||
// faces in all the meshes in the batch. Concretely,
|
||||
// face_verts[f, i] = [x, y, z] gives the coordinates for the
|
||||
// ith vertex of the fth face. These vertices are expected to be
|
||||
// in NDC coordinates in the range [-1, 1].
|
||||
// bin_faces: int32 Tensor of shape (N, B, B, M) giving the indices of faces
|
||||
// that fall into each bin (output from coarse rasterization).
|
||||
// image_size: Size in pixels of the output image to be rasterized.
|
||||
// blur_radius: float distance in NDC coordinates used to expand the face
|
||||
// bounding boxes for the rasterization. Set to 0.0 if no blur
|
||||
// is required.
|
||||
// bin_size: Size of each bin within the image (in pixels)
|
||||
// faces_per_pixel: the number of closest faces to rasterize per pixel.
|
||||
// perspective_correct: Whether to apply perspective correction when
|
||||
// computing barycentric coordinates. If this is True,
|
||||
// then this function returns world-space barycentric
|
||||
// coordinates for each pixel; if this is False then
|
||||
// this function instead returns screen-space
|
||||
// barycentric coordinates for each pixel.
|
||||
//
|
||||
// Returns (same as rasterize_meshes):
|
||||
// A 4 element tuple of:
|
||||
// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of
|
||||
// each of the closest faces to the pixel in the rasterized
|
||||
// image, or -1 for pixels that are not covered by any face.
|
||||
// zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of
|
||||
// the closest faces for each pixel.
|
||||
// barycentric_coords: float tensor of shape (N, H, W, K, 3) giving
|
||||
// barycentric coordinates of the pixel with respect to
|
||||
// each of the closest faces along the z axis, padded
|
||||
// with -1 for pixels hit by fewer than
|
||||
// faces_per_pixel faces.
|
||||
// dists: float tensor of shape (N, H, W, K) giving the euclidean distance
|
||||
// in the (NDC) x/y plane between each pixel and its K closest
|
||||
// faces along the z axis padded with -1 for pixels hit by fewer than
|
||||
// faces_per_pixel faces.
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
|
||||
RasterizeMeshesFine(
|
||||
const torch::Tensor& face_verts,
|
||||
const torch::Tensor& bin_faces,
|
||||
int image_size,
|
||||
float blur_radius,
|
||||
int bin_size,
|
||||
int faces_per_pixel,
|
||||
bool perspective_correct) {
|
||||
if (face_verts.type().is_cuda()) {
|
||||
return RasterizeMeshesFineCuda(
|
||||
face_verts,
|
||||
bin_faces,
|
||||
image_size,
|
||||
blur_radius,
|
||||
bin_size,
|
||||
faces_per_pixel,
|
||||
perspective_correct);
|
||||
} else {
|
||||
AT_ERROR("NOT IMPLEMENTED");
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * MAIN ENTRY POINT *
|
||||
// ****************************************************************************
|
||||
|
||||
// This is the main entry point for the forward pass of the mesh rasterizer;
|
||||
// it uses either naive or coarse-to-fine rasterization based on bin_size.
|
||||
//
|
||||
// Args:
|
||||
// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for
|
||||
// faces in all the meshes in the batch. Concretely,
|
||||
// face_verts[f, i] = [x, y, z] gives the coordinates for the
|
||||
// ith vertex of the fth face. These vertices are expected to be
|
||||
// in NDC coordinates in the range [-1, 1].
|
||||
// mesh_to_face_first_idx: LongTensor of shape (N) giving the index in
|
||||
// faces_verts of the first face in each mesh in
|
||||
// the batch where N is the batch size.
|
||||
// num_faces_per_mesh: LongTensor of shape (N) giving the number of faces
|
||||
// for each mesh in the batch.
|
||||
// image_size: Size in pixels of the output image to be rasterized.
|
||||
// blur_radius: float distance in NDC coordinates used to expand the face
|
||||
// bounding boxes for the rasterization. Set to 0.0 if no blur
|
||||
// is required.
|
||||
// bin_size: Bin size (in pixels) for coarse-to-fine rasterization. Setting
|
||||
// bin_size=0 uses naive rasterization instead.
|
||||
// max_faces_per_bin: The maximum number of faces allowed to fall into each
|
||||
// bin when using coarse-to-fine rasterization.
|
||||
// perspective_correct: Whether to apply perspective correction when
|
||||
// computing barycentric coordinates. If this is True,
|
||||
// then this function returns world-space barycentric
|
||||
// coordinates for each pixel; if this is False then
|
||||
// this function instead returns screen-space
|
||||
// barycentric coordinates for each pixel.
|
||||
//
|
||||
// Returns:
|
||||
// A 4 element tuple of:
|
||||
// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of
|
||||
// each of the closest faces to the pixel in the rasterized
|
||||
// image, or -1 for pixels that are not covered by any face.
|
||||
// zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of
|
||||
// the closest faces for each pixel.
|
||||
// barycentric_coords: float tensor of shape (N, H, W, K, 3) giving
|
||||
// barycentric coordinates of the pixel with respect to
|
||||
// each of the closest faces along the z axis, padded
|
||||
// with -1 for pixels hit by fewer than
|
||||
// faces_per_pixel faces.
|
||||
// dists: float tensor of shape (N, H, W, K) giving the euclidean distance
|
||||
// in the (NDC) x/y plane between each pixel and its K closest
|
||||
// faces along the z axis padded with -1 for pixels hit by fewer than
|
||||
// faces_per_pixel faces.
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
|
||||
RasterizeMeshes(
|
||||
const torch::Tensor& face_verts,
|
||||
const torch::Tensor& mesh_to_face_first_idx,
|
||||
const torch::Tensor& num_faces_per_mesh,
|
||||
int image_size,
|
||||
float blur_radius,
|
||||
int faces_per_pixel,
|
||||
int bin_size,
|
||||
int max_faces_per_bin,
|
||||
bool perspective_correct) {
|
||||
if (bin_size > 0 && max_faces_per_bin > 0) {
|
||||
// Use coarse-to-fine rasterization
|
||||
auto bin_faces = RasterizeMeshesCoarse(
|
||||
face_verts,
|
||||
mesh_to_face_first_idx,
|
||||
num_faces_per_mesh,
|
||||
image_size,
|
||||
blur_radius,
|
||||
bin_size,
|
||||
max_faces_per_bin);
|
||||
return RasterizeMeshesFine(
|
||||
face_verts,
|
||||
bin_faces,
|
||||
image_size,
|
||||
blur_radius,
|
||||
bin_size,
|
||||
faces_per_pixel,
|
||||
perspective_correct);
|
||||
} else {
|
||||
// Use the naive per-pixel implementation
|
||||
return RasterizeMeshesNaive(
|
||||
face_verts,
|
||||
mesh_to_face_first_idx,
|
||||
num_faces_per_mesh,
|
||||
image_size,
|
||||
blur_radius,
|
||||
faces_per_pixel,
|
||||
perspective_correct);
|
||||
}
|
||||
}
|
||||
471
pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp
Normal file
471
pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp
Normal file
@@ -0,0 +1,471 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <torch/extension.h>
|
||||
#include <algorithm>
|
||||
#include <list>
|
||||
#include <queue>
|
||||
#include <tuple>
|
||||
#include "geometry_utils.h"
|
||||
#include "vec2.h"
|
||||
#include "vec3.h"
|
||||
|
||||
// Map pixel index i (0 <= i < S) to the NDC coordinate of the pixel
// center: -1 + (i * pixel_width + half_pixel_width) with pixel_width = 2/S.
float PixToNdc(int i, int S) {
  const float center_offset = (2 * i + 1.0f) / S;
  return center_offset - 1.0f;
}
|
||||
|
||||
// Return the (x, y, z) coordinates of one vertex of a (3, 3) face as a
// tuple, given any indexable "face" (tensor accessor, nested array, ...).
template <typename Face>
auto ExtractVerts(const Face& face, const int vertex_index) {
  const auto& vertex = face[vertex_index];
  return std::make_tuple(vertex[0], vertex[1], vertex[2]);
}
|
||||
|
||||
// Compute min/max x/y for each face.
|
||||
auto ComputeFaceBoundingBoxes(const torch::Tensor& face_verts) {
|
||||
const int total_F = face_verts.size(0);
|
||||
auto float_opts = face_verts.options().dtype(torch::kFloat32);
|
||||
auto face_verts_a = face_verts.accessor<float, 3>();
|
||||
torch::Tensor face_bboxes = torch::full({total_F, 6}, -2.0, float_opts);
|
||||
|
||||
// Loop through all the faces
|
||||
for (int f = 0; f < total_F; ++f) {
|
||||
const auto& face = face_verts_a[f];
|
||||
float x0, x1, x2, y0, y1, y2, z0, z1, z2;
|
||||
std::tie(x0, y0, z0) = ExtractVerts(face, 0);
|
||||
std::tie(x1, y1, z1) = ExtractVerts(face, 1);
|
||||
std::tie(x2, y2, z2) = ExtractVerts(face, 2);
|
||||
|
||||
const float x_min = std::min(x0, std::min(x1, x2));
|
||||
const float y_min = std::min(y0, std::min(y1, y2));
|
||||
const float x_max = std::max(x0, std::max(x1, x2));
|
||||
const float y_max = std::max(y0, std::max(y1, y2));
|
||||
const float z_min = std::min(z0, std::min(z1, z2));
|
||||
const float z_max = std::max(z0, std::max(z1, z2));
|
||||
|
||||
face_bboxes[f][0] = x_min;
|
||||
face_bboxes[f][1] = y_min;
|
||||
face_bboxes[f][2] = x_max;
|
||||
face_bboxes[f][3] = y_max;
|
||||
face_bboxes[f][4] = z_min;
|
||||
face_bboxes[f][5] = z_max;
|
||||
}
|
||||
|
||||
return face_bboxes;
|
||||
}
|
||||
|
||||
// Test the point (px, py) against a face bounding box [x_min, y_min,
// x_max, y_max, ...] expanded on every side by blur_radius.
// Returns true if the point lies OUTSIDE the expanded box.
template <typename Face>
bool CheckPointOutsideBoundingBox(
    const Face& face_bbox,
    float blur_radius,
    float px,
    float py) {
  // Expand the box by the blur radius on all sides.
  const float x_min = face_bbox[0] - blur_radius;
  const float y_min = face_bbox[1] - blur_radius;
  const float x_max = face_bbox[2] + blur_radius;
  const float y_max = face_bbox[3] + blur_radius;

  // Inside means within the closed interval on both axes.
  const bool inside_x = (px >= x_min) && (px <= x_max);
  const bool inside_y = (py >= y_min) && (py <= y_max);
  return !(inside_x && inside_y);
}
|
||||
|
||||
// Calculate areas of all faces. Returns a tensor of shape (total_faces, 1)
|
||||
// where faces with zero area have value -1.
|
||||
auto ComputeFaceAreas(const torch::Tensor& face_verts) {
|
||||
const int total_F = face_verts.size(0);
|
||||
auto float_opts = face_verts.options().dtype(torch::kFloat32);
|
||||
auto face_verts_a = face_verts.accessor<float, 3>();
|
||||
torch::Tensor face_areas = torch::full({total_F}, -1, float_opts);
|
||||
|
||||
// Loop through all the faces
|
||||
for (int f = 0; f < total_F; ++f) {
|
||||
const auto& face = face_verts_a[f];
|
||||
float x0, x1, x2, y0, y1, y2, z0, z1, z2;
|
||||
std::tie(x0, y0, z0) = ExtractVerts(face, 0);
|
||||
std::tie(x1, y1, z1) = ExtractVerts(face, 1);
|
||||
std::tie(x2, y2, z2) = ExtractVerts(face, 2);
|
||||
|
||||
const vec2<float> v0(x0, y0);
|
||||
const vec2<float> v1(x1, y1);
|
||||
const vec2<float> v2(x2, y2);
|
||||
|
||||
const float face_area = EdgeFunctionForward(v0, v1, v2);
|
||||
face_areas[f] = face_area;
|
||||
}
|
||||
|
||||
return face_areas;
|
||||
}
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
|
||||
RasterizeMeshesNaiveCpu(
|
||||
const torch::Tensor& face_verts,
|
||||
const torch::Tensor& mesh_to_face_first_idx,
|
||||
const torch::Tensor& num_faces_per_mesh,
|
||||
int image_size,
|
||||
float blur_radius,
|
||||
int faces_per_pixel,
|
||||
bool perspective_correct) {
|
||||
if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 ||
|
||||
face_verts.size(2) != 3) {
|
||||
AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)");
|
||||
}
|
||||
if (num_faces_per_mesh.size(0) != mesh_to_face_first_idx.size(0)) {
|
||||
AT_ERROR(
|
||||
"num_faces_per_mesh must have save size first dimension as mesh_to_face_first_idx");
|
||||
}
|
||||
|
||||
const int32_t N = mesh_to_face_first_idx.size(0); // batch_size.
|
||||
const int H = image_size;
|
||||
const int W = image_size;
|
||||
const int K = faces_per_pixel;
|
||||
|
||||
auto long_opts = face_verts.options().dtype(torch::kInt64);
|
||||
auto float_opts = face_verts.options().dtype(torch::kFloat32);
|
||||
|
||||
// Initialize output tensors.
|
||||
torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts);
|
||||
torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts);
|
||||
torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts);
|
||||
torch::Tensor barycentric_coords =
|
||||
torch::full({N, H, W, K, 3}, -1, float_opts);
|
||||
|
||||
auto face_verts_a = face_verts.accessor<float, 3>();
|
||||
auto face_idxs_a = face_idxs.accessor<int64_t, 4>();
|
||||
auto zbuf_a = zbuf.accessor<float, 4>();
|
||||
auto pix_dists_a = pix_dists.accessor<float, 4>();
|
||||
auto barycentric_coords_a = barycentric_coords.accessor<float, 5>();
|
||||
|
||||
auto face_bboxes = ComputeFaceBoundingBoxes(face_verts);
|
||||
auto face_bboxes_a = face_bboxes.accessor<float, 2>();
|
||||
auto face_areas = ComputeFaceAreas(face_verts);
|
||||
auto face_areas_a = face_areas.accessor<float, 1>();
|
||||
|
||||
for (int n = 0; n < N; ++n) {
|
||||
// Loop through each mesh in the batch.
|
||||
// Get the start index of the faces in faces_packed and the num faces
|
||||
// in the mesh to avoid having to loop through all the faces.
|
||||
const int face_start_idx = mesh_to_face_first_idx[n].item().to<int32_t>();
|
||||
const int face_stop_idx =
|
||||
(face_start_idx + num_faces_per_mesh[n].item().to<int32_t>());
|
||||
|
||||
// Iterate through the horizontal lines of the image from top to bottom.
|
||||
for (int yi = 0; yi < H; ++yi) {
|
||||
// Y coordinate of the top of the pixel.
|
||||
const float yf = PixToNdc(yi, H);
|
||||
// Iterate through pixels on this horizontal line, left to right.
|
||||
for (int xi = 0; xi < W; ++xi) {
|
||||
// X coordinate of the left of the pixel.
|
||||
const float xf = PixToNdc(xi, W);
|
||||
// Use a priority queue to hold values:
|
||||
// (z, idx, r, bary.x, bary.y. bary.z)
|
||||
std::priority_queue<std::tuple<float, int, float, float, float, float>>
|
||||
q;
|
||||
|
||||
// Loop through the faces in the mesh.
|
||||
for (int f = face_start_idx; f < face_stop_idx; ++f) {
|
||||
// Get coordinates of three face vertices.
|
||||
const auto& face = face_verts_a[f];
|
||||
float x0, x1, x2, y0, y1, y2, z0, z1, z2;
|
||||
std::tie(x0, y0, z0) = ExtractVerts(face, 0);
|
||||
std::tie(x1, y1, z1) = ExtractVerts(face, 1);
|
||||
std::tie(x2, y2, z2) = ExtractVerts(face, 2);
|
||||
|
||||
const vec2<float> v0(x0, y0);
|
||||
const vec2<float> v1(x1, y1);
|
||||
const vec2<float> v2(x2, y2);
|
||||
|
||||
// Skip faces with zero area.
|
||||
const float face_area = face_areas_a[f];
|
||||
if (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip if point is outside the face bounding box.
|
||||
const auto face_bbox = face_bboxes_a[f];
|
||||
const bool outside_bbox = CheckPointOutsideBoundingBox(
|
||||
face_bbox, std::sqrt(blur_radius), xf, yf);
|
||||
if (outside_bbox) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Compute barycentric coordinates and use this to get the
|
||||
// depth of the point on the triangle.
|
||||
const vec2<float> pxy(xf, yf);
|
||||
const vec3<float> bary0 =
|
||||
BarycentricCoordinatesForward(pxy, v0, v1, v2);
|
||||
const vec3<float> bary = !perspective_correct
|
||||
? bary0
|
||||
: BarycentricPerspectiveCorrectionForward(bary0, z0, z1, z2);
|
||||
|
||||
// Use barycentric coordinates to get the depth of the current pixel
|
||||
const float pz = (bary.x * z0 + bary.y * z1 + bary.z * z2);
|
||||
|
||||
if (pz < 0) {
|
||||
continue; // Point is behind the image plane so ignore.
|
||||
}
|
||||
|
||||
// Compute absolute distance of the point to the triangle.
|
||||
// If the point is inside the triangle then the distance
|
||||
// is negative.
|
||||
const float dist = PointTriangleDistanceForward(pxy, v0, v1, v2);
|
||||
|
||||
// Use the bary coordinates to determine if the point is
|
||||
// inside the face.
|
||||
const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f;
|
||||
const float signed_dist = inside ? -dist : dist;
|
||||
|
||||
// Check if pixel is outside blur region
|
||||
if (!inside && dist >= blur_radius) {
|
||||
continue;
|
||||
}
|
||||
// The current pixel lies inside the current face.
|
||||
q.emplace(pz, f, signed_dist, bary.x, bary.y, bary.z);
|
||||
if (static_cast<int>(q.size()) > K) {
|
||||
q.pop();
|
||||
}
|
||||
}
|
||||
while (!q.empty()) {
|
||||
auto t = q.top();
|
||||
q.pop();
|
||||
const int i = q.size();
|
||||
zbuf_a[n][yi][xi][i] = std::get<0>(t);
|
||||
face_idxs_a[n][yi][xi][i] = std::get<1>(t);
|
||||
pix_dists_a[n][yi][xi][i] = std::get<2>(t);
|
||||
barycentric_coords_a[n][yi][xi][i][0] = std::get<3>(t);
|
||||
barycentric_coords_a[n][yi][xi][i][1] = std::get<4>(t);
|
||||
barycentric_coords_a[n][yi][xi][i][2] = std::get<5>(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return std::make_tuple(face_idxs, zbuf, barycentric_coords, pix_dists);
|
||||
}
|
||||
|
||||
// CPU backward pass for mesh rasterization: accumulates d(loss)/d(face_verts)
// from the upstream gradients of the dists, zbuf and barycentric-coordinate
// outputs of the forward pass. Gradients from every (pixel, k) sample that a
// face covers are summed into that face's rows of the output.
//
// Args (shapes noted inline):
//   face_verts: packed face vertex positions, same tensor as the forward pass.
//   pix_to_face: face index per pixel sample, -1 for padded (uncovered) slots.
//   grad_zbuf: upstream d(loss)/d(zbuf).
//   grad_bary: upstream d(loss)/d(barycentric_coords).
//   grad_dists: upstream d(loss)/d(dists).
//   perspective_correct: must match the forward pass; when true, the
//     perspective-correction step is differentiated as well.
//
// Returns:
//   grad_face_verts: (F, 3, 3) gradients w.r.t. the face vertices.
torch::Tensor RasterizeMeshesBackwardCpu(
    const torch::Tensor& face_verts, // (F, 3, 3)
    const torch::Tensor& pix_to_face, // (N, H, W, K)
    const torch::Tensor& grad_zbuf, // (N, H, W, K)
    const torch::Tensor& grad_bary, // (N, H, W, K, 3)
    const torch::Tensor& grad_dists, // (N, H, W, K)
    bool perspective_correct) {
  const int F = face_verts.size(0);
  const int N = pix_to_face.size(0);
  const int H = pix_to_face.size(1);
  const int W = pix_to_face.size(2);
  const int K = pix_to_face.size(3);

  // Output gradient buffer, zero-initialized so faces can accumulate.
  torch::Tensor grad_face_verts = torch::zeros({F, 3, 3}, face_verts.options());
  auto face_verts_a = face_verts.accessor<float, 3>();
  auto pix_to_face_a = pix_to_face.accessor<int64_t, 4>();
  auto grad_dists_a = grad_dists.accessor<float, 4>();
  auto grad_zbuf_a = grad_zbuf.accessor<float, 4>();
  auto grad_bary_a = grad_bary.accessor<float, 5>();

  for (int n = 0; n < N; ++n) {
    // Iterate through the rows of the image from top to bottom.
    for (int y = 0; y < H; ++y) {
      // NDC y coordinate for this pixel (see PixToNdc).
      const float yf = PixToNdc(y, H);
      // Iterate through pixels on this row, left to right.
      for (int x = 0; x < W; ++x) {
        // NDC x coordinate for this pixel.
        const float xf = PixToNdc(x, W);
        const vec2<float> pxy(xf, yf);

        // Iterate through the K faces recorded for this pixel.
        for (int k = 0; k < K; ++k) {
          // Get face index from forward pass output.
          const int f = pix_to_face_a[n][y][x][k];
          if (f < 0) {
            continue; // padded face.
          }
          // Get coordinates of the three face vertices.
          const auto face_verts_f = face_verts_a[f];
          const float x0 = face_verts_f[0][0];
          const float y0 = face_verts_f[0][1];
          const float z0 = face_verts_f[0][2];
          const float x1 = face_verts_f[1][0];
          const float y1 = face_verts_f[1][1];
          const float z1 = face_verts_f[1][2];
          const float x2 = face_verts_f[2][0];
          const float y2 = face_verts_f[2][1];
          const float z2 = face_verts_f[2][2];
          const vec2<float> v0xy(x0, y0);
          const vec2<float> v1xy(x1, y1);
          const vec2<float> v2xy(x2, y2);

          // Get upstream gradients for this (pixel, k) sample.
          const float grad_dist_upstream = grad_dists_a[n][y][x][k];
          const float grad_zbuf_upstream = grad_zbuf_a[n][y][x][k];
          const auto grad_bary_upstream_w012 = grad_bary_a[n][y][x][k];
          const float grad_bary_upstream_w0 = grad_bary_upstream_w012[0];
          const float grad_bary_upstream_w1 = grad_bary_upstream_w012[1];
          const float grad_bary_upstream_w2 = grad_bary_upstream_w012[2];
          const vec3<float> grad_bary_upstream(
              grad_bary_upstream_w0,
              grad_bary_upstream_w1,
              grad_bary_upstream_w2);

          // Recompute the forward-pass barycentric coordinates; they are
          // needed both for the sign of the distance and for the
          // perspective-correction backward step.
          const vec3<float> bary0 =
              BarycentricCoordinatesForward(pxy, v0xy, v1xy, v2xy);
          const vec3<float> bary = !perspective_correct
              ? bary0
              : BarycentricPerspectiveCorrectionForward(bary0, z0, z1, z2);

          // Distances inside the face are negative so get the
          // correct sign to apply to the upstream gradient.
          const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f;
          const float sign = inside ? -1.0f : 1.0f;

          // TODO(T52813608) Add support for non-square images.
          const auto grad_dist_f = PointTriangleDistanceBackward(
              pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
          const auto ddist_d_v0 = std::get<1>(grad_dist_f);
          const auto ddist_d_v1 = std::get<2>(grad_dist_f);
          const auto ddist_d_v2 = std::get<3>(grad_dist_f);

          // Upstream gradient for barycentric coords from zbuf calculation:
          // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
          // Therefore
          // d_zbuf/d_bary_w0 = z0
          // d_zbuf/d_bary_w1 = z1
          // d_zbuf/d_bary_w2 = z2
          const vec3<float> d_zbuf_d_bary(z0, z1, z2);

          // Total upstream barycentric gradients are the sum of
          // external upstream gradients and contribution from zbuf.
          vec3<float> grad_bary_f_sum =
              (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bary);

          vec3<float> grad_bary0 = grad_bary_f_sum;
          if (perspective_correct) {
            // Differentiate the perspective-correction step: this yields
            // the gradient w.r.t. the uncorrected barycentrics plus extra
            // gradients w.r.t. the vertex z values, added here directly.
            auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
                bary0, z0, z1, z2, grad_bary_f_sum);
            grad_bary0 = std::get<0>(perspective_grads);
            grad_face_verts[f][0][2] += std::get<1>(perspective_grads);
            grad_face_verts[f][1][2] += std::get<2>(perspective_grads);
            grad_face_verts[f][2][2] += std::get<3>(perspective_grads);
          }
          auto grad_bary_f =
              BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
          const vec2<float> dbary_d_v0 = std::get<1>(grad_bary_f);
          const vec2<float> dbary_d_v1 = std::get<2>(grad_bary_f);
          const vec2<float> dbary_d_v2 = std::get<3>(grad_bary_f);

          // Update output gradient buffer: xy entries combine the
          // barycentric and distance paths; z entries get the zbuf path
          // (zbuf is linear in z with the barycentrics as coefficients).
          grad_face_verts[f][0][0] += dbary_d_v0.x + ddist_d_v0.x;
          grad_face_verts[f][0][1] += dbary_d_v0.y + ddist_d_v0.y;
          grad_face_verts[f][0][2] += grad_zbuf_upstream * bary.x;
          grad_face_verts[f][1][0] += dbary_d_v1.x + ddist_d_v1.x;
          grad_face_verts[f][1][1] += dbary_d_v1.y + ddist_d_v1.y;
          grad_face_verts[f][1][2] += grad_zbuf_upstream * bary.y;
          grad_face_verts[f][2][0] += dbary_d_v2.x + ddist_d_v2.x;
          grad_face_verts[f][2][1] += dbary_d_v2.y + ddist_d_v2.y;
          grad_face_verts[f][2][2] += grad_zbuf_upstream * bary.z;
        }
      }
    }
  }
  return grad_face_verts;
}
|
||||
|
||||
// CPU coarse rasterization: for every (mesh, bin) record the indices of
// faces whose blur-expanded 2D bounding boxes overlap the bin.
//
// Args:
//   face_verts: (F, 3, 3) packed face vertex positions in NDC.
//   mesh_to_face_first_idx: (N,) index of each mesh's first face.
//   num_faces_per_mesh: (N,) face count per mesh.
//   image_size: output image height/width in pixels (square images only).
//   blur_radius: expands face bounding boxes by sqrt(blur_radius).
//   bin_size: side length of each bin in pixels.
//   max_faces_per_bin: M, capacity of each bin; exceeding it is an error.
//
// Returns:
//   bin_faces: int32 (N, BH, BW, M) tensor of face indices per bin,
//   padded with -1.
torch::Tensor RasterizeMeshesCoarseCpu(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_face_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    int image_size,
    float blur_radius,
    int bin_size,
    int max_faces_per_bin) {
  if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 ||
      face_verts.size(2) != 3) {
    AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)");
  }
  if (num_faces_per_mesh.ndimension() != 1) {
    AT_ERROR("num_faces_per_mesh can only have one dimension");
  }
  const int N = num_faces_per_mesh.size(0); // batch size.
  const int M = max_faces_per_bin;

  // Assume square images. TODO(T52813608) Support non square images.
  const int height = image_size;
  const int width = image_size;
  const int BH = 1 + (height - 1) / bin_size; // ceil(height / bin_size)
  const int BW = 1 + (width - 1) / bin_size; // ceil(width / bin_size)

  auto opts = face_verts.options().dtype(torch::kInt32);
  torch::Tensor bin_faces = torch::full({N, BH, BW, M}, -1, opts);
  auto bin_faces_a = bin_faces.accessor<int32_t, 4>();

  // Precompute all face bounding boxes.
  auto face_bboxes = ComputeFaceBoundingBoxes(face_verts);
  auto face_bboxes_a = face_bboxes.accessor<float, 2>();

  const float pixel_width = 2.0f / image_size;
  const float bin_width = pixel_width * bin_size;
  // Loop invariant: previously recomputed four times per face per bin.
  const float sqrt_radius = std::sqrt(blur_radius);

  // Iterate through the meshes in the batch.
  for (int n = 0; n < N; ++n) {
    const int face_start_idx = mesh_to_face_first_idx[n].item().to<int32_t>();
    const int face_stop_idx =
        (face_start_idx + num_faces_per_mesh[n].item().to<int32_t>());

    float bin_y_min = -1.0f;
    float bin_y_max = bin_y_min + bin_width;

    // Iterate through the horizontal bins from top to bottom.
    for (int by = 0; by < BH; ++by) {
      float bin_x_min = -1.0f;
      float bin_x_max = bin_x_min + bin_width;

      // Iterate through bins on this horizontal line, left to right.
      for (int bx = 0; bx < BW; ++bx) {
        int32_t faces_hit = 0;

        for (int32_t f = face_start_idx; f < face_stop_idx; ++f) {
          // Get the bounding box expanded by the blur radius.
          const float face_x_min = face_bboxes_a[f][0] - sqrt_radius;
          const float face_y_min = face_bboxes_a[f][1] - sqrt_radius;
          const float face_x_max = face_bboxes_a[f][2] + sqrt_radius;
          const float face_y_max = face_bboxes_a[f][3] + sqrt_radius;
          const float face_z_max = face_bboxes_a[f][5];

          if (face_z_max < 0) {
            continue; // Face is behind the camera.
          }

          // Use a half-open interval so that faces exactly on the
          // boundary between bins will fall into exactly one bin.
          const bool x_overlap =
              (face_x_min <= bin_x_max) && (bin_x_min < face_x_max);
          const bool y_overlap =
              (face_y_min <= bin_y_max) && (bin_y_min < face_y_max);

          if (x_overlap && y_overlap) {
            // Got too many faces for this bin, so throw an error.
            if (faces_hit >= max_faces_per_bin) {
              AT_ERROR("Got too many faces per bin");
            }
            // The face overlaps the current bin, so record it.
            bin_faces_a[n][by][bx][faces_hit] = f;
            faces_hit++;
          }
        }

        // Shift the bin to the right for the next loop iteration.
        bin_x_min = bin_x_max;
        bin_x_max = bin_x_min + bin_width;
      }
      // Shift the bin down for the next loop iteration.
      bin_y_min = bin_y_max;
      bin_y_max = bin_y_min + bin_width;
    }
  }
  return bin_faces;
}
|
||||
59
pytorch3d/csrc/rasterize_meshes/vec2.h
Normal file
59
pytorch3d/csrc/rasterize_meshes/vec2.h
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <type_traits>
|
||||
|
||||
// A fixed-sized vector with basic arithmetic operators useful for
|
||||
// representing 2D coordinates.
|
||||
// TODO: switch to Eigen if more functionality is needed.
|
||||
|
||||
// A minimal 2D vector. Instantiation is SFINAE-restricted to float and
// double element types.
template <
    typename T,
    typename = std::enable_if_t<
        std::is_same<T, float>::value || std::is_same<T, double>::value>>
struct vec2 {
  T x, y;
  typedef T scalar_t; // element type, exposed for generic code
  // Construct from the two components.
  vec2(T x_in, T y_in) : x(x_in), y(y_in) {}
};
|
||||
|
||||
template <typename T>
|
||||
inline vec2<T> operator+(const vec2<T>& a, const vec2<T>& b) {
|
||||
return vec2<T>(a.x + b.x, a.y + b.y);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline vec2<T> operator-(const vec2<T>& a, const vec2<T>& b) {
|
||||
return vec2<T>(a.x - b.x, a.y - b.y);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline vec2<T> operator*(const T a, const vec2<T>& b) {
|
||||
return vec2<T>(a * b.x, a * b.y);
|
||||
}
|
||||
|
||||
// Component-wise division of a 2D vector by a scalar.
// Raises an error (via ATen's AT_ERROR) instead of dividing by zero.
template <typename T>
inline vec2<T> operator/(const vec2<T>& a, const T b) {
  if (b == 0.0) {
    AT_ERROR(
        "denominator in vec2 division is 0"); // prevent divide by 0 errors.
  }
  return vec2<T>(a.x / b, a.y / b);
}
|
||||
|
||||
template <typename T>
|
||||
inline T dot(const vec2<T>& a, const vec2<T>& b) {
|
||||
return a.x * b.x + a.y * b.y;
|
||||
}
|
||||
|
||||
// Euclidean distance between points a and b (despite the name, this is the
// norm of the difference b - a, not of a single vector).
// NOTE(review): `sqrt` is unqualified here, so for T = float it may resolve
// to the global sqrt(double) from <math.h> rather than std::sqrt(float) —
// consider qualifying as std::sqrt.
template <typename T>
inline T norm(const vec2<T>& a, const vec2<T>& b) {
  const vec2<T> ba = b - a;
  return sqrt(dot(ba, ba));
}
|
||||
|
||||
// Stream a human-readable "vec2(x, y)" representation, for debugging/logging.
template <typename T>
std::ostream& operator<<(std::ostream& os, const vec2<T>& v) {
  os << "vec2(" << v.x << ", " << v.y << ")";
  return os;
}
|
||||
63
pytorch3d/csrc/rasterize_meshes/vec3.h
Normal file
63
pytorch3d/csrc/rasterize_meshes/vec3.h
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
|
||||
// A fixed-sized vector with basic arithmetic operators useful for
|
||||
// representing 3D coordinates.
|
||||
// TODO: switch to Eigen if more functionality is needed.
|
||||
|
||||
// A minimal 3D vector. Instantiation is SFINAE-restricted to float and
// double element types.
template <
    typename T,
    typename = std::enable_if_t<
        std::is_same<T, float>::value || std::is_same<T, double>::value>>
struct vec3 {
  T x, y, z;
  typedef T scalar_t; // element type, exposed for generic code
  // Construct from the three components.
  vec3(T x_in, T y_in, T z_in) : x(x_in), y(y_in), z(z_in) {}
};
|
||||
|
||||
template <typename T>
|
||||
inline vec3<T> operator+(const vec3<T>& a, const vec3<T>& b) {
|
||||
return vec3<T>(a.x + b.x, a.y + b.y, a.z + b.z);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline vec3<T> operator-(const vec3<T>& a, const vec3<T>& b) {
|
||||
return vec3<T>(a.x - b.x, a.y - b.y, a.z - b.z);
|
||||
}
|
||||
|
||||
// Component-wise division of a 3D vector by a scalar.
// Raises an error (via ATen's AT_ERROR) instead of dividing by zero.
template <typename T>
inline vec3<T> operator/(const vec3<T>& a, const T b) {
  if (b == 0.0) {
    AT_ERROR(
        "denominator in vec3 division is 0"); // prevent divide by 0 errors.
  }
  return vec3<T>(a.x / b, a.y / b, a.z / b);
}
|
||||
|
||||
template <typename T>
|
||||
inline vec3<T> operator*(const T a, const vec3<T>& b) {
|
||||
return vec3<T>(a * b.x, a * b.y, a * b.z);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline vec3<T> operator*(const vec3<T>& a, const vec3<T>& b) {
|
||||
return vec3<T>(a.x * b.x, a.y * b.y, a.z * b.z);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline T dot(const vec3<T>& a, const vec3<T>& b) {
|
||||
return a.x * b.x + a.y * b.y + a.z * b.z;
|
||||
}
|
||||
|
||||
// Cross product a x b (standard right-handed determinant expansion).
template <typename T>
inline vec3<T> cross(const vec3<T>& a, const vec3<T>& b) {
  return vec3<T>(
      a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x);
}
|
||||
|
||||
// Stream a human-readable "vec3(x, y, z)" representation, for
// debugging/logging.
template <typename T>
std::ostream& operator<<(std::ostream& os, const vec3<T>& v) {
  os << "vec3(" << v.x << ", " << v.y << ", " << v.z << ")";
  return os;
}
|
||||
73
pytorch3d/csrc/rasterize_points/bitmask.cuh
Normal file
73
pytorch3d/csrc/rasterize_points/bitmask.cuh
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#define BINMASK_H
|
||||
|
||||
// A BitMask represents a bool array of shape (H, W, N). We pack values into
|
||||
// the bits of unsigned ints; a single unsigned int has B = 32 bits, so to hold
|
||||
// all values we use H * W * (N / B) = H * W * D values. We want to store
|
||||
// BitMasks in shared memory, so we assume that the memory has already been
|
||||
// allocated for it elsewhere.
|
||||
class BitMask {
|
||||
public:
|
||||
__device__ BitMask(unsigned int* data, int H, int W, int N)
|
||||
: data(data), H(H), W(W), B(8 * sizeof(unsigned int)), D(N / B) {
|
||||
// TODO: check if the data is null.
|
||||
N = ceilf(N % 32); // take ceil incase N % 32 != 0
|
||||
block_clear(); // clear the data
|
||||
}
|
||||
|
||||
// Use all threads in the current block to clear all bits of this BitMask
|
||||
__device__ void block_clear() {
|
||||
for (int i = threadIdx.x; i < H * W * D; i += blockDim.x) {
|
||||
data[i] = 0;
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
__device__ int _get_elem_idx(int y, int x, int d) {
|
||||
return y * W * D + x * D + d / B;
|
||||
}
|
||||
|
||||
__device__ int _get_bit_idx(int d) {
|
||||
return d % B;
|
||||
}
|
||||
|
||||
// Turn on a single bit (y, x, d)
|
||||
__device__ void set(int y, int x, int d) {
|
||||
int elem_idx = _get_elem_idx(y, x, d);
|
||||
int bit_idx = _get_bit_idx(d);
|
||||
const unsigned int mask = 1U << bit_idx;
|
||||
atomicOr(data + elem_idx, mask);
|
||||
}
|
||||
|
||||
// Turn off a single bit (y, x, d)
|
||||
__device__ void unset(int y, int x, int d) {
|
||||
int elem_idx = _get_elem_idx(y, x, d);
|
||||
int bit_idx = _get_bit_idx(d);
|
||||
const unsigned int mask = ~(1U << bit_idx);
|
||||
atomicAnd(data + elem_idx, mask);
|
||||
}
|
||||
|
||||
// Check whether the bit (y, x, d) is on or off
|
||||
__device__ bool get(int y, int x, int d) {
|
||||
int elem_idx = _get_elem_idx(y, x, d);
|
||||
int bit_idx = _get_bit_idx(d);
|
||||
return (data[elem_idx] >> bit_idx) & 1U;
|
||||
}
|
||||
|
||||
// Compute the number of bits set in the row (y, x, :)
|
||||
__device__ int count(int y, int x) {
|
||||
int total = 0;
|
||||
for (int i = 0; i < D; ++i) {
|
||||
int elem_idx = y * W * D + x * D + i;
|
||||
unsigned int elem = data[elem_idx];
|
||||
total += __popc(elem);
|
||||
}
|
||||
return total;
|
||||
}
|
||||
|
||||
private:
|
||||
unsigned int* data;
|
||||
int H, W, B, D;
|
||||
};
|
||||
33
pytorch3d/csrc/rasterize_points/rasterization_utils.cuh
Normal file
33
pytorch3d/csrc/rasterize_points/rasterization_utils.cuh
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
|
||||
// Given a pixel coordinate 0 <= i < S, convert it to a normalized device
|
||||
// coordinate in the range [-1, 1]. We divide the NDC range into S evenly-sized
|
||||
// pixels, and assume that each pixel falls in the *center* of its range.
|
||||
// Map pixel index i (0 <= i < S) to the NDC coordinate of that pixel's
// center, in [-1, 1].
__device__ inline float PixToNdc(int i, int S) {
  // NDC x-offset + (i * pixel_width + half_pixel_width)
  return -1 + (2 * i + 1.0f) / S;
}
|
||||
|
||||
// The maximum number of points per pixel that we can return. Since we use
// thread-local arrays to hold and sort points, the maximum size of the array
// needs to be known at compile time. There might be some fancy template magic
// we could use to make this more dynamic, but for now just fix a constant.
// NOTE: increasing this enlarges each thread's local array, which may have
// performance implications (register/local-memory pressure).
const int32_t kMaxPointsPerPixel = 150;
|
||||
|
||||
// In-place ascending bubble sort of arr[0..n), ordered by operator< on T.
template <typename T>
__device__ inline void BubbleSort(T* arr, int n) {
  // Bubble sort. We only use it for tiny thread-local arrays; in this
  // regime we care more about warp divergence than computational complexity.
  for (int i = 0; i < n - 1; ++i) {
    for (int j = 0; j < n - i - 1; ++j) {
      if (arr[j + 1] < arr[j]) {
        // Swap adjacent out-of-order elements.
        T temp = arr[j];
        arr[j] = arr[j + 1];
        arr[j + 1] = temp;
      }
    }
  }
}
|
||||
511
pytorch3d/csrc/rasterize_points/rasterize_points.cu
Normal file
511
pytorch3d/csrc/rasterize_points/rasterize_points.cu
Normal file
@@ -0,0 +1,511 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <math.h>
|
||||
#include <torch/extension.h>
|
||||
#include <cstdio>
|
||||
#include <sstream>
|
||||
#include <tuple>
|
||||
#include "rasterize_points/bitmask.cuh"
|
||||
#include "rasterize_points/rasterization_utils.cuh"
|
||||
|
||||
namespace {
// A little structure for holding details about a pixel.
struct Pix {
  float z; // Depth of the reference point.
  int32_t idx; // Index of the reference point.
  float dist2; // Euclidean distance square to the reference point.
};

// Order Pix entries by depth: nearer (smaller z) compares less. Used by
// BubbleSort to emit hits from near to far.
__device__ inline bool operator<(const Pix& a, const Pix& b) {
  return a.z < b.z;
}

// This function checks if a pixel given by xy location pxy lies within the
// point with index p and batch index n. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the points which intersect
// with this pixel. If the pixel pxy lies in the point, the list (q) is
// updated in place. In addition the auxiliary variables q_size, q_max_z and
// q_max_idx are also modified. q is kept unsorted; instead the max-z entry
// is tracked so the farthest hit can be evicted in O(1)+O(K).
// This code is shared between RasterizePointsNaiveCudaKernel and
// RasterizePointsFineCudaKernel.
template <typename PointQ>
__device__ void CheckPixelInsidePoint(
    const float* points, // (N, P, 3)
    const int p, // index of the candidate point
    int& q_size, // in/out: number of valid entries in q
    float& q_max_z, // in/out: largest z currently stored in q
    int& q_max_idx, // in/out: index within q of that largest-z entry
    PointQ& q, // fixed-capacity array of Pix (capacity >= K)
    const float radius2, // squared point radius, NDC units
    const float xf, // pixel center x, NDC
    const float yf, // pixel center y, NDC
    const int n, // batch index
    const int P, // number of points per batch element
    const int K) { // max hits to keep for this pixel
  const float px = points[n * P * 3 + p * 3 + 0];
  const float py = points[n * P * 3 + p * 3 + 1];
  const float pz = points[n * P * 3 + p * 3 + 2];
  if (pz < 0)
    return; // Don't render points behind the camera
  const float dx = xf - px;
  const float dy = yf - py;
  const float dist2 = dx * dx + dy * dy;
  if (dist2 < radius2) {
    if (q_size < K) {
      // Just insert it
      q[q_size] = {pz, p, dist2};
      if (pz > q_max_z) {
        q_max_z = pz;
        q_max_idx = q_size;
      }
      q_size++;
    } else if (pz < q_max_z) {
      // Overwrite the old max, and find the new max
      q[q_max_idx] = {pz, p, dist2};
      q_max_z = pz;
      for (int i = 0; i < K; i++) {
        if (q[i].z > q_max_z) {
          q_max_z = q[i].z;
          q_max_idx = i;
        }
      }
    }
  }
}
} // namespace
|
||||
// ****************************************************************************
|
||||
// * NAIVE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
// Naive forward kernel: one thread per output pixel. Each thread scans all P
// points of its batch element and records the K nearest (in z) hits whose
// squared xy-distance to the pixel center is below radius^2.
__global__ void RasterizePointsNaiveCudaKernel(
    const float* points, // (N, P, 3)
    const float radius,
    const int N,
    const int P,
    const int S,
    const int K,
    int32_t* point_idxs, // (N, S, S, K)
    float* zbuf, // (N, S, S, K)
    float* pix_dists) { // (N, S, S, K)
  // Simple version: One thread per output pixel
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockDim.x * blockIdx.x + threadIdx.x;
  const float radius2 = radius * radius;
  for (int i = tid; i < N * S * S; i += num_threads) {
    // Convert linear index to 3D index
    const int n = i / (S * S); // Batch index
    const int pix_idx = i % (S * S);
    const int yi = pix_idx / S;
    const int xi = pix_idx % S;

    const float xf = PixToNdc(xi, S);
    const float yf = PixToNdc(yi, S);

    // For keeping track of the K closest points we want a data structure
    // that (1) gives O(1) access to the closest point for easy comparisons,
    // and (2) allows insertion of new elements. In the CPU version we use
    // std::priority_queue; then (2) is O(log K). We can't use STL
    // containers in CUDA; we could roll our own max heap in an array, but
    // that would likely have a lot of warp divergence so we do something
    // simpler instead: keep the elements in an unsorted array, but keep
    // track of the max value and the index of the max value. Then (1) is
    // still O(1) time, while (2) is O(K) with a clean loop. Since K is
    // small this should be fast enough for our purposes.
    // TODO(jcjohns) Abstract this out into a standalone data structure
    Pix q[kMaxPointsPerPixel];
    int q_size = 0;
    // Initial "max depth" sentinel; any accepted point has pz >= 0 (points
    // with pz < 0 are rejected in CheckPixelInsidePoint), so -1000 is safe.
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int p = 0; p < P; ++p) {
      CheckPixelInsidePoint(
          points, p, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, n, P, K);
    }
    // Sort the hits near-to-far before writing them out.
    BubbleSort(q, q_size);
    int idx = n * S * S * K + yi * S * K + xi * K;
    for (int k = 0; k < q_size; ++k) {
      point_idxs[idx + k] = q[k].idx;
      zbuf[idx + k] = q[k].z;
      pix_dists[idx + k] = q[k].dist2;
    }
  }
}
|
||||
|
||||
// Host entry point for the naive point rasterizer (see kernel above).
//
// Args:
//   points: (N, P, 3) float tensor of point positions in NDC.
//   image_size: (S) output image side length in pixels.
//   radius: point radius in NDC units.
//   points_per_pixel: (K) number of closest points kept per pixel; must be
//     <= kMaxPointsPerPixel.
//
// Returns (point_idxs, zbuf, pix_dists), each of shape (N, S, S, K), padded
// with -1 where fewer than K points hit a pixel.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
RasterizePointsNaiveCuda(
    const torch::Tensor& points,
    const int image_size,
    const float radius,
    const int points_per_pixel) {
  const int N = points.size(0);
  const int P = points.size(1);
  const int S = image_size;
  const int K = points_per_pixel;
  if (K > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }

  auto int_opts = points.options().dtype(torch::kInt32);
  auto float_opts = points.options().dtype(torch::kFloat32);
  // -1 marks empty slots in all three outputs.
  torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
  torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
  torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);

  const size_t blocks = 1024;
  const size_t threads = 64;
  RasterizePointsNaiveCudaKernel<<<blocks, threads>>>(
      points.contiguous().data<float>(),
      radius,
      N,
      P,
      S,
      K,
      point_idxs.contiguous().data<int32_t>(),
      zbuf.contiguous().data<float>(),
      pix_dists.contiguous().data<float>());
  return std::make_tuple(point_idxs, zbuf, pix_dists);
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * COARSE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
// Coarse kernel: each block takes a chunk of points, builds a shared-memory
// bitmask of (bin_y, bin_x, point-in-chunk) overlaps, then one thread per
// bin counts the set bits and appends the hit point indices to global
// memory (points_per_bin / bin_points).
__global__ void RasterizePointsCoarseCudaKernel(
    const float* points, // (N, P, 3)
    const float radius,
    const int N,
    const int P,
    const int S,
    const int bin_size,
    const int chunk_size,
    const int max_points_per_bin,
    int* points_per_bin, // (N, num_bins, num_bins)
    int* bin_points) { // (N, num_bins, num_bins, max_points_per_bin)
  extern __shared__ char sbuf[];
  const int M = max_points_per_bin;
  const int num_bins = 1 + (S - 1) / bin_size; // Integer divide round up
  const float half_pix = 1.0f / S; // Size of half a pixel in NDC units

  // This is a boolean array of shape (num_bins, num_bins, chunk_size)
  // stored in shared memory that will track whether each point in the chunk
  // falls into each bin of the image.
  BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size);

  // Have each block handle a chunk of points and build a 3D bitmask in
  // shared memory to mark which points hit which bins.  In this first phase,
  // each thread processes one point at a time. After processing the chunk,
  // one thread is assigned per bin, and the thread counts and writes the
  // points for the bin out to global memory.
  const int chunks_per_batch = 1 + (P - 1) / chunk_size;
  const int num_chunks = N * chunks_per_batch;
  for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
    const int batch_idx = chunk / chunks_per_batch;
    const int chunk_idx = chunk % chunks_per_batch;
    const int point_start_idx = chunk_idx * chunk_size;

    binmask.block_clear();

    // Have each thread handle a different point within the chunk
    for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) {
      const int p_idx = point_start_idx + p;
      if (p_idx >= P)
        break;
      const float px = points[batch_idx * P * 3 + p_idx * 3 + 0];
      const float py = points[batch_idx * P * 3 + p_idx * 3 + 1];
      const float pz = points[batch_idx * P * 3 + p_idx * 3 + 2];
      if (pz < 0)
        continue; // Don't render points behind the camera
      // Axis-aligned bounding square of the point in NDC.
      const float px0 = px - radius;
      const float px1 = px + radius;
      const float py0 = py - radius;
      const float py1 = py + radius;

      // Brute-force search over all bins; TODO something smarter?
      // For example we could compute the exact bin where the point falls,
      // then check neighboring bins. This way we wouldn't have to check
      // all bins (however then we might have more warp divergence?)
      for (int by = 0; by < num_bins; ++by) {
        // Get y extent for the bin. PixToNdc gives us the location of
        // the center of each pixel, so we need to add/subtract a half
        // pixel to get the true extent of the bin.
        const float by0 = PixToNdc(by * bin_size, S) - half_pix;
        const float by1 = PixToNdc((by + 1) * bin_size - 1, S) + half_pix;
        const bool y_overlap = (py0 <= by1) && (by0 <= py1);
        if (!y_overlap) {
          continue;
        }
        for (int bx = 0; bx < num_bins; ++bx) {
          // Get x extent for the bin; again we need to adjust the
          // output of PixToNdc by half a pixel.
          const float bx0 = PixToNdc(bx * bin_size, S) - half_pix;
          const float bx1 = PixToNdc((bx + 1) * bin_size - 1, S) + half_pix;
          const bool x_overlap = (px0 <= bx1) && (bx0 <= px1);
          if (x_overlap) {
            binmask.set(by, bx, p);
          }
        }
      }
    }
    __syncthreads();
    // Now we have processed every point in the current chunk. We need to
    // count the number of points in each bin so we can write the indices
    // out to global memory. We have each thread handle a different bin.
    for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) {
      const int by = byx / num_bins;
      const int bx = byx % num_bins;
      const int count = binmask.count(by, bx);
      const int points_per_bin_idx =
          batch_idx * num_bins * num_bins + by * num_bins + bx;

      // This atomically increments the (global) number of points found
      // in the current bin, and gets the previous value of the counter;
      // this effectively allocates space in the bin_points array for the
      // points in the current chunk that fall into this bin.
      const int start = atomicAdd(points_per_bin + points_per_bin_idx, count);

      // Now loop over the binmask and write the active bits for this bin
      // out to bin_points.
      int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M +
          bx * M + start;
      for (int p = 0; p < chunk_size; ++p) {
        if (binmask.get(by, bx, p)) {
          // TODO: Throw an error if next_idx >= M -- this means that
          // we got more than max_points_per_bin in this bin
          // TODO: check if atomicAdd is needed in line 265.
          bin_points[next_idx] = point_start_idx + p;
          next_idx++;
        }
      }
    }
    __syncthreads();
  }
}
|
||||
|
||||
// Host entry point for the coarse point rasterizer: bins the points of each
// batch element into a (num_bins x num_bins) grid.
//
// Returns bin_points of shape (N, num_bins, num_bins, max_points_per_bin),
// containing per-batch point indices and padded with -1.
torch::Tensor RasterizePointsCoarseCuda(
    const torch::Tensor& points,
    const int image_size,
    const float radius,
    const int bin_size,
    const int max_points_per_bin) {
  const int N = points.size(0);
  const int P = points.size(1);
  const int num_bins = 1 + (image_size - 1) / bin_size; // divide round up
  const int M = max_points_per_bin;
  if (num_bins >= 22) {
    // Make sure we do not use too much shared memory.
    // NOTE(review): shared_size below is num_bins^2 * chunk_size / 8 bytes;
    // 22 looks chosen to keep that under a typical 48KB dynamic
    // shared-memory limit — confirm before changing this constant.
    std::stringstream ss;
    ss << "Got " << num_bins << "; that's too many!";
    AT_ERROR(ss.str());
  }
  auto opts = points.options().dtype(torch::kInt32);
  torch::Tensor points_per_bin = torch::zeros({N, num_bins, num_bins}, opts);
  torch::Tensor bin_points = torch::full({N, num_bins, num_bins, M}, -1, opts);
  const int chunk_size = 512;
  // One bit per (bin_y, bin_x, point-in-chunk) entry, hence the / 8 bytes.
  const size_t shared_size = num_bins * num_bins * chunk_size / 8;
  const size_t blocks = 64;
  const size_t threads = 512;
  RasterizePointsCoarseCudaKernel<<<blocks, threads, shared_size>>>(
      points.contiguous().data<float>(),
      radius,
      N,
      P,
      image_size,
      bin_size,
      chunk_size,
      M,
      points_per_bin.contiguous().data<int32_t>(),
      bin_points.contiguous().data<int32_t>());
  return bin_points;
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * FINE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
// Fine kernel: one thread per output pixel, but each pixel only tests the
// points that the coarse phase recorded for its bin (bin_points), instead of
// all P points.
__global__ void RasterizePointsFineCudaKernel(
    const float* points, // (N, P, 3)
    const int32_t* bin_points, // (N, B, B, T)
    const float radius,
    const int bin_size,
    const int N,
    const int P,
    const int B, // number of bins along each image edge
    const int M, // capacity (max points) of each bin
    const int S,
    const int K,
    int32_t* point_idxs, // (N, S, S, K)
    float* zbuf, // (N, S, S, K)
    float* pix_dists) { // (N, S, S, K)
  // This can be more than S^2 if S is not dividable by bin_size.
  const int num_pixels = N * B * B * bin_size * bin_size;
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  const float radius2 = radius * radius;
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Convert linear index into bin and pixel indices. We make the within
    // block pixel ids move the fastest, so that adjacent threads will fall
    // into the same bin; this should give them coalesced memory reads when
    // they read from points and bin_points.
    int i = pid;
    const int n = i / (B * B * bin_size * bin_size);
    i %= B * B * bin_size * bin_size;
    const int by = i / (B * bin_size * bin_size);
    i %= B * bin_size * bin_size;
    const int bx = i / (bin_size * bin_size);
    i %= bin_size * bin_size;
    const int yi = i / bin_size + by * bin_size;
    const int xi = i % bin_size + bx * bin_size;

    // Skip padding pixels of partially-covered edge bins.
    if (yi >= S || xi >= S)
      continue;
    const float xf = PixToNdc(xi, S);
    const float yf = PixToNdc(yi, S);

    // This part looks like the naive rasterization kernel, except we use
    // bin_points to only look at a subset of points already known to fall
    // in this bin. TODO abstract out this logic into some data structure
    // that is shared by both kernels?
    Pix q[kMaxPointsPerPixel];
    int q_size = 0;
    // Initial "max depth" sentinel; accepted points always have pz >= 0.
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int m = 0; m < M; ++m) {
      const int p = bin_points[n * B * B * M + by * B * M + bx * M + m];
      if (p < 0) {
        // bin_points uses -1 as a sentinal value
        continue;
      }
      CheckPixelInsidePoint(
          points, p, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, n, P, K);
    }
    // Now we've looked at all the points for this bin, so we can write
    // output for the current pixel.
    BubbleSort(q, q_size);
    const int pix_idx = n * S * S * K + yi * S * K + xi * K;
    for (int k = 0; k < q_size; ++k) {
      point_idxs[pix_idx + k] = q[k].idx;
      zbuf[pix_idx + k] = q[k].z;
      pix_dists[pix_idx + k] = q[k].dist2;
    }
  }
}
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFineCuda(
|
||||
const torch::Tensor& points,
|
||||
const torch::Tensor& bin_points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int points_per_pixel) {
|
||||
const int N = points.size(0);
|
||||
const int P = points.size(1);
|
||||
const int B = bin_points.size(1);
|
||||
const int M = bin_points.size(3);
|
||||
const int S = image_size;
|
||||
const int K = points_per_pixel;
|
||||
if (K > kMaxPointsPerPixel) {
|
||||
AT_ERROR("Must have num_closest <= 8");
|
||||
}
|
||||
auto int_opts = points.options().dtype(torch::kInt32);
|
||||
auto float_opts = points.options().dtype(torch::kFloat32);
|
||||
torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
|
||||
torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
|
||||
torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);
|
||||
|
||||
const size_t blocks = 1024;
|
||||
const size_t threads = 64;
|
||||
RasterizePointsFineCudaKernel<<<blocks, threads>>>(
|
||||
points.contiguous().data<float>(),
|
||||
bin_points.contiguous().data<int32_t>(),
|
||||
radius,
|
||||
bin_size,
|
||||
N,
|
||||
P,
|
||||
B,
|
||||
M,
|
||||
S,
|
||||
K,
|
||||
point_idxs.contiguous().data<int32_t>(),
|
||||
zbuf.contiguous().data<float>(),
|
||||
pix_dists.contiguous().data<float>());
|
||||
|
||||
return std::make_tuple(point_idxs, zbuf, pix_dists);
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * BACKWARD PASS *
|
||||
// ****************************************************************************
|
||||
// TODO(T55115174) Add more documentation for backward kernel.
|
||||
// Backward kernel: scatter gradients from each (pixel, k) slot back to the
// point it recorded. For dist2 = (px - xf)^2 + (py - yf)^2, the partials are
// d(dist2)/d(px) = 2*(px - xf) and d(dist2)/d(py) = 2*(py - yf); the z
// gradient flows straight through from grad_zbuf. atomicAdd is required
// because many pixels may reference the same point.
// TODO(T55115174) Add more documentation for backward kernel.
__global__ void RasterizePointsBackwardCudaKernel(
    const float* points, // (N, P, 3)
    const int32_t* idxs, // (N, H, W, K)
    const int N,
    const int P,
    const int H,
    const int W,
    const int K,
    const float* grad_zbuf, // (N, H, W, K)
    const float* grad_dists, // (N, H, W, K)
    float* grad_points) { // (N, P, 3)
  // Parallelized over each of K points per pixel, for each pixel in images of
  // size H * W, for each image in the batch of size N.
  int num_threads = gridDim.x * blockDim.x;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int i = tid; i < N * H * W * K; i += num_threads) {
    const int n = i / (H * W * K);
    const int yxk = i % (H * W * K);
    const int yi = yxk / (W * K);
    const int xk = yxk % (W * K);
    const int xi = xk / K;
    // k = xk % K (We don't actually need k, but this would be it.)
    const float xf = PixToNdc(xi, W);
    const float yf = PixToNdc(yi, H);

    const int p = idxs[i];
    if (p < 0)
      continue; // -1 marks an empty slot: no point to receive gradient.
    const float grad_dist2 = grad_dists[i];
    const int p_ind = n * P * 3 + p * 3;
    const float px = points[p_ind];
    const float py = points[p_ind + 1];
    const float dx = px - xf;
    const float dy = py - yf;
    const float grad_px = 2.0f * grad_dist2 * dx;
    const float grad_py = 2.0f * grad_dist2 * dy;
    const float grad_pz = grad_zbuf[i];
    atomicAdd(grad_points + p_ind, grad_px);
    atomicAdd(grad_points + p_ind + 1, grad_py);
    atomicAdd(grad_points + p_ind + 2, grad_pz);
  }
}
|
||||
|
||||
// Host entry point for the point-rasterization backward pass.
//
// Args:
//   points: (N, P, 3) forward-pass input points.
//   idxs: (N, H, W, K) point indices recorded by the forward pass (-1 pad).
//   grad_zbuf: (N, H, W, K) upstream gradients w.r.t. the depth buffer.
//   grad_dists: (N, H, W, K) upstream gradients w.r.t. squared distances.
//
// Returns grad_points of shape (N, P, 3).
torch::Tensor RasterizePointsBackwardCuda(
    const torch::Tensor& points, // (N, P, 3)
    const torch::Tensor& idxs, // (N, H, W, K)
    const torch::Tensor& grad_zbuf, // (N, H, W, K)
    const torch::Tensor& grad_dists) { // (N, H, W, K)
  const int N = points.size(0);
  const int P = points.size(1);
  const int H = idxs.size(1);
  const int W = idxs.size(2);
  const int K = idxs.size(3);

  torch::Tensor grad_points = torch::zeros({N, P, 3}, points.options());
  const size_t blocks = 1024;
  const size_t threads = 64;

  RasterizePointsBackwardCudaKernel<<<blocks, threads>>>(
      points.contiguous().data<float>(),
      idxs.contiguous().data<int32_t>(),
      N,
      P,
      H,
      W,
      K,
      grad_zbuf.contiguous().data<float>(),
      grad_dists.contiguous().data<float>(),
      grad_points.contiguous().data<float>());

  return grad_points;
}
|
||||
230
pytorch3d/csrc/rasterize_points/rasterize_points.h
Normal file
230
pytorch3d/csrc/rasterize_points/rasterize_points.h
Normal file
@@ -0,0 +1,230 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <torch/extension.h>
|
||||
#include <cstdio>
|
||||
#include <tuple>
|
||||
|
||||
// ****************************************************************************
|
||||
// * NAIVE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaiveCpu(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel);
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
|
||||
RasterizePointsNaiveCuda(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel);
|
||||
|
||||
// Naive (forward) pointcloud rasterization: For each pixel, for each point,
|
||||
// check whether that point hits the pixel.
|
||||
//
|
||||
// Args:
|
||||
// points: Tensor of shape (N, P, 3) (in NDC)
|
||||
// radius: Radius of each point (in NDC units)
|
||||
// image_size: (S) Size of the image to return (in pixels)
|
||||
// points_per_pixel: (K) The number closest of points to return for each pixel
|
||||
//
|
||||
// Returns:
|
||||
// idxs: int32 Tensor of shape (N, S, S, K) giving the indices of the
|
||||
// closest K points along the z-axis for each pixel, padded with -1 for
|
||||
// pixels
|
||||
// hit by fewer than K points.
|
||||
// zbuf: float32 Tensor of shape (N, S, S, K) giving the depth of each
|
||||
// closest point for each pixel.
|
||||
// dists: float32 Tensor of shape (N, S, S, K) giving squared Euclidean
|
||||
// distance in the (NDC) x/y plane between each pixel and its K closest
|
||||
// points along the z axis.
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaive(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel) {
|
||||
if (points.type().is_cuda()) {
|
||||
return RasterizePointsNaiveCuda(
|
||||
points, image_size, radius, points_per_pixel);
|
||||
} else {
|
||||
return RasterizePointsNaiveCpu(
|
||||
points, image_size, radius, points_per_pixel);
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * COARSE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
torch::Tensor RasterizePointsCoarseCpu(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int max_points_per_bin);
|
||||
|
||||
torch::Tensor RasterizePointsCoarseCuda(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int max_points_per_bin);
|
||||
|
||||
// Args:
|
||||
// points: Tensor of shape (N, P, 3)
|
||||
// radius: Radius of points to rasterize (in NDC units)
|
||||
// image_size: Size of the image to generate (in pixels)
|
||||
// bin_size: Size of each bin within the image (in pixels)
|
||||
//
|
||||
// Returns:
|
||||
// points_per_bin: Tensor of shape (N, num_bins, num_bins) giving the number
|
||||
// of points that fall in each bin
|
||||
// bin_points: Tensor of shape (N, num_bins, num_bins, K) giving the indices
|
||||
// of points that fall into each bin.
|
||||
torch::Tensor RasterizePointsCoarse(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int max_points_per_bin) {
|
||||
if (points.type().is_cuda()) {
|
||||
return RasterizePointsCoarseCuda(
|
||||
points, image_size, radius, bin_size, max_points_per_bin);
|
||||
} else {
|
||||
return RasterizePointsCoarseCpu(
|
||||
points, image_size, radius, bin_size, max_points_per_bin);
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * FINE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFineCuda(
|
||||
const torch::Tensor& points,
|
||||
const torch::Tensor& bin_points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int points_per_pixel);
|
||||
|
||||
// Args:
|
||||
// points: float32 Tensor of shape (N, P, 3)
|
||||
// bin_points: int32 Tensor of shape (N, B, B, M) giving the indices of points
|
||||
// that fall into each bin (output from coarse rasterization)
|
||||
// image_size: Size of image to generate (in pixels)
|
||||
// radius: Radius of points to rasterize (NDC units)
|
||||
// bin_size: Size of each bin (in pixels)
|
||||
// points_per_pixel: How many points to rasterize for each pixel
|
||||
//
|
||||
// Returns (same as rasterize_points):
|
||||
// idxs: int32 Tensor of shape (N, S, S, K) giving the indices of the closest
|
||||
// points_per_pixel points along the z-axis for each pixel, padded with
|
||||
// -1 for pixels hit by fewer than points_per_pixel points
|
||||
// zbuf: float32 Tensor of shape (N, S, S, K) giving the depth of each of each
|
||||
// closest point for each pixel
|
||||
// dists: float32 Tensor of shape (N, S, S, K) giving squared Euclidean
|
||||
// distance in the (NDC) x/y plane between each pixel and its K closest
|
||||
// points along the z axis.
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFine(
|
||||
const torch::Tensor& points,
|
||||
const torch::Tensor& bin_points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int points_per_pixel) {
|
||||
if (points.type().is_cuda()) {
|
||||
return RasterizePointsFineCuda(
|
||||
points, bin_points, image_size, radius, bin_size, points_per_pixel);
|
||||
} else {
|
||||
AT_ERROR("NOT IMPLEMENTED");
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * BACKWARD PASS *
|
||||
// ****************************************************************************
|
||||
|
||||
torch::Tensor RasterizePointsBackwardCpu(
|
||||
const torch::Tensor& points,
|
||||
const torch::Tensor& idxs,
|
||||
const torch::Tensor& grad_zbuf,
|
||||
const torch::Tensor& grad_dists);
|
||||
|
||||
torch::Tensor RasterizePointsBackwardCuda(
|
||||
const torch::Tensor& points,
|
||||
const torch::Tensor& idxs,
|
||||
const torch::Tensor& grad_zbuf,
|
||||
const torch::Tensor& grad_dists);
|
||||
|
||||
// Args:
|
||||
// points: float32 Tensor of shape (N, P, 3)
|
||||
// idxs: int32 Tensor of shape (N, H, W, K) (from forward pass)
|
||||
// grad_zbuf: float32 Tensor of shape (N, H, W, K) giving upstream gradient
|
||||
// d(loss)/d(zbuf) of the distances from each pixel to its nearest
|
||||
// points.
|
||||
// grad_dists: Tensor of shape (N, H, W, K) giving upstream gradient
|
||||
// d(loss)/d(dists) of the dists tensor returned by the forward
|
||||
// pass.
|
||||
//
|
||||
// Returns:
|
||||
// grad_points: float32 Tensor of shape (N, P, 3) giving downstream gradients
|
||||
torch::Tensor RasterizePointsBackward(
|
||||
const torch::Tensor& points,
|
||||
const torch::Tensor& idxs,
|
||||
const torch::Tensor& grad_zbuf,
|
||||
const torch::Tensor& grad_dists) {
|
||||
if (points.type().is_cuda()) {
|
||||
return RasterizePointsBackwardCuda(points, idxs, grad_zbuf, grad_dists);
|
||||
} else {
|
||||
return RasterizePointsBackwardCpu(points, idxs, grad_zbuf, grad_dists);
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * MAIN ENTRY POINT *
|
||||
// ****************************************************************************
|
||||
|
||||
// This is the main entry point for the forward pass of the point rasterizer;
|
||||
// it uses either naive or coarse-to-fine rasterization based on bin_size.
|
||||
//
|
||||
// Args:
|
||||
// points: Tensor of shape (N, P, 3) (in NDC)
|
||||
// radius: Radius of each point (in NDC units)
|
||||
// image_size: (S) Size of the image to return (in pixels)
|
||||
// points_per_pixel: (K) The number of points to return for each pixel
|
||||
// bin_size: Bin size (in pixels) for coarse-to-fine rasterization. Setting
|
||||
// bin_size=0 uses naive rasterization instead.
|
||||
// max_points_per_bin: The maximum number of points allowed to fall into each
|
||||
// bin when using coarse-to-fine rasterization.
|
||||
//
|
||||
// Returns:
|
||||
// idxs: int32 Tensor of shape (N, S, S, K) giving the indices of the
|
||||
// closest points_per_pixel points along the z-axis for each pixel,
|
||||
// padded with -1 for pixels hit by fewer than points_per_pixel points
|
||||
// zbuf: float32 Tensor of shape (N, S, S, K) giving the depth of each of each
|
||||
// closest point for each pixel
|
||||
// dists: float32 Tensor of shape (N, S, S, K) giving squared Euclidean
|
||||
// distance in the (NDC) x/y plane between each pixel and its K closest
|
||||
// points along the z axis.
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePoints(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel,
|
||||
const int bin_size,
|
||||
const int max_points_per_bin) {
|
||||
if (bin_size == 0) {
|
||||
// Use the naive per-pixel implementation
|
||||
return RasterizePointsNaive(points, image_size, radius, points_per_pixel);
|
||||
} else {
|
||||
// Use coarse-to-fine rasterization
|
||||
const auto bin_points = RasterizePointsCoarse(
|
||||
points, image_size, radius, bin_size, max_points_per_bin);
|
||||
return RasterizePointsFine(
|
||||
points, bin_points, image_size, radius, bin_size, points_per_pixel);
|
||||
}
|
||||
}
|
||||
196
pytorch3d/csrc/rasterize_points/rasterize_points_cpu.cpp
Normal file
196
pytorch3d/csrc/rasterize_points/rasterize_points_cpu.cpp
Normal file
@@ -0,0 +1,196 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <torch/extension.h>
|
||||
#include <queue>
|
||||
#include <tuple>
|
||||
|
||||
// Given a pixel coordinate 0 <= i < S, convert it to a normalized device
// coordinate in the range [-1, 1]. The NDC range is divided into S
// evenly-sized pixels, and each pixel is assumed to fall at the *center* of
// its sub-range.
inline float PixToNdc(const int i, const int S) {
  // Center of pixel i in NDC: offset of -1 plus (2*i + 1) half-pixel widths.
  const float numerator = 2 * i + 1.0f;
  return numerator / S - 1;
}
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaiveCpu(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel) {
|
||||
const int N = points.size(0);
|
||||
const int P = points.size(1);
|
||||
const int S = image_size;
|
||||
const int K = points_per_pixel;
|
||||
auto int_opts = points.options().dtype(torch::kInt32);
|
||||
auto float_opts = points.options().dtype(torch::kFloat32);
|
||||
torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
|
||||
torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
|
||||
torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);
|
||||
|
||||
auto points_a = points.accessor<float, 3>();
|
||||
auto point_idxs_a = point_idxs.accessor<int32_t, 4>();
|
||||
auto zbuf_a = zbuf.accessor<float, 4>();
|
||||
auto pix_dists_a = pix_dists.accessor<float, 4>();
|
||||
|
||||
const float radius2 = radius * radius;
|
||||
for (int n = 0; n < N; ++n) {
|
||||
for (int yi = 0; yi < S; ++yi) {
|
||||
float yf = PixToNdc(yi, S);
|
||||
for (int xi = 0; xi < S; ++xi) {
|
||||
float xf = PixToNdc(xi, S);
|
||||
// Use a priority queue to hold (z, idx, r)
|
||||
std::priority_queue<std::tuple<float, int, float>> q;
|
||||
for (int p = 0; p < P; ++p) {
|
||||
const float px = points_a[n][p][0];
|
||||
const float py = points_a[n][p][1];
|
||||
const float pz = points_a[n][p][2];
|
||||
if (pz < 0) {
|
||||
continue;
|
||||
}
|
||||
const float dx = px - xf;
|
||||
const float dy = py - yf;
|
||||
const float dist2 = dx * dx + dy * dy;
|
||||
if (dist2 < radius2) {
|
||||
// The current point hit the current pixel
|
||||
q.emplace(pz, p, dist2);
|
||||
if ((int)q.size() > K) {
|
||||
q.pop();
|
||||
}
|
||||
}
|
||||
}
|
||||
// Now all the points have been seen, so pop elements off the queue
|
||||
// one by one and write them into the output tensors.
|
||||
while (!q.empty()) {
|
||||
auto t = q.top();
|
||||
q.pop();
|
||||
int i = q.size();
|
||||
zbuf_a[n][yi][xi][i] = std::get<0>(t);
|
||||
point_idxs_a[n][yi][xi][i] = std::get<1>(t);
|
||||
pix_dists_a[n][yi][xi][i] = std::get<2>(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return std::make_tuple(point_idxs, zbuf, pix_dists);
|
||||
}
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor> RasterizePointsCoarseCpu(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int max_points_per_bin) {
|
||||
const int N = points.size(0);
|
||||
const int P = points.size(1);
|
||||
const int B = 1 + (image_size - 1) / bin_size; // Integer division round up
|
||||
const int M = max_points_per_bin;
|
||||
auto opts = points.options().dtype(torch::kInt32);
|
||||
torch::Tensor points_per_bin = torch::zeros({N, B, B}, opts);
|
||||
torch::Tensor bin_points = torch::full({N, B, B, M}, -1, opts);
|
||||
|
||||
auto points_a = points.accessor<float, 3>();
|
||||
auto points_per_bin_a = points_per_bin.accessor<int32_t, 3>();
|
||||
auto bin_points_a = bin_points.accessor<int32_t, 4>();
|
||||
|
||||
const float pixel_width = 2.0f / image_size;
|
||||
const float bin_width = pixel_width * bin_size;
|
||||
for (int n = 0; n < N; ++n) {
|
||||
float bin_y_min = -1.0f;
|
||||
float bin_y_max = bin_y_min + bin_width;
|
||||
for (int by = 0; by < B; by++) {
|
||||
float bin_x_min = -1.0f;
|
||||
float bin_x_max = bin_x_min + bin_width;
|
||||
for (int bx = 0; bx < B; bx++) {
|
||||
int32_t points_hit = 0;
|
||||
for (int32_t p = 0; p < P; p++) {
|
||||
float px = points_a[n][p][0];
|
||||
float py = points_a[n][p][1];
|
||||
float pz = points_a[n][p][2];
|
||||
if (pz < 0) {
|
||||
continue;
|
||||
}
|
||||
float point_x_min = px - radius;
|
||||
float point_x_max = px + radius;
|
||||
float point_y_min = py - radius;
|
||||
float point_y_max = py + radius;
|
||||
// Use a half-open interval so that points exactly on the
|
||||
// boundary between bins will fall into exactly one bin.
|
||||
bool x_hit = (point_x_min <= bin_x_max) && (bin_x_min <= point_x_max);
|
||||
bool y_hit = (point_y_min <= bin_y_max) && (bin_y_min <= point_y_max);
|
||||
if (x_hit && y_hit) {
|
||||
// Got too many points for this bin, so throw an error.
|
||||
if (points_hit >= max_points_per_bin) {
|
||||
AT_ERROR("Got too many points per bin");
|
||||
}
|
||||
// The current point falls in the current bin, so
|
||||
// record it.
|
||||
bin_points_a[n][by][bx][points_hit] = p;
|
||||
points_hit++;
|
||||
}
|
||||
}
|
||||
// Record the number of points found in this bin
|
||||
points_per_bin_a[n][by][bx] = points_hit;
|
||||
|
||||
// Shift the bin to the right for the next loop iteration
|
||||
bin_x_min = bin_x_max;
|
||||
bin_x_max = bin_x_min + bin_width;
|
||||
}
|
||||
// Shift the bin down for the next loop iteration
|
||||
bin_y_min = bin_y_max;
|
||||
bin_y_max = bin_y_min + bin_width;
|
||||
}
|
||||
}
|
||||
return std::make_tuple(points_per_bin, bin_points);
|
||||
}
|
||||
|
||||
// CPU backward pass for point rasterization: accumulate d(loss)/d(points)
// from the upstream gradients of zbuf and dists.
//
// Args:
//   points: float32 Tensor of shape (N, P, 3).
//   idxs: int32 Tensor of shape (N, H, W, K) from the forward pass.
//   grad_zbuf: float32 Tensor of shape (N, H, W, K), upstream d(loss)/d(zbuf).
//   grad_dists: float32 Tensor of shape (N, H, W, K), upstream
//       d(loss)/d(dists).
//
// Returns:
//   grad_points: float32 Tensor of shape (N, P, 3).
torch::Tensor RasterizePointsBackwardCpu(
    const torch::Tensor& points, // (N, P, 3)
    const torch::Tensor& idxs, // (N, H, W, K)
    const torch::Tensor& grad_zbuf, // (N, H, W, K)
    const torch::Tensor& grad_dists) { // (N, H, W, K)
  const int N = points.size(0);
  const int P = points.size(1);
  const int H = idxs.size(1);
  const int W = idxs.size(2);
  const int K = idxs.size(3);

  // For now only support square images.
  // TODO(jcjohns): Extend to non-square images.
  if (H != W) {
    AT_ERROR("RasterizePointsBackwardCpu only supports square images");
  }
  torch::Tensor grad_points = torch::zeros({N, P, 3}, points.options());

  auto points_a = points.accessor<float, 3>();
  auto idxs_a = idxs.accessor<int32_t, 4>();
  auto grad_dists_a = grad_dists.accessor<float, 4>();
  auto grad_zbuf_a = grad_zbuf.accessor<float, 4>();
  auto grad_points_a = grad_points.accessor<float, 3>();

  for (int n = 0; n < N; ++n) { // Images in the batch.
    for (int yi = 0; yi < H; ++yi) { // Rows of the image.
      const float pix_y = PixToNdc(yi, H);
      for (int xi = 0; xi < W; ++xi) { // Pixels in the row.
        const float pix_x = PixToNdc(xi, W);
        for (int k = 0; k < K; ++k) { // Points recorded for the pixel.
          const int p = idxs_a[n][yi][xi][k];
          if (p < 0) {
            // idxs is filled front-to-back and padded with -1, so the
            // remaining slots for this pixel are empty too.
            break;
          }
          const float grad_dist2 = grad_dists_a[n][yi][xi][k];
          // dists[n][y][x][k] = dx*dx + dy*dy, so d(dist)/d(px) = 2*dx
          // (and likewise for py).
          const float dx = points_a[n][p][0] - pix_x;
          const float dy = points_a[n][p][1] - pix_y;
          const float grad_px = 2.0f * grad_dist2 * dx;
          const float grad_py = 2.0f * grad_dist2 * dy;
          grad_points_a[n][p][0] += grad_px;
          grad_points_a[n][p][1] += grad_py;
          // zbuf[n][y][x][k] = points[n][p][2]: the z-gradient passes
          // straight through.
          grad_points_a[n][p][2] += grad_zbuf_a[n][yi][xi][k];
        }
      }
    }
  }
  return grad_points;
}
|
||||
Reference in New Issue
Block a user