mirror of
https://github.com/facebookresearch/pytorch3d.git
synced 2026-04-10 14:35:58 +08:00
Initial commit
fbshipit-source-id: ad58e416e3ceeca85fae0583308968d04e78fe0d
This commit is contained in:
73
pytorch3d/csrc/rasterize_points/bitmask.cuh
Normal file
73
pytorch3d/csrc/rasterize_points/bitmask.cuh
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#define BINMASK_H
|
||||
|
||||
// A BitMask represents a bool array of shape (H, W, N). We pack values into
// the bits of unsigned ints; a single unsigned int has B = 32 bits, so to
// hold all values we use H * W * ceil(N / B) = H * W * D values. We want to
// store BitMasks in shared memory, so we assume that the memory has already
// been allocated for it elsewhere.
class BitMask {
 public:
  // data: caller-allocated buffer of at least H * W * D unsigned ints
  //       (typically dynamic shared memory). All bits are cleared by the
  //       constructor, which performs a block-wide barrier.
  // H, W: leading dimensions of the mask.
  // N:    number of bits per (y, x) row; need not be a multiple of 32.
  __device__ BitMask(unsigned int* data, int H, int W, int N)
      : data(data),
        H(H),
        W(W),
        B(8 * sizeof(unsigned int)),
        // Round up so a trailing partial word is still allocated when N is
        // not a multiple of B. (The old code truncated with N / B and then
        // had a dead no-op statement `N = ceilf(N % 32);` in the body.)
        D((N + B - 1) / B) {
    // TODO: check if the data is null.
    block_clear(); // clear the data
  }

  // Use all threads in the current block to clear all bits of this BitMask.
  // Must be called by every thread of the block (it ends in __syncthreads).
  __device__ void block_clear() {
    for (int i = threadIdx.x; i < H * W * D; i += blockDim.x) {
      data[i] = 0;
    }
    __syncthreads();
  }

  // Index of the unsigned int word holding bit (y, x, d).
  __device__ int _get_elem_idx(int y, int x, int d) {
    return y * W * D + x * D + d / B;
  }

  // Position of bit d within its word.
  __device__ int _get_bit_idx(int d) {
    return d % B;
  }

  // Turn on a single bit (y, x, d). Safe for concurrent use via atomicOr.
  __device__ void set(int y, int x, int d) {
    int elem_idx = _get_elem_idx(y, x, d);
    int bit_idx = _get_bit_idx(d);
    const unsigned int mask = 1U << bit_idx;
    atomicOr(data + elem_idx, mask);
  }

  // Turn off a single bit (y, x, d). Safe for concurrent use via atomicAnd.
  __device__ void unset(int y, int x, int d) {
    int elem_idx = _get_elem_idx(y, x, d);
    int bit_idx = _get_bit_idx(d);
    const unsigned int mask = ~(1U << bit_idx);
    atomicAnd(data + elem_idx, mask);
  }

  // Check whether the bit (y, x, d) is on or off (non-atomic read).
  __device__ bool get(int y, int x, int d) {
    int elem_idx = _get_elem_idx(y, x, d);
    int bit_idx = _get_bit_idx(d);
    return (data[elem_idx] >> bit_idx) & 1U;
  }

  // Compute the number of bits set in the row (y, x, :).
  __device__ int count(int y, int x) {
    int total = 0;
    for (int i = 0; i < D; ++i) {
      int elem_idx = y * W * D + x * D + i;
      unsigned int elem = data[elem_idx];
      total += __popc(elem);
    }
    return total;
  }

 private:
  unsigned int* data; // Caller-owned storage; not freed here.
  int H, W, B, D; // B is bits per word (32); D is words per (y, x) row.
};
|
||||
33
pytorch3d/csrc/rasterize_points/rasterization_utils.cuh
Normal file
33
pytorch3d/csrc/rasterize_points/rasterization_utils.cuh
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
|
||||
// Given a pixel coordinate 0 <= i < S, convert it to a normalized device
|
||||
// coordinate in the range [-1, 1]. We divide the NDC range into S evenly-sized
|
||||
// pixels, and assume that each pixel falls in the *center* of its range.
|
||||
// Map a pixel index 0 <= i < S to the NDC coordinate of that pixel's center.
// The NDC range [-1, 1] is split into S equal cells of width 2/S; cell i is
// centered at -1 + (2*i + 1) / S.
__device__ inline float PixToNdc(int i, int S) {
  // Numerator is i * pixel_width + half_pixel_width, in units of 1/S.
  const float center = 2 * i + 1.0f;
  return center / S - 1.0f;
}
|
||||
|
||||
// The maximum number of points per pixel that we can return. Since we use
// thread-local arrays to hold and sort points, the maximum size of the array
// needs to be known at compile time. There might be some fancy template magic
// we could use to make this more dynamic, but for now just fix a constant.
// NOTE(review): each rasterization thread allocates kMaxPointsPerPixel Pix
// entries locally, so raising this increases per-thread register/local-memory
// pressure in both forward kernels — profile before changing.
const int32_t kMaxPointsPerPixel = 150;
||||
|
||||
// In-place ascending sort (by operator< of T) via bubble sort. We only run
// this on tiny thread-local arrays; in this regime we care more about warp
// divergence than computational complexity.
template <typename T>
__device__ inline void BubbleSort(T* arr, int n) {
  // After each pass the largest remaining element has bubbled to the end,
  // so the unsorted prefix shrinks by one element per pass.
  for (int pass = n - 1; pass > 0; --pass) {
    for (int k = 0; k < pass; ++k) {
      if (arr[k + 1] < arr[k]) {
        const T tmp = arr[k + 1];
        arr[k + 1] = arr[k];
        arr[k] = tmp;
      }
    }
  }
}
|
||||
511
pytorch3d/csrc/rasterize_points/rasterize_points.cu
Normal file
511
pytorch3d/csrc/rasterize_points/rasterize_points.cu
Normal file
@@ -0,0 +1,511 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <math.h>
|
||||
#include <torch/extension.h>
|
||||
#include <cstdio>
|
||||
#include <sstream>
|
||||
#include <tuple>
|
||||
#include "rasterize_points/bitmask.cuh"
|
||||
#include "rasterize_points/rasterization_utils.cuh"
|
||||
|
||||
namespace {
|
||||
// A little structure for holding details about a pixel.
struct Pix {
  float z; // Depth of the reference point.
  int32_t idx; // Index of the reference point.
  float dist2; // Euclidean distance square to the reference point.
};

// Compare two Pix records by depth only, so that sorting a Pix array (e.g.
// with BubbleSort) orders points front-to-back along the z axis.
__device__ inline bool operator<(const Pix& a, const Pix& b) {
  return a.z < b.z;
}
|
||||
|
||||
// Test whether the pixel centered at (xf, yf) is covered by point p of batch
// element n and, if so, fold that point into the running top-K queue q. The
// queue holds the K nearest points seen so far as an unsorted array; the
// auxiliary variables q_size, q_max_z and q_max_idx track its occupancy and
// the depth/position of its current farthest entry, and are updated in
// place. This code is shared between RasterizePointsNaiveCudaKernel and
// RasterizePointsFineCudaKernel.
template <typename PointQ>
__device__ void CheckPixelInsidePoint(
    const float* points, // (N, P, 3)
    const int p,
    int& q_size,
    float& q_max_z,
    int& q_max_idx,
    PointQ& q,
    const float radius2,
    const float xf,
    const float yf,
    const int n,
    const int P,
    const int K) {
  const int offset = n * P * 3 + p * 3;
  const float px = points[offset + 0];
  const float py = points[offset + 1];
  const float pz = points[offset + 2];
  if (pz < 0)
    return; // Don't render points behind the camera
  const float dx = xf - px;
  const float dy = yf - py;
  const float dist2 = dx * dx + dy * dy;
  if (dist2 >= radius2) {
    return; // Pixel center lies outside this point's radius.
  }
  if (q_size < K) {
    // Queue not yet full: append, keeping the farthest-entry bookkeeping
    // up to date.
    q[q_size] = {pz, p, dist2};
    if (pz > q_max_z) {
      q_max_z = pz;
      q_max_idx = q_size;
    }
    q_size++;
  } else if (pz < q_max_z) {
    // Queue full, but this point is nearer than the current farthest entry:
    // overwrite the old max, then rescan to find the new max.
    q[q_max_idx] = {pz, p, dist2};
    q_max_z = pz;
    for (int i = 0; i < K; i++) {
      if (q[i].z > q_max_z) {
        q_max_z = q[i].z;
        q_max_idx = i;
      }
    }
  }
}
|
||||
} // namespace
|
||||
// ****************************************************************************
|
||||
// * NAIVE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
// Brute-force forward rasterization: one thread per output pixel; each
// thread scans every point of its batch element. A grid-stride loop covers
// all N * S * S pixels, so any 1D launch configuration produces the full
// output.
__global__ void RasterizePointsNaiveCudaKernel(
    const float* points, // (N, P, 3)
    const float radius,
    const int N,
    const int P,
    const int S,
    const int K,
    int32_t* point_idxs, // (N, S, S, K)
    float* zbuf, // (N, S, S, K)
    float* pix_dists) { // (N, S, S, K)
  const int stride = gridDim.x * blockDim.x;
  const int first = blockDim.x * blockIdx.x + threadIdx.x;
  const float radius2 = radius * radius;
  for (int i = first; i < N * S * S; i += stride) {
    // Unpack the linear index into (batch, y, x).
    const int n = i / (S * S);
    const int rem = i % (S * S);
    const int yi = rem / S;
    const int xi = rem % S;

    // Pixel center in NDC.
    const float xf = PixToNdc(xi, S);
    const float yf = PixToNdc(yi, S);

    // Keep the K closest points in an unsorted thread-local array while
    // tracking the value and position of its farthest (max-z) entry.
    // Lookup of the max is O(1) and replacement is a clean O(K) loop,
    // which avoids the warp divergence a hand-rolled heap would cause.
    // TODO(jcjohns) Abstract this out into a standalone data structure
    Pix q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int p = 0; p < P; ++p) {
      CheckPixelInsidePoint(
          points, p, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, n, P, K);
    }

    // Sort the collected points front-to-back and write them out; slots
    // beyond q_size keep whatever the output buffers were initialized to.
    BubbleSort(q, q_size);
    const int out = n * S * S * K + yi * S * K + xi * K;
    for (int k = 0; k < q_size; ++k) {
      point_idxs[out + k] = q[k].idx;
      zbuf[out + k] = q[k].z;
      pix_dists[out + k] = q[k].dist2;
    }
  }
}
|
||||
|
||||
// Host wrapper for the naive forward rasterizer.
//
// Args:
//   points: (N, P, 3) float tensor of points in NDC.
//   image_size: output image side length S (pixels).
//   radius: point radius in NDC units.
//   points_per_pixel: K, number of closest points kept per pixel
//                     (must be <= kMaxPointsPerPixel).
//
// Returns: (point_idxs, zbuf, pix_dists), each (N, S, S, K), padded with -1
// for pixels hit by fewer than K points.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
RasterizePointsNaiveCuda(
    const torch::Tensor& points,
    const int image_size,
    const float radius,
    const int points_per_pixel) {
  const int N = points.size(0);
  const int P = points.size(1);
  const int S = image_size;
  const int K = points_per_pixel;
  if (K > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }

  // Hoist the contiguous copy into a named tensor so its storage stays
  // alive past the asynchronous kernel launch. The old inline
  // points.contiguous().data<float>() released a non-contiguous input's
  // temporary at the end of the launch statement, while the kernel could
  // still be reading it.
  const torch::Tensor points_c = points.contiguous();

  auto int_opts = points.options().dtype(torch::kInt32);
  auto float_opts = points.options().dtype(torch::kFloat32);
  // Outputs are pre-filled with -1; the kernel only overwrites the slots
  // for points it actually finds.
  torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
  torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
  torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);

  const size_t blocks = 1024;
  const size_t threads = 64;
  RasterizePointsNaiveCudaKernel<<<blocks, threads>>>(
      points_c.data<float>(),
      radius,
      N,
      P,
      S,
      K,
      // torch::full returns contiguous tensors, so their pointers are
      // valid without another .contiguous() round-trip.
      point_idxs.data<int32_t>(),
      zbuf.data<float>(),
      pix_dists.data<float>());
  return std::make_tuple(point_idxs, zbuf, pix_dists);
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * COARSE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
// Coarse rasterization: assign points to a (num_bins, num_bins) grid of
// image tiles. Each block takes one chunk of up to chunk_size points, builds
// a (num_bins, num_bins, chunk_size) bitmask in dynamic shared memory
// marking which points overlap which bins, then one thread per bin appends
// that bin's point indices to global memory.
//
// Launch: 1D grid; a grid-stride loop covers all N * ceil(P / chunk_size)
// chunks. Requires num_bins * num_bins * chunk_size / 8 bytes of dynamic
// shared memory, supplied by the host wrapper.
__global__ void RasterizePointsCoarseCudaKernel(
    const float* points,
    const float radius,
    const int N,
    const int P,
    const int S,
    const int bin_size,
    const int chunk_size,
    const int max_points_per_bin,
    int* points_per_bin,
    int* bin_points) {
  extern __shared__ char sbuf[];
  const int M = max_points_per_bin;
  const int num_bins = 1 + (S - 1) / bin_size; // Integer divide round up
  const float half_pix = 1.0f / S; // Size of half a pixel in NDC units

  // This is a boolean array of shape (num_bins, num_bins, chunk_size)
  // stored in shared memory that will track whether each point in the chunk
  // falls into each bin of the image.
  BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size);

  // Have each block handle a chunk of points and build a 3D bitmask in
  // shared memory to mark which points hit which bins. In this first phase,
  // each thread processes one point at a time. After processing the chunk,
  // one thread is assigned per bin, and the thread counts and writes the
  // points for the bin out to global memory.
  const int chunks_per_batch = 1 + (P - 1) / chunk_size;
  const int num_chunks = N * chunks_per_batch;
  for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
    const int batch_idx = chunk / chunks_per_batch;
    const int chunk_idx = chunk % chunks_per_batch;
    const int point_start_idx = chunk_idx * chunk_size;

    // block_clear ends with __syncthreads, so the mask is fully zeroed
    // before any thread starts setting bits.
    binmask.block_clear();

    // Have each thread handle a different point within the chunk
    for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) {
      const int p_idx = point_start_idx + p;
      if (p_idx >= P)
        break;
      const float px = points[batch_idx * P * 3 + p_idx * 3 + 0];
      const float py = points[batch_idx * P * 3 + p_idx * 3 + 1];
      const float pz = points[batch_idx * P * 3 + p_idx * 3 + 2];
      if (pz < 0)
        continue; // Don't render points behind the camera
      // Axis-aligned NDC bounding box of the point.
      const float px0 = px - radius;
      const float px1 = px + radius;
      const float py0 = py - radius;
      const float py1 = py + radius;

      // Brute-force search over all bins; TODO something smarter?
      // For example we could compute the exact bin where the point falls,
      // then check neighboring bins. This way we wouldn't have to check
      // all bins (however then we might have more warp divergence?)
      for (int by = 0; by < num_bins; ++by) {
        // Get y extent for the bin. PixToNdc gives us the location of
        // the center of each pixel, so we need to add/subtract a half
        // pixel to get the true extent of the bin.
        const float by0 = PixToNdc(by * bin_size, S) - half_pix;
        const float by1 = PixToNdc((by + 1) * bin_size - 1, S) + half_pix;
        const bool y_overlap = (py0 <= by1) && (by0 <= py1);
        if (!y_overlap) {
          continue;
        }
        for (int bx = 0; bx < num_bins; ++bx) {
          // Get x extent for the bin; again we need to adjust the
          // output of PixToNdc by half a pixel.
          const float bx0 = PixToNdc(bx * bin_size, S) - half_pix;
          const float bx1 = PixToNdc((bx + 1) * bin_size - 1, S) + half_pix;
          const bool x_overlap = (px0 <= bx1) && (bx0 <= px1);
          if (x_overlap) {
            binmask.set(by, bx, p);
          }
        }
      }
    }
    // All bits for this chunk must be visible before any thread counts them.
    __syncthreads();
    // Now we have processed every point in the current chunk. We need to
    // count the number of points in each bin so we can write the indices
    // out to global memory. We have each thread handle a different bin.
    for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) {
      const int by = byx / num_bins;
      const int bx = byx % num_bins;
      const int count = binmask.count(by, bx);
      const int points_per_bin_idx =
          batch_idx * num_bins * num_bins + by * num_bins + bx;

      // This atomically increments the (global) number of points found
      // in the current bin, and gets the previous value of the counter;
      // this effectively allocates space in the bin_points array for the
      // points in the current chunk that fall into this bin.
      const int start = atomicAdd(points_per_bin + points_per_bin_idx, count);

      // Now loop over the binmask and write the active bits for this bin
      // out to bin_points.
      int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M +
          bx * M + start;
      for (int p = 0; p < chunk_size; ++p) {
        if (binmask.get(by, bx, p)) {
          // TODO: Throw an error if next_idx >= M -- this means that
          // we got more than max_points_per_bin in this bin
          // TODO: check if atomicAdd is needed in line 265.
          bin_points[next_idx] = point_start_idx + p;
          next_idx++;
        }
      }
    }
    // Finish all reads of the mask before the next iteration clears it.
    __syncthreads();
  }
}
|
||||
|
||||
// Host wrapper for coarse rasterization: bin points into image tiles.
//
// Args:
//   points: (N, P, 3) float tensor of points in NDC.
//   image_size: output image side length (pixels).
//   radius: point radius in NDC units.
//   bin_size: side length of each bin (pixels).
//   max_points_per_bin: M, capacity of each bin in the output.
//
// Returns: bin_points, int32 tensor (N, num_bins, num_bins, M) of point
// indices per bin, padded with -1.
torch::Tensor RasterizePointsCoarseCuda(
    const torch::Tensor& points,
    const int image_size,
    const float radius,
    const int bin_size,
    const int max_points_per_bin) {
  const int N = points.size(0);
  const int P = points.size(1);
  const int num_bins = 1 + (image_size - 1) / bin_size; // divide round up
  const int M = max_points_per_bin;
  if (num_bins >= 22) {
    // Make sure we do not use too much shared memory.
    std::stringstream ss;
    ss << "Got " << num_bins << "; that's too many!";
    AT_ERROR(ss.str());
  }
  auto opts = points.options().dtype(torch::kInt32);
  torch::Tensor points_per_bin = torch::zeros({N, num_bins, num_bins}, opts);
  torch::Tensor bin_points = torch::full({N, num_bins, num_bins, M}, -1, opts);

  // Hoist the contiguous copy into a named tensor so its storage outlives
  // the asynchronous kernel launch (an inline .contiguous().data<float>()
  // temporary would be released at the end of the launch statement).
  const torch::Tensor points_c = points.contiguous();

  const int chunk_size = 512;
  // One bit per (bin_y, bin_x, point-in-chunk) entry, packed 8 per byte;
  // this must match the BitMask the kernel builds in shared memory.
  const size_t shared_size = num_bins * num_bins * chunk_size / 8;
  const size_t blocks = 64;
  const size_t threads = 512;
  RasterizePointsCoarseCudaKernel<<<blocks, threads, shared_size>>>(
      points_c.data<float>(),
      radius,
      N,
      P,
      image_size,
      bin_size,
      chunk_size,
      M,
      // torch::zeros / torch::full return contiguous tensors, so these
      // pointers are valid without another .contiguous() call.
      points_per_bin.data<int32_t>(),
      bin_points.data<int32_t>());
  return bin_points;
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * FINE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
// Fine rasterization: like the naive kernel, but each pixel only examines
// the points that coarse rasterization already assigned to its bin.
//
// Launch: 1D grid; a grid-stride loop covers N * B * B * bin_size^2 pixel
// slots (slots falling outside the S x S image are skipped).
__global__ void RasterizePointsFineCudaKernel(
    const float* points, // (N, P, 3)
    const int32_t* bin_points, // (N, B, B, M)
    const float radius,
    const int bin_size,
    const int N,
    const int P,
    const int B,
    const int M,
    const int S,
    const int K,
    int32_t* point_idxs, // (N, S, S, K)
    float* zbuf, // (N, S, S, K)
    float* pix_dists) { // (N, S, S, K)
  // This can be more than S^2 if S is not dividable by bin_size.
  const int num_pixels = N * B * B * bin_size * bin_size;
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  const float radius2 = radius * radius;
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Convert linear index into bin and pixel indices. We make the within
    // block pixel ids move the fastest, so that adjacent threads will fall
    // into the same bin; this should give them coalesced memory reads when
    // they read from points and bin_points.
    int i = pid;
    const int n = i / (B * B * bin_size * bin_size);
    i %= B * B * bin_size * bin_size;
    const int by = i / (B * bin_size * bin_size);
    i %= B * bin_size * bin_size;
    const int bx = i / (bin_size * bin_size);
    i %= bin_size * bin_size;
    // Image-space pixel coordinates of this slot within bin (by, bx).
    const int yi = i / bin_size + by * bin_size;
    const int xi = i % bin_size + bx * bin_size;

    // Edge bins may hang off the image when S % bin_size != 0.
    if (yi >= S || xi >= S)
      continue;
    const float xf = PixToNdc(xi, S);
    const float yf = PixToNdc(yi, S);

    // This part looks like the naive rasterization kernel, except we use
    // bin_points to only look at a subset of points already known to fall
    // in this bin. TODO abstract out this logic into some data structure
    // that is shared by both kernels?
    Pix q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;
    for (int m = 0; m < M; ++m) {
      const int p = bin_points[n * B * B * M + by * B * M + bx * M + m];
      if (p < 0) {
        // bin_points uses -1 as a sentinel value
        continue;
      }
      CheckPixelInsidePoint(
          points, p, q_size, q_max_z, q_max_idx, q, radius2, xf, yf, n, P, K);
    }
    // Now we've looked at all the points for this bin, so we can write
    // output for the current pixel.
    BubbleSort(q, q_size);
    const int pix_idx = n * S * S * K + yi * S * K + xi * K;
    for (int k = 0; k < q_size; ++k) {
      point_idxs[pix_idx + k] = q[k].idx;
      zbuf[pix_idx + k] = q[k].z;
      pix_dists[pix_idx + k] = q[k].dist2;
    }
  }
}
|
||||
|
||||
// Host wrapper for fine rasterization.
//
// Args:
//   points: (N, P, 3) float tensor of points in NDC.
//   bin_points: (N, B, B, M) int32 tensor from coarse rasterization.
//   image_size: output image side length S (pixels).
//   radius: point radius in NDC units.
//   bin_size: side length of each bin (pixels).
//   points_per_pixel: K, number of closest points kept per pixel
//                     (must be <= kMaxPointsPerPixel).
//
// Returns: (point_idxs, zbuf, pix_dists), each (N, S, S, K), padded with -1.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFineCuda(
    const torch::Tensor& points,
    const torch::Tensor& bin_points,
    const int image_size,
    const float radius,
    const int bin_size,
    const int points_per_pixel) {
  const int N = points.size(0);
  const int P = points.size(1);
  const int B = bin_points.size(1);
  const int M = bin_points.size(3);
  const int S = image_size;
  const int K = points_per_pixel;
  if (K > kMaxPointsPerPixel) {
    // The previous message ("Must have num_closest <= 8") named a
    // nonexistent parameter and the wrong limit; report the real constraint,
    // consistent with RasterizePointsNaiveCuda.
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }

  // Hoist contiguous copies into named tensors so their storage outlives
  // the asynchronous kernel launch (inline .contiguous().data<T>()
  // temporaries would be released at the end of the launch statement).
  const torch::Tensor points_c = points.contiguous();
  const torch::Tensor bin_points_c = bin_points.contiguous();

  auto int_opts = points.options().dtype(torch::kInt32);
  auto float_opts = points.options().dtype(torch::kFloat32);
  // Outputs pre-filled with -1 for pixels hit by fewer than K points.
  torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
  torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
  torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);

  const size_t blocks = 1024;
  const size_t threads = 64;
  RasterizePointsFineCudaKernel<<<blocks, threads>>>(
      points_c.data<float>(),
      bin_points_c.data<int32_t>(),
      radius,
      bin_size,
      N,
      P,
      B,
      M,
      S,
      K,
      // torch::full returns contiguous tensors; pointers valid as-is.
      point_idxs.data<int32_t>(),
      zbuf.data<float>(),
      pix_dists.data<float>());

  return std::make_tuple(point_idxs, zbuf, pix_dists);
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * BACKWARD PASS *
|
||||
// ****************************************************************************
|
||||
// TODO(T55115174) Add more documentation for backward kernel.
|
||||
// Backward pass: scatter per-(pixel, k) upstream gradients back onto the
// points that produced them. One thread per slot of the (N, H, W, K)
// output of the forward pass; a grid-stride loop covers every slot for any
// 1D launch configuration.
// TODO(T55115174) Add more documentation for backward kernel.
__global__ void RasterizePointsBackwardCudaKernel(
    const float* points, // (N, P, 3)
    const int32_t* idxs, // (N, H, W, K)
    const int N,
    const int P,
    const int H,
    const int W,
    const int K,
    const float* grad_zbuf, // (N, H, W, K)
    const float* grad_dists, // (N, H, W, K)
    float* grad_points) { // (N, P, 3)
  const int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N * H * W * K;
       i += stride) {
    // i = ((n * H + yi) * W + xi) * K + k; the k component itself is never
    // needed, so peel it off first and unpack the pixel id.
    const int pix = i / K;
    const int xi = pix % W;
    const int yi = (pix / W) % H;
    const int n = pix / (W * H);
    const float xf = PixToNdc(xi, W);
    const float yf = PixToNdc(yi, H);

    const int p = idxs[i];
    if (p < 0)
      continue; // Slot was left as the -1 padding by the forward pass.
    const float grad_dist2 = grad_dists[i];
    const int p_ind = n * P * 3 + p * 3;
    const float px = points[p_ind];
    const float py = points[p_ind + 1];
    const float dx = px - xf;
    const float dy = py - yf;
    // dist2 = (px - xf)^2 + (py - yf)^2, so d(dist2)/d(px) = 2 * dx and
    // d(dist2)/d(py) = 2 * dy; the depth gradient passes straight through.
    const float grad_px = 2.0f * grad_dist2 * dx;
    const float grad_py = 2.0f * grad_dist2 * dy;
    const float grad_pz = grad_zbuf[i];
    // Many pixels can reference the same point, so accumulate atomically.
    atomicAdd(grad_points + p_ind, grad_px);
    atomicAdd(grad_points + p_ind + 1, grad_py);
    atomicAdd(grad_points + p_ind + 2, grad_pz);
  }
}
|
||||
|
||||
// Host wrapper for the backward pass.
//
// Args:
//   points: (N, P, 3) float tensor from the forward pass.
//   idxs: (N, H, W, K) int32 indices returned by the forward pass.
//   grad_zbuf: (N, H, W, K) upstream gradient d(loss)/d(zbuf).
//   grad_dists: (N, H, W, K) upstream gradient d(loss)/d(dists).
//
// Returns: grad_points, float tensor (N, P, 3).
torch::Tensor RasterizePointsBackwardCuda(
    const torch::Tensor& points, // (N, P, 3)
    const torch::Tensor& idxs, // (N, H, W, K)
    const torch::Tensor& grad_zbuf, // (N, H, W, K)
    const torch::Tensor& grad_dists) { // (N, H, W, K)
  const int N = points.size(0);
  const int P = points.size(1);
  const int H = idxs.size(1);
  const int W = idxs.size(2);
  const int K = idxs.size(3);

  // Hoist contiguous copies into named tensors so their storage outlives
  // the asynchronous kernel launch (inline .contiguous().data<T>()
  // temporaries would be released at the end of the launch statement).
  const torch::Tensor points_c = points.contiguous();
  const torch::Tensor idxs_c = idxs.contiguous();
  const torch::Tensor grad_zbuf_c = grad_zbuf.contiguous();
  const torch::Tensor grad_dists_c = grad_dists.contiguous();

  torch::Tensor grad_points = torch::zeros({N, P, 3}, points.options());
  const size_t blocks = 1024;
  const size_t threads = 64;

  RasterizePointsBackwardCudaKernel<<<blocks, threads>>>(
      points_c.data<float>(),
      idxs_c.data<int32_t>(),
      N,
      P,
      H,
      W,
      K,
      grad_zbuf_c.data<float>(),
      grad_dists_c.data<float>(),
      // torch::zeros returns a contiguous tensor; pointer valid as-is.
      grad_points.data<float>());

  return grad_points;
}
|
||||
230
pytorch3d/csrc/rasterize_points/rasterize_points.h
Normal file
230
pytorch3d/csrc/rasterize_points/rasterize_points.h
Normal file
@@ -0,0 +1,230 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#pragma once
|
||||
#include <torch/extension.h>
|
||||
#include <cstdio>
|
||||
#include <tuple>
|
||||
|
||||
// ****************************************************************************
|
||||
// * NAIVE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaiveCpu(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel);
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
|
||||
RasterizePointsNaiveCuda(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel);
|
||||
|
||||
// Naive (forward) pointcloud rasterization: For each pixel, for each point,
|
||||
// check whether that point hits the pixel.
|
||||
//
|
||||
// Args:
|
||||
// points: Tensor of shape (N, P, 3) (in NDC)
|
||||
// radius: Radius of each point (in NDC units)
|
||||
// image_size: (S) Size of the image to return (in pixels)
|
||||
// points_per_pixel: (K) The number closest of points to return for each pixel
|
||||
//
|
||||
// Returns:
|
||||
// idxs: int32 Tensor of shape (N, S, S, K) giving the indices of the
|
||||
// closest K points along the z-axis for each pixel, padded with -1 for
|
||||
// pixels
|
||||
// hit by fewer than K points.
|
||||
// zbuf: float32 Tensor of shape (N, S, S, K) giving the depth of each
|
||||
// closest point for each pixel.
|
||||
// dists: float32 Tensor of shape (N, S, S, K) giving squared Euclidean
|
||||
// distance in the (NDC) x/y plane between each pixel and its K closest
|
||||
// points along the z axis.
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaive(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel) {
|
||||
if (points.type().is_cuda()) {
|
||||
return RasterizePointsNaiveCuda(
|
||||
points, image_size, radius, points_per_pixel);
|
||||
} else {
|
||||
return RasterizePointsNaiveCpu(
|
||||
points, image_size, radius, points_per_pixel);
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * COARSE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
torch::Tensor RasterizePointsCoarseCpu(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int max_points_per_bin);
|
||||
|
||||
torch::Tensor RasterizePointsCoarseCuda(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int max_points_per_bin);
|
||||
|
||||
// Args:
|
||||
// points: Tensor of shape (N, P, 3)
|
||||
// radius: Radius of points to rasterize (in NDC units)
|
||||
// image_size: Size of the image to generate (in pixels)
|
||||
// bin_size: Size of each bin within the image (in pixels)
|
||||
//
|
||||
// Returns:
|
||||
// points_per_bin: Tensor of shape (N, num_bins, num_bins) giving the number
|
||||
// of points that fall in each bin
|
||||
// bin_points: Tensor of shape (N, num_bins, num_bins, K) giving the indices
|
||||
// of points that fall into each bin.
|
||||
// Dispatch coarse rasterization to the CUDA or CPU implementation depending
// on where `points` lives.
torch::Tensor RasterizePointsCoarse(
    const torch::Tensor& points,
    const int image_size,
    const float radius,
    const int bin_size,
    const int max_points_per_bin) {
  return points.type().is_cuda()
      ? RasterizePointsCoarseCuda(
            points, image_size, radius, bin_size, max_points_per_bin)
      : RasterizePointsCoarseCpu(
            points, image_size, radius, bin_size, max_points_per_bin);
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * FINE RASTERIZATION *
|
||||
// ****************************************************************************
|
||||
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFineCuda(
|
||||
const torch::Tensor& points,
|
||||
const torch::Tensor& bin_points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int points_per_pixel);
|
||||
|
||||
// Args:
|
||||
// points: float32 Tensor of shape (N, P, 3)
|
||||
// bin_points: int32 Tensor of shape (N, B, B, M) giving the indices of points
|
||||
// that fall into each bin (output from coarse rasterization)
|
||||
// image_size: Size of image to generate (in pixels)
|
||||
// radius: Radius of points to rasterize (NDC units)
|
||||
// bin_size: Size of each bin (in pixels)
|
||||
// points_per_pixel: How many points to rasterize for each pixel
|
||||
//
|
||||
// Returns (same as rasterize_points):
|
||||
// idxs: int32 Tensor of shape (N, S, S, K) giving the indices of the closest
|
||||
// points_per_pixel points along the z-axis for each pixel, padded with
|
||||
// -1 for pixels hit by fewer than points_per_pixel points
|
||||
// zbuf: float32 Tensor of shape (N, S, S, K) giving the depth of each of each
|
||||
// closest point for each pixel
|
||||
// dists: float32 Tensor of shape (N, S, S, K) giving squared Euclidean
|
||||
// distance in the (NDC) x/y plane between each pixel and its K closest
|
||||
// points along the z axis.
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsFine(
|
||||
const torch::Tensor& points,
|
||||
const torch::Tensor& bin_points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int bin_size,
|
||||
const int points_per_pixel) {
|
||||
if (points.type().is_cuda()) {
|
||||
return RasterizePointsFineCuda(
|
||||
points, bin_points, image_size, radius, bin_size, points_per_pixel);
|
||||
} else {
|
||||
AT_ERROR("NOT IMPLEMENTED");
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * BACKWARD PASS *
|
||||
// ****************************************************************************
|
||||
|
||||
torch::Tensor RasterizePointsBackwardCpu(
    const torch::Tensor& points,
    const torch::Tensor& idxs,
    const torch::Tensor& grad_zbuf,
    const torch::Tensor& grad_dists);

torch::Tensor RasterizePointsBackwardCuda(
    const torch::Tensor& points,
    const torch::Tensor& idxs,
    const torch::Tensor& grad_zbuf,
    const torch::Tensor& grad_dists);

// Backward pass dispatcher: routes to the CPU or CUDA implementation based
// on the device of `points`.
//
// Args:
//   points: float32 Tensor of shape (N, P, 3)
//   idxs: int32 Tensor of shape (N, H, W, K) produced by the forward pass
//   grad_zbuf: float32 (N, H, W, K) upstream gradient d(loss)/d(zbuf)
//   grad_dists: float32 (N, H, W, K) upstream gradient d(loss)/d(dists)
//
// Returns:
//   grad_points: float32 Tensor of shape (N, P, 3) with gradients w.r.t.
//   the input points.
torch::Tensor RasterizePointsBackward(
    const torch::Tensor& points,
    const torch::Tensor& idxs,
    const torch::Tensor& grad_zbuf,
    const torch::Tensor& grad_dists) {
  if (points.type().is_cuda()) {
    return RasterizePointsBackwardCuda(points, idxs, grad_zbuf, grad_dists);
  }
  return RasterizePointsBackwardCpu(points, idxs, grad_zbuf, grad_dists);
}
|
||||
|
||||
// ****************************************************************************
|
||||
// * MAIN ENTRY POINT *
|
||||
// ****************************************************************************
|
||||
|
||||
// This is the main entry point for the forward pass of the point rasterizer;
|
||||
// it uses either naive or coarse-to-fine rasterization based on bin_size.
|
||||
//
|
||||
// Args:
|
||||
// points: Tensor of shape (N, P, 3) (in NDC)
|
||||
// radius: Radius of each point (in NDC units)
|
||||
// image_size: (S) Size of the image to return (in pixels)
|
||||
// points_per_pixel: (K) The number of points to return for each pixel
|
||||
// bin_size: Bin size (in pixels) for coarse-to-fine rasterization. Setting
|
||||
// bin_size=0 uses naive rasterization instead.
|
||||
// max_points_per_bin: The maximum number of points allowed to fall into each
|
||||
// bin when using coarse-to-fine rasterization.
|
||||
//
|
||||
// Returns:
|
||||
// idxs: int32 Tensor of shape (N, S, S, K) giving the indices of the
|
||||
// closest points_per_pixel points along the z-axis for each pixel,
|
||||
// padded with -1 for pixels hit by fewer than points_per_pixel points
|
||||
// zbuf: float32 Tensor of shape (N, S, S, K) giving the depth of each of each
|
||||
// closest point for each pixel
|
||||
// dists: float32 Tensor of shape (N, S, S, K) giving squared Euclidean
|
||||
// distance in the (NDC) x/y plane between each pixel and its K closest
|
||||
// points along the z axis.
|
||||
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePoints(
|
||||
const torch::Tensor& points,
|
||||
const int image_size,
|
||||
const float radius,
|
||||
const int points_per_pixel,
|
||||
const int bin_size,
|
||||
const int max_points_per_bin) {
|
||||
if (bin_size == 0) {
|
||||
// Use the naive per-pixel implementation
|
||||
return RasterizePointsNaive(points, image_size, radius, points_per_pixel);
|
||||
} else {
|
||||
// Use coarse-to-fine rasterization
|
||||
const auto bin_points = RasterizePointsCoarse(
|
||||
points, image_size, radius, bin_size, max_points_per_bin);
|
||||
return RasterizePointsFine(
|
||||
points, bin_points, image_size, radius, bin_size, points_per_pixel);
|
||||
}
|
||||
}
|
||||
196
pytorch3d/csrc/rasterize_points/rasterize_points_cpu.cpp
Normal file
196
pytorch3d/csrc/rasterize_points/rasterize_points_cpu.cpp
Normal file
@@ -0,0 +1,196 @@
|
||||
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
#include <torch/extension.h>
|
||||
#include <queue>
|
||||
#include <tuple>
|
||||
|
||||
// Map a pixel index 0 <= i < S to a normalized device coordinate in [-1, 1].
// The NDC range is split into S equal-width pixels, and index i maps to the
// *center* of the i-th pixel's sub-range.
inline float PixToNdc(const int i, const int S) {
  // center of pixel i = -1 + i * pixel_width + half_pixel_width,
  // with pixel_width = 2 / S.
  return (2 * i + 1.0f) / S - 1;
}
|
||||
|
||||
// Naive CPU rasterization: for every pixel, scan all P points and keep the
// points_per_pixel closest (smallest z) whose splat covers the pixel center.
//
// Args:
//   points: float32 Tensor of shape (N, P, 3) holding (x, y, z) in NDC.
//   image_size: S, side length of the square output image in pixels.
//   radius: splat radius of each point, in NDC units.
//   points_per_pixel: K, number of points recorded per pixel.
//
// Returns a tuple of:
//   point_idxs: int32 (N, S, S, K) indices of the recorded points.
//   zbuf: float32 (N, S, S, K) z value of each recorded point.
//   pix_dists: float32 (N, S, S, K) squared x/y distance from the pixel
//     center to each recorded point.
//   All three are padded with -1 in unused slots.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> RasterizePointsNaiveCpu(
    const torch::Tensor& points,
    const int image_size,
    const float radius,
    const int points_per_pixel) {
  const int N = points.size(0);
  const int P = points.size(1);
  const int S = image_size;
  const int K = points_per_pixel;
  auto int_opts = points.options().dtype(torch::kInt32);
  auto float_opts = points.options().dtype(torch::kFloat32);
  // Outputs are pre-filled with -1 to mark "no point" slots.
  torch::Tensor point_idxs = torch::full({N, S, S, K}, -1, int_opts);
  torch::Tensor zbuf = torch::full({N, S, S, K}, -1, float_opts);
  torch::Tensor pix_dists = torch::full({N, S, S, K}, -1, float_opts);

  auto points_a = points.accessor<float, 3>();
  auto point_idxs_a = point_idxs.accessor<int32_t, 4>();
  auto zbuf_a = zbuf.accessor<float, 4>();
  auto pix_dists_a = pix_dists.accessor<float, 4>();

  // Compare squared distances to avoid a sqrt for every point/pixel pair.
  const float radius2 = radius * radius;
  for (int n = 0; n < N; ++n) {
    for (int yi = 0; yi < S; ++yi) {
      float yf = PixToNdc(yi, S);
      for (int xi = 0; xi < S; ++xi) {
        float xf = PixToNdc(xi, S);
        // Use a priority queue to hold (z, idx, r).
        // Tuples compare lexicographically, so this is a max-heap on z:
        // q.top() is always the *farthest* of the points kept so far.
        std::priority_queue<std::tuple<float, int, float>> q;
        for (int p = 0; p < P; ++p) {
          const float px = points_a[n][p][0];
          const float py = points_a[n][p][1];
          const float pz = points_a[n][p][2];
          if (pz < 0) {
            // Points with negative z are skipped (presumably behind the
            // camera/near plane -- NOTE(review): confirm the convention).
            continue;
          }
          const float dx = px - xf;
          const float dy = py - yf;
          const float dist2 = dx * dx + dy * dy;
          if (dist2 < radius2) {
            // The current point hit the current pixel.
            q.emplace(pz, p, dist2);
            if ((int)q.size() > K) {
              // Evict the farthest point so only the K closest remain.
              q.pop();
            }
          }
        }
        // Now all the points have been seen, so pop elements off the queue
        // one by one and write them into the output tensors. Popping yields
        // decreasing z, and q.size() after each pop gives the target slot,
        // so slot 0 ends up holding the closest point.
        while (!q.empty()) {
          auto t = q.top();
          q.pop();
          int i = q.size();
          zbuf_a[n][yi][xi][i] = std::get<0>(t);
          point_idxs_a[n][yi][xi][i] = std::get<1>(t);
          pix_dists_a[n][yi][xi][i] = std::get<2>(t);
        }
      }
    }
  }
  return std::make_tuple(point_idxs, zbuf, pix_dists);
}
|
||||
|
||||
// Coarse CPU rasterization: split the image into a B x B grid of square bins
// of bin_size pixels each, and record for every bin which points' bounding
// boxes overlap it.
//
// Args:
//   points: float32 Tensor of shape (N, P, 3) holding (x, y, z) in NDC.
//   image_size: side length of the (square) image, in pixels.
//   radius: splat radius of each point, in NDC units.
//   bin_size: side length of each bin, in pixels.
//   max_points_per_bin: M, capacity of each bin; exceeding it is an error.
//
// Returns a tuple of:
//   points_per_bin: int32 (N, B, B) number of points recorded per bin.
//   bin_points: int32 (N, B, B, M) point indices per bin, padded with -1.
std::tuple<torch::Tensor, torch::Tensor> RasterizePointsCoarseCpu(
    const torch::Tensor& points,
    const int image_size,
    const float radius,
    const int bin_size,
    const int max_points_per_bin) {
  const int N = points.size(0);
  const int P = points.size(1);
  const int B = 1 + (image_size - 1) / bin_size; // Integer division round up
  const int M = max_points_per_bin;
  auto opts = points.options().dtype(torch::kInt32);
  torch::Tensor points_per_bin = torch::zeros({N, B, B}, opts);
  torch::Tensor bin_points = torch::full({N, B, B, M}, -1, opts);

  auto points_a = points.accessor<float, 3>();
  auto points_per_bin_a = points_per_bin.accessor<int32_t, 3>();
  auto bin_points_a = bin_points.accessor<int32_t, 4>();

  // Width of one pixel / one bin in NDC units (NDC spans [-1, 1]).
  const float pixel_width = 2.0f / image_size;
  const float bin_width = pixel_width * bin_size;
  for (int n = 0; n < N; ++n) {
    // Sweep the bin window over the image, updating its NDC bounds in place.
    float bin_y_min = -1.0f;
    float bin_y_max = bin_y_min + bin_width;
    for (int by = 0; by < B; by++) {
      float bin_x_min = -1.0f;
      float bin_x_max = bin_x_min + bin_width;
      for (int bx = 0; bx < B; bx++) {
        int32_t points_hit = 0;
        for (int32_t p = 0; p < P; p++) {
          float px = points_a[n][p][0];
          float py = points_a[n][p][1];
          float pz = points_a[n][p][2];
          if (pz < 0) {
            // Points with negative z are skipped (presumably behind the
            // camera/near plane -- NOTE(review): confirm the convention).
            continue;
          }
          // Axis-aligned bounding box of the point's splat.
          float point_x_min = px - radius;
          float point_x_max = px + radius;
          float point_y_min = py - radius;
          float point_y_max = py + radius;
          // Closed-interval overlap test between the point's bounding box
          // and the bin. NOTE(review): since both endpoints are inclusive, a
          // box that exactly touches a bin boundary is recorded in *both*
          // adjacent bins (the original comment claimed a half-open
          // interval); this is conservative and safe for a coarse pass.
          bool x_hit = (point_x_min <= bin_x_max) && (bin_x_min <= point_x_max);
          bool y_hit = (point_y_min <= bin_y_max) && (bin_y_min <= point_y_max);
          if (x_hit && y_hit) {
            // Got too many points for this bin, so throw an error.
            if (points_hit >= max_points_per_bin) {
              AT_ERROR("Got too many points per bin");
            }
            // The current point falls in the current bin, so
            // record it.
            bin_points_a[n][by][bx][points_hit] = p;
            points_hit++;
          }
        }
        // Record the number of points found in this bin
        points_per_bin_a[n][by][bx] = points_hit;

        // Shift the bin to the right for the next loop iteration
        bin_x_min = bin_x_max;
        bin_x_max = bin_x_min + bin_width;
      }
      // Shift the bin down for the next loop iteration
      bin_y_min = bin_y_max;
      bin_y_max = bin_y_min + bin_width;
    }
  }
  return std::make_tuple(points_per_bin, bin_points);
}
|
||||
|
||||
// CPU backward pass: scatter upstream gradients from each pixel's K recorded
// points back onto the point cloud.
//
// Args:
//   points: float32 (N, P, 3) input points in NDC.
//   idxs: int32 (N, H, W, K) point indices from the forward pass, -1 padded.
//   grad_zbuf: float32 (N, H, W, K) upstream gradient d(loss)/d(zbuf).
//   grad_dists: float32 (N, H, W, K) upstream gradient d(loss)/d(dists).
//
// Returns:
//   grad_points: float32 (N, P, 3) gradients w.r.t. the input points.
torch::Tensor RasterizePointsBackwardCpu(
    const torch::Tensor& points, // (N, P, 3)
    const torch::Tensor& idxs, // (N, H, W, K)
    const torch::Tensor& grad_zbuf, // (N, H, W, K)
    const torch::Tensor& grad_dists) { // (N, H, W, K)
  const int N = points.size(0);
  const int P = points.size(1);
  const int H = idxs.size(1);
  const int W = idxs.size(2);
  const int K = idxs.size(3);

  // Only square images are handled for now.
  // TODO(jcjohns): Extend to non-square images.
  if (H != W) {
    AT_ERROR("RasterizePointsBackwardCpu only supports square images");
  }
  torch::Tensor grad_points = torch::zeros({N, P, 3}, points.options());

  auto points_a = points.accessor<float, 3>();
  auto idxs_a = idxs.accessor<int32_t, 4>();
  auto grad_dists_a = grad_dists.accessor<float, 4>();
  auto grad_zbuf_a = grad_zbuf.accessor<float, 4>();
  auto grad_points_a = grad_points.accessor<float, 3>();

  for (int n = 0; n < N; ++n) { // images in the batch
    for (int yi = 0; yi < H; ++yi) { // rows of the image
      const float yf = PixToNdc(yi, H);
      for (int xi = 0; xi < W; ++xi) { // pixels within the row
        const float xf = PixToNdc(xi, W);
        for (int k = 0; k < K; ++k) { // the K points hitting this pixel
          const int p = idxs_a[n][yi][xi][k];
          if (p < 0) {
            // Slots are filled from the front and padded with -1, so the
            // first -1 means no further points for this pixel.
            break;
          }
          const float dx = points_a[n][p][0] - xf;
          const float dy = points_a[n][p][1] - yf;
          // Forward pass computed dists = dx*dx + dy*dy, so
          // d(dists)/d(px) = 2*dx and d(dists)/d(py) = 2*dy.
          const float gd = grad_dists_a[n][yi][xi][k];
          grad_points_a[n][p][0] += 2.0f * gd * dx;
          grad_points_a[n][p][1] += 2.0f * gd * dy;
          // zbuf is just the point's z, so its gradient passes straight
          // through.
          grad_points_a[n][p][2] += grad_zbuf_a[n][yi][xi][k];
        }
      }
    }
  }
  return grad_points;
}
|
||||
Reference in New Issue
Block a user