20 Commits

Author SHA1 Message Date
generatedunixname2645487282517272
7a6157e38e Fix CQS signal modernize-use-using in fbcode/vision/fair
Reviewed By: bottler

Differential Revision: D94879733

fbshipit-source-id: fc35eaaa723a2a035b3b204732add7ba8b225c57
2026-03-02 05:59:34 -08:00
generatedunixname1417043136753450
d9839a95f2 fbcode/vision/fair/pytorch3d/pytorch3d/ops/cameras_alignment.py
Reviewed By: sgrigory

Differential Revision: D93710806

fbshipit-source-id: da6c1e1e5b7a1c5cdfbf5026993c42c7ec387415
2026-02-23 15:52:03 -08:00
generatedunixname1417043136753450
7b5c78460a fbcode/vision/fair/pytorch3d/pytorch3d/transforms/se3.py
Reviewed By: sgrigory

Differential Revision: D93709801

fbshipit-source-id: e4bae81fe1a88fed547304e6e21b248c5a345277
2026-02-23 14:51:32 -08:00
generatedunixname1417043136753450
e3c80a4368 fbcode/vision/fair/pytorch3d/pytorch3d/renderer/splatter_blend.py
Reviewed By: sgrigory

Differential Revision: D93710022

fbshipit-source-id: 39253258b93a467fbda6b51ef8d6d3975bb49810
2026-02-23 12:43:53 -08:00
generatedunixname1417043136753450
b9b5ea3428 fbcode/vision/fair/pytorch3d/pytorch3d/common/workaround/symeig3x3.py
Reviewed By: sgrigory

Differential Revision: D93715209

fbshipit-source-id: 1880a8dd72e35ce5cc93cdeecf770aab6469ca31
2026-02-23 12:42:24 -08:00
generatedunixname1417043136753450
0e435c297c fbcode/vision/fair/pytorch3d/pytorch3d/ops/points_alignment.py
Reviewed By: sgrigory

Differential Revision: D93712744

fbshipit-source-id: 660560cdef9ff1d2173ae06de54df31766ee537f
2026-02-23 12:28:37 -08:00
generatedunixname1417043136753450
d631b56fba fbcode/vision/fair/pytorch3d/pytorch3d/ops/sample_farthest_points.py
Reviewed By: sgrigory

Differential Revision: D93708653

fbshipit-source-id: 112158092cd64ac8afddf1378b931cb44e19c372
2026-02-23 10:21:52 -08:00
generatedunixname915440834509264
3ba2030aa4 Fix CQS signal readability-braces-around-statements in fbcode/vision/fair
Reviewed By: bottler

Differential Revision: D94068738

fbshipit-source-id: cd47c67d4269ac7461acb73da6de9e4373da9d4c
2026-02-23 05:18:38 -08:00
generatedunixname1262449429094718
79a7fcf02b fbcode/vision/fair/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp
Reviewed By: bottler

Differential Revision: D94062914

fbshipit-source-id: 9147dc68d115ce5761ebb7d07c035ac4b664da0b
2026-02-23 05:10:19 -08:00
generatedunixname1417043136753450
e43ed8c76e fbcode/vision/fair/pytorch3d/pytorch3d/transforms/rotation_conversions.py
Reviewed By: bottler

Differential Revision: D93712828

fbshipit-source-id: 3465af450104bb1e5f491e3c0ee0259698cf8ceb
2026-02-22 07:53:20 -08:00
generatedunixname1417043136753450
49f43402c6 fbcode/vision/fair/pytorch3d/pytorch3d/renderer/mesh/textures.py
Reviewed By: bottler

Differential Revision: D93710616

fbshipit-source-id: 599fe7425066bc85c0999765168788f8df7e34ce
2026-02-22 07:13:45 -08:00
generatedunixname1417043136753450
90646d93ab fbcode/vision/fair/pytorch3d/pytorch3d/renderer/mesh/clip.py
Reviewed By: bottler

Differential Revision: D93715239

fbshipit-source-id: 7417015251fe96be72daf4894e946edd43bb9c46
2026-02-22 07:13:09 -08:00
generatedunixname1417043136753450
eabb511410 fbcode/vision/fair/pytorch3d/pytorch3d/loss/mesh_laplacian_smoothing.py
Reviewed By: bottler

Differential Revision: D93709347

fbshipit-source-id: 69710e6082a0785126a121e26f1d96a571360f1d
2026-02-22 07:08:02 -08:00
generatedunixname1417043136753450
e70188ebbc fbcode/vision/fair/pytorch3d/pytorch3d/transforms/transform3d.py
Reviewed By: bottler

Differential Revision: D93713606

fbshipit-source-id: a8aa52328a76d95d3985daec529cdce04ba12bd4
2026-02-22 07:06:34 -08:00
generatedunixname1417043136753450
1bd911d534 fbcode/vision/fair/pytorch3d/pytorch3d/renderer/cameras.py
Reviewed By: bottler

Differential Revision: D93712137

fbshipit-source-id: 3457f0f9fb7d7baa29be2eaf731074a49bdbb0c8
2026-02-22 07:05:45 -08:00
generatedunixname1417043136753450
3aadd19a2b fbcode/vision/fair/pytorch3d/pytorch3d/ops/laplacian_matrices.py
Reviewed By: bottler

Differential Revision: D93708383

fbshipit-source-id: 7576f0c9800ed3d28795e521be5c63799b7e6676
2026-02-22 06:57:57 -08:00
generatedunixname1417043136753450
42d66c1145 fbcode/vision/fair/pytorch3d/pytorch3d/loss/point_mesh_distance.py
Reviewed By: bottler

Differential Revision: D93708351

fbshipit-source-id: 06a877777e4cb72a497a44ff55db0b6222bda83b
2026-02-22 06:55:36 -08:00
generatedunixname1417043136753450
e9ed1cb178 fbcode/vision/fair/pytorch3d/pytorch3d/renderer/utils.py
Reviewed By: bottler

Differential Revision: D93708316

fbshipit-source-id: f8ae2432ad34116278b3f7f7de5146b89c3fe63e
2026-02-22 04:09:20 -08:00
Jeremy Reizenstein
cbcae096a0 Add atol=1e-4 to assertClose calls in test_inverse for Translate
Summary:
Added an `atol=1e-4` tolerance parameter to the `assertClose` calls on lines 682 and 683 in the `test_inverse` method of the `TestTranslate` class.

This is a retry of D90225548

Reviewed By: sgrigory

Differential Revision: D90682979

fbshipit-source-id: ac13f000174dd9962326296e1c3116d0d39c7751
2026-01-14 08:57:43 -08:00
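For context on what the loosened tolerance accepts, here is a small standalone illustration (not part of the diff; the numbers are made up) using torch.allclose, which follows an atol/rtol convention similar to what the assertClose test helper exposes:

import torch

# Differences of a few 1e-5, typical of float32 matrix-inversion round-off,
# fail the default tolerances but pass once atol is widened to 1e-4.
a = torch.tensor([1.00000, 2.00000])
b = torch.tensor([1.00004, 2.00007])
print(torch.allclose(a, b))             # False with the default atol=1e-8
print(torch.allclose(a, b, atol=1e-4))  # True with the loosened tolerance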
generatedunixname537391475639613
5b1cce56bc Fix for T251460511 ("Your diff, D90498281, broke one test")
Reviewed By: sgrigory

Differential Revision: D90649493

fbshipit-source-id: 2a77c45ec8e6e5aa0a20437a765fbb9f0b566406
2026-01-14 08:53:26 -08:00
25 changed files with 133 additions and 126 deletions

View File

@@ -82,10 +82,12 @@ class _SymEig3x3(nn.Module):
         q = inputs_trace / 3.0
         # Calculate squared sum of elements outside the main diagonal / 2
-        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-        p1 = ((inputs**2).sum(dim=(-1, -2)) - (inputs_diag**2).sum(-1)) / 2
-        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-        p2 = ((inputs_diag - q[..., None]) ** 2).sum(dim=-1) + 2.0 * p1.clamp(self._eps)
+        p1 = (
+            torch.square(inputs).sum(dim=(-1, -2)) - torch.square(inputs_diag).sum(-1)
+        ) / 2
+        p2 = torch.square(inputs_diag - q[..., None]).sum(dim=-1) + 2.0 * p1.clamp(
+            self._eps
+        )
         p = torch.sqrt(p2 / 6.0)
         B = (inputs - q[..., None, None] * self._identity) / p[..., None, None]
@@ -104,7 +106,9 @@ class _SymEig3x3(nn.Module):
         # Soft dispatch between the degenerate case (diagonal A) and general.
         # diag_soft_cond -> 1.0 when p1 < 6 * eps and diag_soft_cond -> 0.0 otherwise.
         # We use 6 * eps to take into account the error accumulated during the p1 summation
-        diag_soft_cond = torch.exp(-((p1 / (6 * self._eps)) ** 2)).detach()[..., None]
+        diag_soft_cond = torch.exp(-torch.square(p1 / (6 * self._eps))).detach()[
+            ..., None
+        ]
         # Eigenvalues are the ordered elements of main diagonal in the degenerate case
         diag_eigenvals, _ = torch.sort(inputs_diag, dim=-1)
@@ -199,8 +203,7 @@ class _SymEig3x3(nn.Module):
             cross_products[..., :1, :]
         )
-        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-        norms_sq = (cross_products**2).sum(dim=-1)
+        norms_sq = torch.square(cross_products).sum(dim=-1)
         max_norms_index = norms_sq.argmax(dim=-1)
         # Pick only the cross-product with highest squared norm for each input
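Most of the Python hunks on this page swap `x ** 2` for torch.square(x) and `1.0 / x` for torch.reciprocal(x) purely so the pyre-fixme[58] suppressions can be dropped; the numerical results are unchanged. A minimal sketch of that equivalence (illustration only, not taken from any of the diffs):

import torch

x = torch.randn(4, 3, 3)
# torch.square matches the elementwise power, and torch.reciprocal matches the
# float division; only the static types seen by Pyre differ.
assert torch.allclose(torch.square(x), x ** 2)
assert torch.allclose(torch.reciprocal(x.abs() + 1.0), 1.0 / (x.abs() + 1.0))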

View File

@@ -18,68 +18,89 @@ namespace Renderer {
 template <bool DEV>
 HOST void destruct(Renderer* self) {
-  if (self->result_d != NULL)
+  if (self->result_d != NULL) {
     FREE(self->result_d);
+  }
   self->result_d = NULL;
-  if (self->min_depth_d != NULL)
+  if (self->min_depth_d != NULL) {
     FREE(self->min_depth_d);
+  }
   self->min_depth_d = NULL;
-  if (self->min_depth_sorted_d != NULL)
+  if (self->min_depth_sorted_d != NULL) {
     FREE(self->min_depth_sorted_d);
+  }
   self->min_depth_sorted_d = NULL;
-  if (self->ii_d != NULL)
+  if (self->ii_d != NULL) {
     FREE(self->ii_d);
+  }
   self->ii_d = NULL;
-  if (self->ii_sorted_d != NULL)
+  if (self->ii_sorted_d != NULL) {
     FREE(self->ii_sorted_d);
+  }
   self->ii_sorted_d = NULL;
-  if (self->ids_d != NULL)
+  if (self->ids_d != NULL) {
     FREE(self->ids_d);
+  }
   self->ids_d = NULL;
-  if (self->ids_sorted_d != NULL)
+  if (self->ids_sorted_d != NULL) {
     FREE(self->ids_sorted_d);
+  }
   self->ids_sorted_d = NULL;
-  if (self->workspace_d != NULL)
+  if (self->workspace_d != NULL) {
     FREE(self->workspace_d);
+  }
   self->workspace_d = NULL;
-  if (self->di_d != NULL)
+  if (self->di_d != NULL) {
     FREE(self->di_d);
+  }
   self->di_d = NULL;
-  if (self->di_sorted_d != NULL)
+  if (self->di_sorted_d != NULL) {
     FREE(self->di_sorted_d);
+  }
   self->di_sorted_d = NULL;
-  if (self->region_flags_d != NULL)
+  if (self->region_flags_d != NULL) {
     FREE(self->region_flags_d);
+  }
   self->region_flags_d = NULL;
-  if (self->num_selected_d != NULL)
+  if (self->num_selected_d != NULL) {
     FREE(self->num_selected_d);
+  }
   self->num_selected_d = NULL;
-  if (self->forw_info_d != NULL)
+  if (self->forw_info_d != NULL) {
     FREE(self->forw_info_d);
+  }
   self->forw_info_d = NULL;
-  if (self->min_max_pixels_d != NULL)
+  if (self->min_max_pixels_d != NULL) {
     FREE(self->min_max_pixels_d);
+  }
   self->min_max_pixels_d = NULL;
-  if (self->grad_pos_d != NULL)
+  if (self->grad_pos_d != NULL) {
     FREE(self->grad_pos_d);
+  }
   self->grad_pos_d = NULL;
-  if (self->grad_col_d != NULL)
+  if (self->grad_col_d != NULL) {
     FREE(self->grad_col_d);
+  }
   self->grad_col_d = NULL;
-  if (self->grad_rad_d != NULL)
+  if (self->grad_rad_d != NULL) {
     FREE(self->grad_rad_d);
+  }
   self->grad_rad_d = NULL;
-  if (self->grad_cam_d != NULL)
+  if (self->grad_cam_d != NULL) {
     FREE(self->grad_cam_d);
+  }
   self->grad_cam_d = NULL;
-  if (self->grad_cam_buf_d != NULL)
+  if (self->grad_cam_buf_d != NULL) {
     FREE(self->grad_cam_buf_d);
+  }
   self->grad_cam_buf_d = NULL;
-  if (self->grad_opy_d != NULL)
+  if (self->grad_opy_d != NULL) {
     FREE(self->grad_opy_d);
+  }
   self->grad_opy_d = NULL;
-  if (self->n_grad_contributions_d != NULL)
+  if (self->n_grad_contributions_d != NULL) {
     FREE(self->n_grad_contributions_d);
+  }
   self->n_grad_contributions_d = NULL;
 }

View File

@@ -64,8 +64,9 @@ GLOBAL void norm_sphere_gradients(Renderer renderer, const int num_balls) {
   // The sphere only contributes to the camera gradients if it is
   // large enough in screen space.
   if (renderer.ids_sorted_d[idx] > 0 && ii.max.x >= ii.min.x + 3 &&
-      ii.max.y >= ii.min.y + 3)
+      ii.max.y >= ii.min.y + 3) {
     renderer.ids_sorted_d[idx] = 1;
+  }
   END_PARALLEL_NORET();
 };

View File

@@ -139,8 +139,9 @@ GLOBAL void render(
       coord_y < cam_norm.film_border_top + cam_norm.film_height) {
     // Initialize the result.
     if (mode == 0u) {
-      for (uint c_id = 0; c_id < cam_norm.n_channels; ++c_id)
+      for (uint c_id = 0; c_id < cam_norm.n_channels; ++c_id) {
         result[c_id] = bg_col[c_id];
+      }
     } else {
       result[0] = 0.f;
     }
@@ -190,20 +191,22 @@
             "render|found intersection with sphere %u.\n",
             sphere_id_l[write_idx]);
       }
-      if (ii.min.x == MAX_USHORT)
+      if (ii.min.x == MAX_USHORT) {
         // This is an invalid sphere (out of image). These spheres have
         // maximum depth. Since we ordered the spheres by earliest possible
         // intersection depth we re certain that there will no other sphere
         // that is relevant after this one.
         loading_done = true;
+      }
     }
     // Reset n_pixels_done.
     n_pixels_done = 0;
     thread_block.sync(); // Make sure n_loaded is updated.
     if (n_loaded > RENDER_BUFFER_LOAD_THRESH) {
       // The load buffer is full enough. Draw.
-      if (thread_block.thread_rank() == 0)
+      if (thread_block.thread_rank() == 0) {
         n_balls_loaded += n_loaded;
+      }
       max_closest_possible_intersection = 0.f;
       // This excludes threads outside of the image boundary. Also, it reduces
       // block artifacts.
@@ -290,8 +293,9 @@
       uint warp_done = thread_warp.ballot(done);
       int warp_done_bit_cnt = POPC(warp_done);
 #endif //__CUDACC__ && __HIP_PLATFORM_AMD__
-      if (thread_warp.thread_rank() == 0)
+      if (thread_warp.thread_rank() == 0) {
         ATOMICADD_B(&n_pixels_done, warp_done_bit_cnt);
+      }
       // This sync is necessary to keep n_loaded until all threads are done with
       // painting.
       thread_block.sync();
@@ -299,8 +303,9 @@
       }
       thread_block.sync();
     }
-    if (thread_block.thread_rank() == 0)
+    if (thread_block.thread_rank() == 0) {
       n_balls_loaded += n_loaded;
+    }
     PULSAR_LOG_DEV_PIX(
         PULSAR_LOG_RENDER_PIX,
         "render|loaded %d balls in total.\n",
@@ -386,8 +391,9 @@
         static_cast<float>(tracker.get_n_hits());
     } else {
       float sm_d_normfac = FRCP(FMAX(sm_d, FEPS));
-      for (uint c_id = 0; c_id < cam_norm.n_channels; ++c_id)
+      for (uint c_id = 0; c_id < cam_norm.n_channels; ++c_id) {
         result[c_id] *= sm_d_normfac;
+      }
       int write_loc = (coord_y - cam_norm.film_border_top) * cam_norm.film_width *
               (3 + 2 * n_track) +
           (coord_x - cam_norm.film_border_left) * (3 + 2 * n_track);

View File

@@ -860,8 +860,9 @@ std::tuple<torch::Tensor, torch::Tensor> Renderer::forward(
             ? (cudaStream_t) nullptr
 #endif
             : (cudaStream_t) nullptr);
-    if (mode == 1)
+    if (mode == 1) {
       results[batch_i] = results[batch_i].slice(2, 0, 1, 1);
+    }
     forw_infos[batch_i] = from_blob(
         this->renderer_vec[batch_i].forw_info_d,
         {this->renderer_vec[0].cam.film_height,

View File

@@ -128,8 +128,9 @@ struct Renderer {
     stream << "pulsar::Renderer[";
     // Device info.
     stream << self.device_type;
-    if (self.device_index != -1)
+    if (self.device_index != -1) {
       stream << ", ID " << self.device_index;
+    }
     stream << "]";
     return stream;
   }

View File

@@ -106,6 +106,8 @@ auto ComputeFaceAreas(const torch::Tensor& face_verts) {
   return face_areas;
 }
+namespace {
 // Helper function to use with std::find_if to find the index of any
 // values in the top k struct which match a given idx.
 struct IsNeighbor {
@@ -118,7 +120,6 @@
   int neighbor_idx;
 };
-namespace {
 void RasterizeMeshesNaiveCpu_worker(
     const int start_yi,
     const int end_yi,

View File

@@ -19,7 +19,7 @@ template <
     std::is_same<T, double>::value || std::is_same<T, float>::value>>
 struct vec2 {
   T x, y;
-  typedef T scalar_t;
+  using scalar_t = T;
   vec2(T x, T y) : x(x), y(y) {}
 };

View File

@@ -18,7 +18,7 @@ template <
     std::is_same<T, double>::value || std::is_same<T, float>::value>>
 struct vec3 {
   T x, y, z;
-  typedef T scalar_t;
+  using scalar_t = T;
   vec3(T x, T y, T z) : x(x), y(y), z(z) {}
 };

View File

@@ -483,9 +483,10 @@ class SqlIndexDataset(DatasetBase, ReplaceableBase):
             *self._get_pick_filters(),
             *self._get_exclude_filters(),
         ]
-        if self.pick_sequences_sql_clause:
+        if pick_sequences_sql_clause := self.pick_sequences_sql_clause:
             print("Applying the custom SQL clause.")
-            where_conditions.append(sa.text(self.pick_sequences_sql_clause))
+            # pyre-ignore[6]: TextClause is compatible with where conditions
+            where_conditions.append(sa.text(pick_sequences_sql_clause))
         def add_where(stmt):
             return stmt.where(*where_conditions) if where_conditions else stmt
@@ -505,6 +506,7 @@
         subquery = add_where(subquery).subquery()
         stmt = sa.select(subquery.c.sequence_name).where(
+            # pyre-ignore[6]: SQLAlchemy column comparison returns ColumnElement, not bool
             subquery.c.row_number <= self.limit_sequences_per_category_to
         )
@@ -633,9 +635,10 @@
             )
         )
-        if self.pick_frames_sql_clause:
+        if pick_frames_sql_clause := self.pick_frames_sql_clause:
             logger.info("Applying the custom SQL clause.")
-            pick_frames_criteria.append(sa.text(self.pick_frames_sql_clause))
+            # pyre-ignore[6]: TextClause is compatible with where conditions
+            pick_frames_criteria.append(sa.text(pick_frames_sql_clause))
         if pick_frames_criteria:
             index = self._pick_frames_by_criteria(index, pick_frames_criteria)
@@ -698,9 +701,10 @@
             )
         )
-        if self.pick_frames_sql_clause:
+        if pick_frames_sql_clause := self.pick_frames_sql_clause:
             logger.info(" applying custom SQL clause")
-            where_conditions.append(sa.text(self.pick_frames_sql_clause))
+            # pyre-ignore[6]: TextClause is compatible with where conditions
+            where_conditions.append(sa.text(pick_frames_sql_clause))
         if where_conditions:
             stmt = stmt.where(*where_conditions)
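The walrus-operator rewrites above bind the attribute to a local name, so the type checker can narrow the Optional clause to str inside the branch and the attribute is only read once. A minimal, self-contained sketch of the pattern (hypothetical names, no SQLAlchemy dependency):

from typing import List, Optional

class Config:
    # Hypothetical stand-in for SqlIndexDataset.pick_sequences_sql_clause.
    pick_sequences_sql_clause: Optional[str] = "sequence_name LIKE 'teddybear%'"

def build_filters(cfg: Config) -> List[str]:
    filters: List[str] = []
    # `clause` is a plain str inside the branch, so there is no Optional complaint.
    if clause := cfg.pick_sequences_sql_clause:
        filters.append(clause)
    return filters

print(build_filters(Config()))  # ["sequence_name LIKE 'teddybear%'"]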

View File

@@ -114,9 +114,7 @@ def mesh_laplacian_smoothing(meshes, method: str = "uniform"):
         if method == "cot":
             norm_w = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
             idx = norm_w > 0
-            # pyre-fixme[58]: `/` is not supported for operand types `float` and
-            # `Tensor`.
-            norm_w[idx] = 1.0 / norm_w[idx]
+            norm_w[idx] = torch.reciprocal(norm_w[idx])
         else:
             L_sum = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
             norm_w = 0.25 * inv_areas

View File

@@ -6,6 +6,7 @@
 # pyre-unsafe
+import torch
 from pytorch3d import _C
 from pytorch3d.structures import Meshes, Pointclouds
 from torch.autograd import Function
@@ -302,8 +303,7 @@ def point_mesh_edge_distance(meshes: Meshes, pcls: Pointclouds):
     point_to_cloud_idx = pcls.packed_to_cloud_idx()  # (sum(P_i), )
     num_points_per_cloud = pcls.num_points_per_cloud()  # (N,)
     weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
-    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-    weights_p = 1.0 / weights_p.float()
+    weights_p = torch.reciprocal(weights_p.float())
     point_to_edge = point_to_edge * weights_p
     point_dist = point_to_edge.sum() / N
@@ -377,8 +377,7 @@ def point_mesh_face_distance(
     point_to_cloud_idx = pcls.packed_to_cloud_idx()  # (sum(P_i),)
     num_points_per_cloud = pcls.num_points_per_cloud()  # (N,)
     weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
-    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-    weights_p = 1.0 / weights_p.float()
+    weights_p = torch.reciprocal(weights_p.float())
     point_to_face = point_to_face * weights_p
     point_dist = point_to_face.sum() / N

View File

@@ -223,8 +223,7 @@ def _align_camera_extrinsics(
         # of centered A and centered B
         Ac = A - Amu
         Bc = B - Bmu
-        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-        align_t_s = (Ac * Bc).mean() / (Ac**2).mean().clamp(eps)
+        align_t_s = (Ac * Bc).mean() / torch.square(Ac).mean().clamp(eps)
     else:
         # set the scale to identity
         align_t_s = 1.0

View File

@@ -55,11 +55,9 @@ def laplacian(verts: torch.Tensor, edges: torch.Tensor) -> torch.Tensor:
     # We construct the Laplacian matrix by adding the non diagonal values
     # i.e. L[i, j] = 1 ./ deg(i) if (i, j) is an edge
     deg0 = deg[e0]
-    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-    deg0 = torch.where(deg0 > 0.0, 1.0 / deg0, deg0)
+    deg0 = torch.where(deg0 > 0.0, torch.reciprocal(deg0), deg0)
     deg1 = deg[e1]
-    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-    deg1 = torch.where(deg1 > 0.0, 1.0 / deg1, deg1)
+    deg1 = torch.where(deg1 > 0.0, torch.reciprocal(deg1), deg1)
     val = torch.cat([deg0, deg1])
     L = torch.sparse_coo_tensor(idx, val, (V, V), dtype=torch.float32)
@@ -137,8 +135,7 @@ def cot_laplacian(
     val = torch.stack([area] * 3, dim=1).view(-1)
     inv_areas.scatter_add_(0, idx, val)
     idx = inv_areas > 0
-    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-    inv_areas[idx] = 1.0 / inv_areas[idx]
+    inv_areas[idx] = torch.reciprocal(inv_areas[idx])
     inv_areas = inv_areas.view(-1, 1)
     return L, inv_areas

View File

@@ -182,8 +182,7 @@ def iterative_closest_point(
         t_history.append(SimilarityTransform(R, T, s))
         # compute the root mean squared error
-        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-        Xt_sq_diff = ((Xt - Xt_nn_points) ** 2).sum(2)
+        Xt_sq_diff = torch.square(Xt - Xt_nn_points).sum(2)
         rmse = oputil.wmean(Xt_sq_diff[:, :, None], mask_X).sqrt()[:, 0, 0]
         # compute the relative rmse

View File

@@ -179,9 +179,7 @@ def sample_farthest_points_naive(
             # and all the other points. If a point has already been selected
             # it's distance will be 0.0 so it will not be selected again as the max.
             dist = points[n, selected_idx, :] - points[n, : lengths[n], :]
-            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
-            # `int`.
-            dist_to_last_selected = (dist**2).sum(-1)  # (P - i)
+            dist_to_last_selected = torch.square(dist).sum(-1)  # (P - i)
             # If closer than currently saved distance to one of the selected
             # points, then updated closest_dists

View File

@@ -629,10 +629,8 @@ class FoVPerspectiveCameras(CamerasBase):
         # so the so the z sign is 1.0.
         z_sign = 1.0
-        # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-        K[:, 0, 0] = 2.0 * znear / (max_x - min_x)
-        # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-        K[:, 1, 1] = 2.0 * znear / (max_y - min_y)
+        K[:, 0, 0] = torch.div(2.0 * znear, max_x - min_x)
+        K[:, 1, 1] = torch.div(2.0 * znear, max_y - min_y)
         K[:, 0, 2] = (max_x + min_x) / (max_x - min_x)
         K[:, 1, 2] = (max_y + min_y) / (max_y - min_y)
         K[:, 3, 2] = z_sign * ones
@@ -1178,9 +1176,7 @@ class PerspectiveCameras(CamerasBase):
         xy_inv_depth = torch.cat(
             # pyre-fixme[6]: For 1st argument expected `Union[List[Tensor],
             # tuple[Tensor, ...]]` but got `Tuple[Tensor, float]`.
-            # pyre-fixme[58]: `/` is not supported for operand types `float` and
-            # `Tensor`.
-            (xy_depth[..., :2], 1.0 / xy_depth[..., 2:3]),
+            (xy_depth[..., :2], torch.reciprocal(xy_depth[..., 2:3])),
             dim=-1,  # type: ignore
         )
         return unprojection_transform.transform_points(xy_inv_depth)

View File

@@ -434,13 +434,7 @@ def clip_faces(
     # These will then be filled in for each case.
     ###########################################
     F_clipped = (
-        F
-        # pyre-fixme[58]: `+` is not supported for operand types `int` and
-        # `Union[bool, float, int]`.
-        + faces_delta_cum[-1].item()
-        # pyre-fixme[58]: `+` is not supported for operand types `int` and
-        # `Union[bool, float, int]`.
-        + faces_delta[-1].item()
+        F + int(faces_delta_cum[-1].item()) + int(faces_delta[-1].item())
     )  # Total number of faces in the new Meshes
     face_verts_clipped = torch.zeros(
         (F_clipped, 3, 3), dtype=face_verts_unclipped.dtype, device=device

View File

@@ -71,9 +71,7 @@ def _list_to_padded_wrapper(
         # pyre-fixme[6]: For 2nd param expected `int` but got `Union[bool, float, int]`.
         x_reshaped.append(y.reshape(-1, D))
     x_padded = list_to_padded(x_reshaped, pad_size=pad_size, pad_value=pad_value)
-    # pyre-fixme[58]: `+` is not supported for operand types `Tuple[int, int]` and
-    # `Size`.
-    return x_padded.reshape((N, -1) + reshape_dims)
+    return x_padded.reshape((N, -1) + tuple(reshape_dims))
 def _padded_to_list_wrapper(
@@ -104,9 +102,7 @@ def _padded_to_list_wrapper(
     # pyre-fixme[6]: For 3rd param expected `int` but got `Union[bool, float, int]`.
     x_reshaped = x.reshape(N, M, D)
     x_list = padded_to_list(x_reshaped, split_size=split_size)
-    # pyre-fixme[58]: `+` is not supported for operand types `Tuple[typing.Any]` and
-    # `Size`.
-    x_list = [xl.reshape((xl.shape[0],) + reshape_dims) for xl in x_list]
+    x_list = [xl.reshape((xl.shape[0],) + tuple(reshape_dims)) for xl in x_list]
     return x_list
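The tuple(reshape_dims) wrappers above only affect the static types: torch.Size is already a tuple subclass, so the concatenation behaves the same at runtime. A small illustration (not from the diff):

import torch

x = torch.zeros(2, 5, 4, 3)
reshape_dims = x.shape[2:]  # torch.Size, a subclass of tuple
# Both spellings produce the same shape tuple; tuple() just satisfies Pyre.
assert (10, -1) + tuple(reshape_dims) == (10, -1) + reshape_dims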

View File

@@ -132,15 +132,13 @@ def _get_splat_kernel_normalization(
     epsilon = 0.05
     normalization_constant = torch.exp(
-        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-        -(offsets**2).sum(dim=1) / (2 * sigma**2)
+        -torch.square(offsets).sum(dim=1) / (2 * sigma**2)
     ).sum()
     # We add an epsilon to the normalization constant to ensure the gradient will travel
     # through non-boundary pixels' normalization factor, see Sec. 3.3.1 in "Differentia-
     # ble Surface Rendering via Non-Differentiable Sampling", Cole et al.
-    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-    return (1 + epsilon) / normalization_constant
+    return torch.div(1 + epsilon, normalization_constant)
 def _compute_occlusion_layers(
@@ -264,8 +262,9 @@ def _compute_splatting_colors_and_weights(
         torch.floor(pixel_coords_screen[..., :2]) - pixel_coords_screen[..., :2] + 0.5
     ).view((N, H, W, K, 1, 2))
-    # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-    dist2_p_q = torch.sum((q_to_px_center + offsets) ** 2, dim=5)  # (N, H, W, K, 9)
+    dist2_p_q = torch.sum(
+        torch.square(q_to_px_center + offsets), dim=5
+    )  # (N, H, W, K, 9)
     splat_weights = torch.exp(-dist2_p_q / (2 * sigma**2))
     alpha = colors[..., 3:4]
     splat_weights = (alpha * splat_kernel_normalization * splat_weights).unsqueeze(
@@ -417,12 +416,12 @@ def _normalize_and_compose_all_layers(
     device = splatted_colors_per_occlusion_layer.device
     # Normalize each of bg/surface/fg splat layers separately.
-    normalization_scales = 1.0 / (
-        # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
+    normalization_scales = torch.div(
+        1.0,
         torch.maximum(
             splatted_weights_per_occlusion_layer,
             torch.tensor([1.0], device=device),
-        )
+        ),
     )  # (N, H, W, 1, 3)
     normalized_splatted_colors = (

View File

@@ -269,9 +269,7 @@ class TensorProperties(nn.Module):
             # to have the same shape as the input tensor.
             new_dims = len(tensor_dims) - len(idx_dims)
             new_shape = idx_dims + (1,) * new_dims
-            # pyre-fixme[58]: `+` is not supported for operand types
-            # `Tuple[int]` and `torch._C.Size`
-            expand_dims = (-1,) + tensor_dims[1:]
+            expand_dims = (-1,) + tuple(tensor_dims[1:])
             _batch_idx = _batch_idx.view(*new_shape)
             _batch_idx = _batch_idx.expand(*expand_dims)

View File

@@ -52,8 +52,7 @@ def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
         Rotation matrices as tensor of shape (..., 3, 3).
     """
     r, i, j, k = torch.unbind(quaternions, -1)
-    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-    two_s = 2.0 / (quaternions * quaternions).sum(-1)
+    two_s = torch.div(2.0, (quaternions * quaternions).sum(-1))
     o = torch.stack(
         (
@@ -137,18 +136,18 @@ def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
     # we produce the desired quaternion multiplied by each of r, i, j, k
     quat_by_rijk = torch.stack(
         [
-            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
-            # `int`.
-            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
-            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
-            # `int`.
-            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
-            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
-            # `int`.
-            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
-            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
-            # `int`.
-            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
+            torch.stack(
+                [torch.square(q_abs[..., 0]), m21 - m12, m02 - m20, m10 - m01], dim=-1
+            ),
+            torch.stack(
+                [m21 - m12, torch.square(q_abs[..., 1]), m10 + m01, m02 + m20], dim=-1
+            ),
+            torch.stack(
+                [m02 - m20, m10 + m01, torch.square(q_abs[..., 2]), m12 + m21], dim=-1
+            ),
+            torch.stack(
+                [m10 - m01, m20 + m02, m21 + m12, torch.square(q_abs[..., 3])], dim=-1
+            ),
         ],
         dim=-2,
     )

View File

@@ -195,15 +195,15 @@ def _se3_V_matrix(
     V = (
         torch.eye(3, dtype=log_rotation.dtype, device=log_rotation.device)[None]
         + log_rotation_hat
-        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-        * ((1 - torch.cos(rotation_angles)) / (rotation_angles**2))[:, None, None]
+        * ((1 - torch.cos(rotation_angles)) / torch.square(rotation_angles))[
+            :, None, None
+        ]
         + (
             log_rotation_hat_square
-            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
-            # `int`.
-            * ((rotation_angles - torch.sin(rotation_angles)) / (rotation_angles**3))[
-                :, None, None
-            ]
+            * (
+                (rotation_angles - torch.sin(rotation_angles))
+                / torch.pow(rotation_angles, 3)
+            )[:, None, None]
         )
     )
@@ -215,8 +215,7 @@ def _get_se3_V_input(log_rotation: torch.Tensor, eps: float = 1e-4):
     A helper function that computes the input variables to the `_se3_V_matrix`
     function.
     """
-    # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
-    nrms = (log_rotation**2).sum(-1)
+    nrms = torch.square(log_rotation).sum(-1)
     rotation_angles = torch.clamp(nrms, eps).sqrt()
     log_rotation_hat = hat(log_rotation)
     log_rotation_hat_square = torch.bmm(log_rotation_hat, log_rotation_hat)

View File

@@ -623,9 +623,7 @@ class Scale(Transform3d):
         Return the inverse of self._matrix.
         """
         xyz = torch.stack([self._matrix[:, i, i] for i in range(4)], dim=1)
-        # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
-        ixyz = 1.0 / xyz
-        # pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
+        ixyz = torch.reciprocal(xyz)
         imat = torch.diag_embed(ixyz, dim1=1, dim2=2)
         return imat

View File

@@ -679,8 +679,8 @@ class TestTranslate(TestCaseMixin, unittest.TestCase):
         im = t.inverse()._matrix
         im_2 = t._matrix.inverse()
         im_comp = t.get_matrix().inverse()
-        self.assertClose(im, im_comp)
-        self.assertClose(im, im_2)
+        self.assertClose(im, im_comp, atol=1e-4)
+        self.assertClose(im, im_2, atol=1e-4)
     def test_get_item(self, batch_size=5):
         device = torch.device("cuda:0")