Mirror of https://github.com/facebookresearch/pytorch3d.git, synced 2025-08-02
test & compilation fixes
Summary:
Fixes mostly related to the "main" build on CircleCI.
- Avoid an error to do with tuple copy from an initializer_list, which is `explicit` on older compilers (a sketch of the failure mode follows the RasterizeMeshesNaiveCpu hunk below).
- Add better reporting to the copyright test.
- Move to PackedTensorAccessor64 from the deprecated PackedTensorAccessor (the migration pattern is sketched after the kernel hunks below).
- Avoid some warnings about comparisons of mismatched signedness (sketched after the Renderer::backward hunk below).
The "main" build is the only one that runs the test_build suite. In that area:
- Fix my bad copyright fix D26275931 (3463f418b8) / 965c9c
- Add a test that all tutorials are valid JSON.
Reviewed By: nikhilaravi
Differential Revision: D26366466
fbshipit-source-id: c4ab8b7e6647987069f7cb7144aa6ab7c24bcdac
Parent: e13e63a811
Commit: 5ac2f42184
@@ -1 +1 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved
+# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
@@ -9,9 +9,9 @@
 template <typename scalar_t>
 __global__ void SigmoidAlphaBlendForwardKernel(
     // clang-format off
-    const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> distances, // (N, H, W, K)
-    const torch::PackedTensorAccessor<int64_t, 4, torch::RestrictPtrTraits, size_t> pix_to_face, // (N, H, W, K)
-    torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> alphas, // (N, H, W)
+    const torch::PackedTensorAccessor64<scalar_t, 4, torch::RestrictPtrTraits> distances, // (N, H, W, K)
+    const torch::PackedTensorAccessor64<int64_t, 4, torch::RestrictPtrTraits> pix_to_face, // (N, H, W, K)
+    torch::PackedTensorAccessor64<scalar_t, 3, torch::RestrictPtrTraits> alphas, // (N, H, W)
     // clang-format on
     const scalar_t sigma,
     const int N,
@@ -93,9 +93,9 @@ torch::Tensor SigmoidAlphaBlendForwardCuda(
       distances.scalar_type(), "sigmoid_alpha_blend_kernel", ([&] {
         // clang-format off
         SigmoidAlphaBlendForwardKernel<scalar_t><<<blocks, threads, 0, stream>>>(
-            distances.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
-            pix_to_face.packed_accessor<int64_t, 4, torch::RestrictPtrTraits, size_t>(),
-            alphas.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(),
+            distances.packed_accessor64<scalar_t, 4, torch::RestrictPtrTraits>(),
+            pix_to_face.packed_accessor64<int64_t, 4, torch::RestrictPtrTraits>(),
+            alphas.packed_accessor64<scalar_t, 3, torch::RestrictPtrTraits>(),
             sigma,
             N,
             H,
@@ -111,11 +111,11 @@
 template <typename scalar_t>
 __global__ void SigmoidAlphaBlendBackwardKernel(
     // clang-format off
-    const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grad_alphas, // (N, H, W)
-    const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> alphas, // (N, H, W)
-    const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> distances, // (N, H, W, K)
-    const torch::PackedTensorAccessor<int64_t, 4, torch::RestrictPtrTraits, size_t> pix_to_face, // (N, H, W, K)
-    torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grad_distances, // (N, H, W)
+    const torch::PackedTensorAccessor64<scalar_t, 3, torch::RestrictPtrTraits> grad_alphas, // (N, H, W)
+    const torch::PackedTensorAccessor64<scalar_t, 3, torch::RestrictPtrTraits> alphas, // (N, H, W)
+    const torch::PackedTensorAccessor64<scalar_t, 4, torch::RestrictPtrTraits> distances, // (N, H, W, K)
+    const torch::PackedTensorAccessor64<int64_t, 4, torch::RestrictPtrTraits> pix_to_face, // (N, H, W, K)
+    torch::PackedTensorAccessor64<scalar_t, 4, torch::RestrictPtrTraits> grad_distances, // (N, H, W)
     // clang-format on
     const scalar_t sigma,
     const int N,
@@ -192,11 +192,11 @@ torch::Tensor SigmoidAlphaBlendBackwardCuda(
         SigmoidAlphaBlendBackwardKernel<scalar_t>
             <<<blocks, threads, 0, stream>>>(
                 // clang-format off
-                grad_alphas.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(),
-                alphas.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(),
-                distances.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
-                pix_to_face.packed_accessor<int64_t, 4, torch::RestrictPtrTraits, size_t>(),
-                grad_distances.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
+                grad_alphas.packed_accessor64<scalar_t, 3, torch::RestrictPtrTraits>(),
+                alphas.packed_accessor64<scalar_t, 3, torch::RestrictPtrTraits>(),
+                distances.packed_accessor64<scalar_t, 4, torch::RestrictPtrTraits>(),
+                pix_to_face.packed_accessor64<int64_t, 4, torch::RestrictPtrTraits>(),
+                grad_distances.packed_accessor64<scalar_t, 4, torch::RestrictPtrTraits>(),
                 // clang-format on
                 sigma,
                 N,
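The accessor changes above are mechanical: PyTorch deprecated `Tensor::packed_accessor<T, N, PtrTraits, index_t>` in favor of `packed_accessor64` (and `packed_accessor32` when sizes permit), which fix the index type instead of taking it as a template parameter. A minimal sketch of the migration pattern on a hypothetical kernel, not part of this diff:

#include <torch/extension.h>

template <typename scalar_t>
__global__ void ScaleKernel(
    // The index-type template argument (formerly size_t) is gone; the
    // 64-bit accessor fixes indexing to int64_t.
    torch::PackedTensorAccessor64<scalar_t, 2, torch::RestrictPtrTraits> data,
    const scalar_t factor) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < data.size(0)) {
    for (int64_t j = 0; j < data.size(1); ++j) {
      data[i][j] *= factor;
    }
  }
}

// Call site (host), for some tensor t:
//   t.packed_accessor<float, 2, torch::RestrictPtrTraits, size_t>()   // old
//   t.packed_accessor64<float, 2, torch::RestrictPtrTraits>()         // new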
@@ -214,57 +214,63 @@ std::tuple<size_t, size_t, bool, torch::Tensor> Renderer::arg_check(
     batch_processing = true;
     batch_size = vert_pos.size(0);
     THArgCheck(
-        vert_col.ndimension() == 3 && vert_col.size(0) == batch_size,
+        vert_col.ndimension() == 3 &&
+            vert_col.size(0) == static_cast<int64_t>(batch_size),
         2,
         "vert_col needs to have batch size.");
     THArgCheck(
-        vert_radii.ndimension() == 2 && vert_radii.size(0) == batch_size,
+        vert_radii.ndimension() == 2 &&
+            vert_radii.size(0) == static_cast<int64_t>(batch_size),
         3,
         "vert_radii must be specified per batch.");
     THArgCheck(
-        cam_pos.ndimension() == 2 && cam_pos.size(0) == batch_size,
+        cam_pos.ndimension() == 2 &&
+            cam_pos.size(0) == static_cast<int64_t>(batch_size),
         4,
         "cam_pos must be specified per batch and have the correct batch size.");
     THArgCheck(
         pixel_0_0_center.ndimension() == 2 &&
-            pixel_0_0_center.size(0) == batch_size,
+            pixel_0_0_center.size(0) == static_cast<int64_t>(batch_size),
         5,
         "pixel_0_0_center must be specified per batch.");
     THArgCheck(
-        pixel_vec_x.ndimension() == 2 && pixel_vec_x.size(0) == batch_size,
+        pixel_vec_x.ndimension() == 2 &&
+            pixel_vec_x.size(0) == static_cast<int64_t>(batch_size),
         6,
         "pixel_vec_x must be specified per batch.");
     THArgCheck(
-        pixel_vec_y.ndimension() == 2 && pixel_vec_y.size(0) == batch_size,
+        pixel_vec_y.ndimension() == 2 &&
+            pixel_vec_y.size(0) == static_cast<int64_t>(batch_size),
         7,
         "pixel_vec_y must be specified per batch.");
     THArgCheck(
-        focal_length.ndimension() == 1 && focal_length.size(0) == batch_size,
+        focal_length.ndimension() == 1 &&
+            focal_length.size(0) == static_cast<int64_t>(batch_size),
         8,
         "focal_length must be specified per batch.");
     THArgCheck(
         principal_point_offsets.ndimension() == 2 &&
-            principal_point_offsets.size(0) == batch_size,
+            principal_point_offsets.size(0) == static_cast<int64_t>(batch_size),
         9,
         "principal_point_offsets must be specified per batch.");
     if (opacity.has_value()) {
       THArgCheck(
           opacity.value().ndimension() == 2 &&
-              opacity.value().size(0) == batch_size,
+              opacity.value().size(0) == static_cast<int64_t>(batch_size),
           13,
           "Opacity needs to be specified batch-wise.");
     }
     // Check all parameters are for a matching number of points.
     n_points = vert_pos.size(1);
     THArgCheck(
-        vert_col.size(1) == n_points,
+        vert_col.size(1) == static_cast<int64_t>(n_points),
         2,
         ("The number of points for vertex positions (" +
          std::to_string(n_points) + ") and vertex colors (" +
          std::to_string(vert_col.size(1)) + ") doesn't agree.")
            .c_str());
     THArgCheck(
-        vert_radii.size(1) == n_points,
+        vert_radii.size(1) == static_cast<int64_t>(n_points),
         3,
         ("The number of points for vertex positions (" +
          std::to_string(n_points) + ") and vertex radii (" +
@@ -272,7 +278,7 @@ std::tuple<size_t, size_t, bool, torch::Tensor> Renderer::arg_check(
            .c_str());
     if (opacity.has_value()) {
       THArgCheck(
-          opacity.value().size(1) == n_points,
+          opacity.value().size(1) == static_cast<int64_t>(n_points),
           13,
           "Opacity needs to be specified per point.");
     }
@@ -352,14 +358,14 @@ std::tuple<size_t, size_t, bool, torch::Tensor> Renderer::arg_check(
     // Check each.
     n_points = vert_pos.size(0);
     THArgCheck(
-        vert_col.size(0) == n_points,
+        vert_col.size(0) == static_cast<int64_t>(n_points),
         2,
         ("The number of points for vertex positions (" +
          std::to_string(n_points) + ") and vertex colors (" +
          std::to_string(vert_col.size(0)) + ") doesn't agree.")
            .c_str());
     THArgCheck(
-        vert_radii.size(0) == n_points,
+        vert_radii.size(0) == static_cast<int64_t>(n_points),
         3,
         ("The number of points for vertex positions (" +
          std::to_string(n_points) + ") and vertex radii (" +
@@ -367,7 +373,7 @@ std::tuple<size_t, size_t, bool, torch::Tensor> Renderer::arg_check(
            .c_str());
     if (opacity.has_value()) {
       THArgCheck(
-          opacity.value().size(0) == n_points,
+          opacity.value().size(0) == static_cast<int64_t>(n_points),
           12,
           "Opacity needs to be specified per point.");
     }
@@ -958,12 +964,15 @@ Renderer::backward(
   }
   if (batch_processing) {
     THArgCheck(
-        grad_im.size(0) == batch_size,
+        grad_im.size(0) == static_cast<int64_t>(batch_size),
         1,
         "Gradient image batch size must agree.");
-    THArgCheck(image.size(0) == batch_size, 2, "Image batch size must agree.");
     THArgCheck(
-        forw_info.size(0) == batch_size,
+        image.size(0) == static_cast<int64_t>(batch_size),
+        2,
+        "Image batch size must agree.");
+    THArgCheck(
+        forw_info.size(0) == static_cast<int64_t>(batch_size),
         3,
         "forward info must have batch size.");
   }
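The `static_cast<int64_t>` insertions above are the mismatched-comparison fix from the summary: `arg_check` returns `batch_size` and `n_points` as `size_t` (unsigned), while `Tensor::size()` returns `int64_t` (signed), and `THArgCheck(cond, arg_index, msg)` evaluates a comparison between the two, which triggers -Wsign-compare. A minimal standalone sketch of the warning and the fix:

#include <cstddef>
#include <cstdint>

bool sizes_agree(std::size_t batch_size, std::int64_t dim0) {
  // return dim0 == batch_size;  // warns: comparison of integers of
  //                             // different signedness (-Wsign-compare)
  // Cast the unsigned count to the tensor's signed index type instead.
  return dim0 == static_cast<std::int64_t>(batch_size);
}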
@@ -291,8 +291,8 @@ RasterizeMeshesNaiveCpu(
         const float dist_neighbor = std::abs(std::get<2>(neighbor));
         if (dist < dist_neighbor) {
           // Overwrite the neighbor face values.
-          q[idx_top_k] = {
-              pz, f, signed_dist, bary_clip.x, bary_clip.y, bary_clip.z};
+          q[idx_top_k] = std::make_tuple(
+              pz, f, signed_dist, bary_clip.x, bary_clip.y, bary_clip.z);
         }
       } else {
         // Handle as a normal face.
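This is the initializer_list fix called out in the summary: `q[idx_top_k] = {...}` is copy-list-initialization of a std::tuple, which requires a non-explicit constructor. Before the C++17 resolution (N4387), the element-wise tuple constructor was declared `explicit`, so older standard libraries reject the braced form; `std::make_tuple` sidesteps it. A minimal sketch with placeholder element types, not the real queue entries:

#include <tuple>

void assign_example() {
  std::tuple<float, int, double> t;
  // Fails on pre-N4387 standard libraries, where the element-wise tuple
  // constructor is explicit and thus unusable in copy-list-initialization:
  //   t = {1.0f, 2, 3.0};
  // make_tuple direct-initializes a temporary, so assignment always works:
  t = std::make_tuple(1.0f, 2, 3.0);
}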
@@ -1,4 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+import json
 import os
 import unittest
 from collections import Counter
@@ -39,18 +40,33 @@ class TestBuild(unittest.TestCase):
             + " All rights reserved.\n"
         )

+        files_missing_copyright_header = []
+
         for extension in extensions:
-            for i in root_dir.glob(f"**/*.{extension}"):
-                if str(i).endswith(
+            for path in root_dir.glob(f"**/*.{extension}"):
+                if str(path).endswith(
                     "pytorch3d/transforms/external/kornia_angle_axis_to_rotation_matrix.py"
                 ):
                     continue
-                if str(i).endswith("pytorch3d/csrc/pulsar/include/fastermath.h"):
+                if str(path).endswith("pytorch3d/csrc/pulsar/include/fastermath.h"):
                     continue
-                with open(i) as f:
+                with open(path) as f:
                     firstline = f.readline()
                     if firstline.startswith(("# -*-", "#!")):
                         firstline = f.readline()
-                self.assertTrue(
-                    firstline.endswith(expect), f"{i} missing copyright header."
-                )
+                    if not firstline.endswith(expect):
+                        files_missing_copyright_header.append(str(path))
+
+        if len(files_missing_copyright_header) != 0:
+            self.fail("\n".join(files_missing_copyright_header))
+
+    @unittest.skipIf(in_conda_build, "In conda build")
+    def test_valid_ipynbs(self):
+        # Check that the ipython notebooks are valid json
+        test_dir = Path(__file__).resolve().parent
+        tutorials_dir = test_dir.parent / "docs" / "tutorials"
+        tutorials = sorted(tutorials_dir.glob("*.ipynb"))
+
+        for tutorial in tutorials:
+            with open(tutorial) as f:
+                json.load(f)
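Note that `json.load` raises a `ValueError` (specifically `json.JSONDecodeError`) on a malformed notebook, so any invalid tutorial fails this test with the offending file named in the traceback; no explicit assertion is needed.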