Summary: Collection of spelling fixes, mostly in docs / tutorials.

Reviewed By: gkioxari

Differential Revision: D26101323

fbshipit-source-id: 652f62bc9d71a4ff872efa21141225e43191353a
This commit is contained in:
Jeremy Reizenstein
2021-04-09 09:57:55 -07:00
committed by Facebook GitHub Bot
parent c2e62a5087
commit 124bb5e391
75 changed files with 220 additions and 217 deletions

View File

@@ -21,7 +21,7 @@ def interpolate_face_attributes(
pixel in the image. A value < 0 indicates that the pixel does not
overlap any face and should be skipped.
barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
the barycentric coordianates of each pixel
the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
face_attributes: packed attributes of shape (total_faces, 3, D),

View File

@@ -147,7 +147,7 @@ def knn_points(
p2_nn = knn_gather(p2, p1_idx, lengths2)
which is a helper function that allows indexing any tensor of shape (N, P2, U) with
the indices `p1_idx` returned by `knn_points`. The outout is a tensor
the indices `p1_idx` returned by `knn_points`. The output is a tensor
of shape (N, P1, K, U).
"""

View File

@@ -184,7 +184,7 @@ def _gen_pairs(input, dim=-2, reducer=lambda a, b: ((a - b) ** 2).sum(dim=-1)):
def _kernel_vec_distances(v):
"""Computes the coefficients for linearisation of the quadratic system
"""Computes the coefficients for linearization of the quadratic system
to match all pairwise distances between 4 control points (dim=1).
The last dimension corresponds to the coefficients for quadratic terms
Bij = Bi * Bj, where Bi and Bj correspond to kernel vectors.

View File

@@ -28,7 +28,7 @@ def estimate_pointcloud_normals(
**neighborhood_size**: The size of the neighborhood used to estimate the
geometry around each point.
**disambiguate_directions**: If `True`, uses the algorithm from [1] to
ensure sign consistency of the normals of neigboring points.
ensure sign consistency of the normals of neighboring points.
Returns:
**normals**: A tensor of normals for each input point
@@ -83,7 +83,7 @@ def estimate_pointcloud_local_coord_frames(
**neighborhood_size**: The size of the neighborhood used to estimate the
geometry around each point.
**disambiguate_directions**: If `True`, uses the algorithm from [1] to
ensure sign consistency of the normals of neigboring points.
ensure sign consistency of the normals of neighboring points.
Returns:
**curvatures**: The three principal curvatures of each point

View File

@@ -140,7 +140,7 @@ def add_points_features_to_volume_densities_features(
volume_features: Batch of input feature volumes of shape
`(minibatch, feature_dim, D, H, W)`
If set to `None`, the `volume_features` will be automatically
instantiatied with a correct size and filled with 0s.
instantiated with a correct size and filled with 0s.
mode: The mode of the conversion of individual points into the volume.
Set either to `nearest` or `trilinear`:
`nearest`: Each 3D point is first rounded to the volumetric
@@ -310,7 +310,7 @@ def splat_points_to_volumes(
# minibatch x n_points x feature_dim -> minibatch x feature_dim x n_points
points_features = points_features.permute(0, 2, 1).contiguous()
# XYZ = the upper-left volume index of the 8-neigborhood of every point
# XYZ = the upper-left volume index of the 8-neighborhood of every point
# grid_sizes is of the form (minibatch, depth-height-width)
grid_sizes_xyz = grid_sizes[:, [2, 1, 0]]

View File

@@ -25,8 +25,9 @@ def sample_points_from_meshes(
Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
]:
"""
Convert a batch of meshes to a pointcloud by uniformly sampling points on
the surface of the mesh with probability proportional to the face area.
Convert a batch of meshes to a batch of pointclouds by uniformly sampling
points on the surface of the mesh with probability proportional to the
face area.
Args:
meshes: A Meshes object with a batch of N meshes.
@@ -54,7 +55,7 @@ def sample_points_from_meshes(
.. code-block:: python
Poinclouds(samples, normals=normals, features=textures)
Pointclouds(samples, normals=normals, features=textures)
"""
if meshes.isempty():
raise ValueError("Meshes are empty.")
@@ -71,7 +72,7 @@ def sample_points_from_meshes(
num_meshes = len(meshes)
num_valid_meshes = torch.sum(meshes.valid) # Non empty meshes.
# Intialize samples tensor with fill value 0 for empty meshes.
# Initialize samples tensor with fill value 0 for empty meshes.
samples = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)
# Only compute samples for non empty meshes
@@ -104,7 +105,7 @@ def sample_points_from_meshes(
samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c
if return_normals:
# Intialize normals tensor with fill value 0 for empty meshes.
# Initialize normals tensor with fill value 0 for empty meshes.
# Normals for the sampled points are face normals computed from
# the vertices of the face in which the sampled point lies.
normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)

View File

@@ -27,7 +27,7 @@ def wmean(
the last (spatial) dimension are assumed same;
dim: dimension(s) in `x` to average over;
keepdim: tells whether to keep the resulting singleton dimension.
eps: minumum clamping value in the denominator.
eps: minimum clamping value in the denominator.
Returns:
the mean tensor:
* if `weights` is None => `mean(x, dim)`,

View File

@@ -15,7 +15,7 @@ def vert_align(
) -> torch.Tensor:
"""
Sample vertex features from a feature map. This operation is called
"perceptual feaure pooling" in [1] or "vert align" in [2].
"perceptual feature pooling" in [1] or "vert align" in [2].
[1] Wang et al, "Pixel2Mesh: Generating 3D Mesh Models from Single
RGB Images", ECCV 2018.
@@ -45,7 +45,7 @@ def vert_align(
Returns:
feats_sampled: FloatTensor of shape (N, V, C) giving sampled features for each
vertex. If feats is a list, we return concatentated features in axis=2 of
vertex. If feats is a list, we return concatenated features in axis=2 of
shape (N, V, sum(C_n)) where C_n = feats[n].shape[1].
If return_packed = True, the features are transformed to a packed
representation of shape (sum(V), C)