, T, T, T> BarycentricPerspectiveCorrectionBackward(
// Clip negative barycentric coordinates to 0.0 and renormalize so
// the barycentric coordinates for a point sum to 1. When the blur_radius
// is greater than 0, a face will still be recorded as overlapping a pixel
-// if the pixel is outisde the face. In this case at least one of the
+// if the pixel is outside the face. In this case at least one of the
// barycentric coordinates for the pixel relative to the face will be negative.
// Clipping will ensure that the texture and z buffer are interpolated
// correctly.
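A minimal plain-PyTorch sketch of the clipping described in this comment (names are illustrative, not the library's internals):

```python
import torch

def clip_barycentric(bary: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # Clamp negative coordinates to 0, then renormalize each triple to sum to 1.
    clipped = bary.clamp(min=0.0)
    return clipped / clipped.sum(dim=-1, keepdim=True).clamp(min=eps)
```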
diff --git a/pytorch3d/datasets/r2n2/r2n2.py b/pytorch3d/datasets/r2n2/r2n2.py
index b4ef58c0..dff335c5 100644
--- a/pytorch3d/datasets/r2n2/r2n2.py
+++ b/pytorch3d/datasets/r2n2/r2n2.py
@@ -99,7 +99,7 @@ class R2N2(ShapeNetBase):
path.join(SYNSET_DICT_DIR, "r2n2_synset_dict.json"), "r"
) as read_dict:
self.synset_dict = json.load(read_dict)
- # Inverse dicitonary mapping synset labels to corresponding offsets.
+ # Inverse dictionary mapping synset labels to corresponding offsets.
self.synset_inv = {label: offset for offset, label in self.synset_dict.items()}
# Store synset and model ids of objects mentioned in the splits_file.
@@ -383,7 +383,7 @@ class R2N2(ShapeNetBase):
view_idxs: each model will be rendered with the orientation(s) of the specified
views. Only render by view_idxs if no camera or args for BlenderCamera is
supplied.
- Accepts any of the args of the render function in ShapnetBase:
+ Accepts any of the args of the render function in ShapeNetBase:
model_ids: List[str] of model_ids of models intended to be rendered.
categories: List[str] of categories intended to be rendered. categories
and sample_nums must be specified at the same time. categories can be given
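A hedged usage sketch of the `render` API documented above (the dataset paths and the model id are placeholders):

```python
from pytorch3d.datasets import R2N2

r2n2_dataset = R2N2("train", SHAPENET_PATH, R2N2_PATH, SPLITS_PATH)
images = r2n2_dataset.render(
    model_ids=["a_model_id"],  # placeholder id
    view_idxs=[0, 1],          # two of the model's original R2N2 orientations
)
```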
diff --git a/pytorch3d/datasets/r2n2/utils.py b/pytorch3d/datasets/r2n2/utils.py
index f82636b1..5a1042da 100644
--- a/pytorch3d/datasets/r2n2/utils.py
+++ b/pytorch3d/datasets/r2n2/utils.py
@@ -97,8 +97,8 @@ def compute_extrinsic_matrix(azimuth, elevation, distance):
Copied from meshrcnn codebase:
https://github.com/facebookresearch/meshrcnn/blob/master/shapenet/utils/coords.py#L96
- Compute 4x4 extrinsic matrix that converts from homogenous world coordinates
- to homogenous camera coordinates. We assume that the camera is looking at the
+ Compute 4x4 extrinsic matrix that converts from homogeneous world coordinates
+ to homogeneous camera coordinates. We assume that the camera is looking at the
origin.
Used in R2N2 Dataset when computing calibration matrices.
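For reference, a short usage sketch of the helper documented above (angles in degrees and distance in world units, per the meshrcnn original):

```python
# 4x4 matrix mapping homogeneous world coordinates to camera coordinates
# for a camera looking at the origin.
RT = compute_extrinsic_matrix(azimuth=30.0, elevation=25.0, distance=1.5)
```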
@@ -189,7 +189,7 @@ def _compute_idxs(vals, counts):
Args:
vals: tensor of binary values indicating voxel presence in a dense format.
- counts: tensor of number of occurence of each value in vals.
+        counts: tensor of the number of occurrences of each value in vals.
Returns:
idxs: A tensor of shape (N), where N is the number of nonzero voxels.
@@ -379,7 +379,7 @@ def project_verts(verts, P, eps=1e-1):
Copied from meshrcnn codebase:
https://github.com/facebookresearch/meshrcnn/blob/master/shapenet/utils/coords.py#L159
- Project verticies using a 4x4 transformation matrix.
+ Project vertices using a 4x4 transformation matrix.
Args:
verts: FloatTensor of shape (N, V, 3) giving a batch of vertex positions or of
@@ -403,7 +403,7 @@ def project_verts(verts, P, eps=1e-1):
# Add an extra row of ones to the world-space coordinates of verts before
# multiplying by the projection matrix. We could avoid this allocation by
- # instead multiplying by a 4x3 submatrix of the projectio matrix, then
+ # instead multiplying by a 4x3 submatrix of the projection matrix, then
# adding the remaining 4x1 vector. Not sure whether there will be much
# performance difference between the two.
ones = torch.ones(N, V, 1, dtype=dtype, device=device)
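The allocation discussed in this comment is the standard homogeneous-coordinates step; a plain-torch sketch of it (sign handling for points behind the camera is omitted here, unlike the real function):

```python
verts_hom = torch.cat([verts, torch.ones(N, V, 1, dtype=dtype, device=device)], dim=2)
verts_cam_hom = torch.bmm(verts_hom, P.transpose(1, 2))  # (N, V, 4)
w = verts_cam_hom[:, :, 3:].clamp(min=eps)               # guard the division
verts_proj = verts_cam_hom[:, :, :3] / w
```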
diff --git a/pytorch3d/datasets/shapenet/shapenet_core.py b/pytorch3d/datasets/shapenet/shapenet_core.py
index 78184be8..a8635fd9 100644
--- a/pytorch3d/datasets/shapenet/shapenet_core.py
+++ b/pytorch3d/datasets/shapenet/shapenet_core.py
@@ -37,7 +37,7 @@ class ShapeNetCore(ShapeNetBase):
synset offsets or labels. A combination of both is also accepted.
When no category is specified, all categories in data_dir are loaded.
version: (int) version of ShapeNetCore data in data_dir, 1 or 2.
- Default is set to be 1. Version 1 has 57 categories and verions 2 has 55
+ Default is set to be 1. Version 1 has 57 categories and version 2 has 55
categories.
Note: version 1 has two categories 02858304(boat) and 02992529(cellphone)
that are hyponyms of categories 04530566(watercraft) and 04401088(telephone)
@@ -63,7 +63,7 @@ class ShapeNetCore(ShapeNetBase):
dict_file = "shapenet_synset_dict_v%d.json" % version
with open(path.join(SYNSET_DICT_DIR, dict_file), "r") as read_dict:
self.synset_dict = json.load(read_dict)
- # Inverse dicitonary mapping synset labels to corresponding offsets.
+ # Inverse dictionary mapping synset labels to corresponding offsets.
self.synset_inv = {label: offset for offset, label in self.synset_dict.items()}
# If categories are specified, check if each category is in the form of either
diff --git a/pytorch3d/datasets/shapenet_base.py b/pytorch3d/datasets/shapenet_base.py
index 32d5c33b..6eca6f84 100644
--- a/pytorch3d/datasets/shapenet_base.py
+++ b/pytorch3d/datasets/shapenet_base.py
@@ -250,7 +250,7 @@ class ShapeNetBase(torch.utils.data.Dataset):
Helper function for sampling a number of indices from the given category.
Args:
- sample_num: number of indicies to be sampled from the given category.
+ sample_num: number of indices to be sampled from the given category.
category: category synset of the category to be sampled from. If not
specified, sample from all models in the loaded dataset.
"""
diff --git a/pytorch3d/io/mtl_io.py b/pytorch3d/io/mtl_io.py
index 79f65d01..6d3fa602 100644
--- a/pytorch3d/io/mtl_io.py
+++ b/pytorch3d/io/mtl_io.py
@@ -28,7 +28,7 @@ def make_mesh_texture_atlas(
Args:
material_properties: dict of properties for each material. If a material
- does not have any properties it will have an emtpy dict.
+ does not have any properties it will have an empty dict.
texture_images: dict of material names and texture images
face_material_names: numpy array of the material name corresponding to each
face. Faces which don't have an associated material will be an empty string.
@@ -220,13 +220,13 @@ def make_material_atlas(
For each grid cell we can now calculate the centroid `(c_y, c_x)`
of the corresponding texture triangle:
- - if `(x + y) < R`, then offsett the centroid of
+ - if `(x + y) < R`, then offset the centroid of
triangle 0 by `(y, x) * (1/R)`
- if `(x + y) > R`, then offset the centroid of
triangle 8 by `((R-1-y), (R-1-x)) * (1/R)`.
This is equivalent to updating the portion of Grid 1
- above the diagnonal, replacing `(y, x)` with `((R-1-y), (R-1-x))`:
+ above the diagonal, replacing `(y, x)` with `((R-1-y), (R-1-x))`:
..code-block::python
diff --git a/pytorch3d/io/obj_io.py b/pytorch3d/io/obj_io.py
index e6fe9d80..bb329fb4 100644
--- a/pytorch3d/io/obj_io.py
+++ b/pytorch3d/io/obj_io.py
@@ -109,13 +109,13 @@ def load_obj(
Faces are interpreted as follows:
::
- 5/2/1 describes the first vertex of the first triange
+ 5/2/1 describes the first vertex of the first triangle
- 5: index of vertex [1.000000 1.000000 -1.000000]
- 2: index of texture coordinate [0.749279 0.501284]
- 1: index of normal [0.000000 0.000000 -1.000000]
If there are faces with more than 3 vertices
- they are subdivided into triangles. Polygonal faces are assummed to have
+ they are subdivided into triangles. Polygonal faces are assumed to have
vertices ordered counter-clockwise so the (right-handed) normal points
out of the screen e.g. a proper rectangular face would be specified like this:
::
@@ -368,7 +368,7 @@ def _parse_face(
face_normals.append(int(vert_props[2]))
if len(vert_props) > 3:
raise ValueError(
- "Face vertices can ony have 3 properties. \
+ "Face vertices can only have 3 properties. \
Face vert %s, Line: %s"
% (str(vert_props), str(line))
)
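A hedged usage sketch of the `load_obj` API whose docstring is edited above (the filename is a placeholder):

```python
from pytorch3d.io import load_obj

verts, faces, aux = load_obj("model.obj")
faces_idx = faces.verts_idx   # (F, 3) vertex indices, one triple per triangle
verts_uvs = aux.verts_uvs     # texture coordinates, if the file provides them
```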
diff --git a/pytorch3d/io/pluggable.py b/pytorch3d/io/pluggable.py
index 6d1b4691..161233fa 100644
--- a/pytorch3d/io/pluggable.py
+++ b/pytorch3d/io/pluggable.py
@@ -33,7 +33,7 @@ and to save a point cloud you might do
```
pcl = Pointclouds(...)
-IO().save_pointcloud(pcl, "output_poincloud.obj")
+IO().save_pointcloud(pcl, "output_pointcloud.obj")
```
"""
@@ -43,7 +43,7 @@ class IO:
"""
This class is the interface to flexible loading and saving of meshes and point clouds.
- In simple cases the user will just initialise an instance of this class as `IO()`
+ In simple cases the user will just initialize an instance of this class as `IO()`
and then use its load and save functions. The arguments of the initializer are not
usually needed.
@@ -53,7 +53,7 @@ class IO:
Args:
include_default_formats: If False, the built-in file formats will not be available.
Then only user-registered formats can be used.
- path_manager: Used to customise how paths given as strings are interpreted.
+ path_manager: Used to customize how paths given as strings are interpreted.
"""
def __init__(
diff --git a/pytorch3d/io/ply_io.py b/pytorch3d/io/ply_io.py
index 2df8461f..9fe557ba 100644
--- a/pytorch3d/io/ply_io.py
+++ b/pytorch3d/io/ply_io.py
@@ -380,7 +380,7 @@ def _try_read_ply_constant_list_ascii(f, definition: _PlyElementType):
return data[:, 1:]
-def _parse_heterogenous_property_ascii(datum, line_iter, property: _Property):
+def _parse_heterogeneous_property_ascii(datum, line_iter, property: _Property):
"""
Read a general data property from an ascii .ply file.
@@ -431,7 +431,7 @@ def _read_ply_element_ascii(f, definition: _PlyElementType):
In simple cases where every element has the same size, 2D numpy array
corresponding to the data. The rows are the different values.
Otherwise a list of lists of values, where the outer list is
- each occurence of the element, and the inner lists have one value per
+ each occurrence of the element, and the inner lists have one value per
property.
"""
if not definition.count:
@@ -454,7 +454,7 @@ def _read_ply_element_ascii(f, definition: _PlyElementType):
datum = []
line_iter = iter(line_string.strip().split())
for property in definition.properties:
- _parse_heterogenous_property_ascii(datum, line_iter, property)
+ _parse_heterogeneous_property_ascii(datum, line_iter, property)
data.append(datum)
if next(line_iter, None) is not None:
raise ValueError("Too much data for an element.")
@@ -669,7 +669,7 @@ def _read_ply_element_binary(f, definition: _PlyElementType, big_endian: bool) -
In simple cases where every element has the same size, 2D numpy array
corresponding to the data. The rows are the different values.
Otherwise a list of lists/tuples of values, where the outer list is
- each occurence of the element, and the inner lists have one value per
+ each occurrence of the element, and the inner lists have one value per
property.
"""
if not definition.count:
@@ -1027,7 +1027,7 @@ def _save_ply(
Args:
f: File object to which the 3D data should be written.
verts: FloatTensor of shape (V, 3) giving vertex coordinates.
- faces: LongTensor of shsape (F, 3) giving faces.
+ faces: LongTensor of shape (F, 3) giving faces.
verts_normals: FloatTensor of shape (V, 3) giving vertex normals.
ascii: (bool) whether to use the ascii ply format.
decimal_places: Number of decimal places for saving if ascii=True.
diff --git a/pytorch3d/loss/mesh_laplacian_smoothing.py b/pytorch3d/loss/mesh_laplacian_smoothing.py
index 575a7c3e..571005ec 100644
--- a/pytorch3d/loss/mesh_laplacian_smoothing.py
+++ b/pytorch3d/loss/mesh_laplacian_smoothing.py
@@ -9,7 +9,7 @@ def mesh_laplacian_smoothing(meshes, method: str = "uniform"):
Computes the laplacian smoothing objective for a batch of meshes.
This function supports three variants of Laplacian smoothing,
namely with uniform weights("uniform"), with cotangent weights ("cot"),
- and cotangent cuvature ("cotcurv").For more details read [1, 2].
+    and cotangent curvature ("cotcurv"). For more details read [1, 2].
Args:
meshes: Meshes object with a batch of meshes.
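Given a `Meshes` batch `meshes`, the three variants named above are selected via the `method` argument:

```python
from pytorch3d.loss import mesh_laplacian_smoothing

loss_uniform = mesh_laplacian_smoothing(meshes, method="uniform")
loss_cot = mesh_laplacian_smoothing(meshes, method="cot")
loss_cotcurv = mesh_laplacian_smoothing(meshes, method="cotcurv")
```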
diff --git a/pytorch3d/loss/point_mesh_distance.py b/pytorch3d/loss/point_mesh_distance.py
index da4fcb59..bffe99e4 100644
--- a/pytorch3d/loss/point_mesh_distance.py
+++ b/pytorch3d/loss/point_mesh_distance.py
@@ -91,7 +91,7 @@ class _FacePointDistance(Function):
max_tris: Scalar equal to maximum number of faces in the batch
Returns:
dists: FloatTensor of shape `(T,)`, where `dists[t]` is the squared
- euclidean distance of `t`-th trianguar face to the closest point in the
+ euclidean distance of `t`-th triangular face to the closest point in the
corresponding example in the batch
idxs: LongTensor of shape `(T,)` indicating the closest point in the
corresponding example in the batch.
diff --git a/pytorch3d/ops/interp_face_attrs.py b/pytorch3d/ops/interp_face_attrs.py
index da11b0df..e22392d0 100644
--- a/pytorch3d/ops/interp_face_attrs.py
+++ b/pytorch3d/ops/interp_face_attrs.py
@@ -21,7 +21,7 @@ def interpolate_face_attributes(
pixel in the image. A value < 0 indicates that the pixel does not
overlap any face and should be skipped.
barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
- the barycentric coordianates of each pixel
+ the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
face_attributes: packed attributes of shape (total_faces, 3, D),
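A hedged sketch of the call documented above, with `fragments` as produced by a mesh rasterizer and `face_colors` an assumed `(total_faces, 3, D)` attribute tensor:

```python
from pytorch3d.ops import interpolate_face_attributes

pixel_vals = interpolate_face_attributes(
    fragments.pix_to_face,   # (N, H, W, K) face indices, -1 where no overlap
    fragments.bary_coords,   # (N, H, W, K, 3) barycentric coordinates
    face_colors,             # (total_faces, 3, D) per-face-vertex attributes
)                            # -> (N, H, W, K, D)
```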
diff --git a/pytorch3d/ops/knn.py b/pytorch3d/ops/knn.py
index 4f3df23a..bfab9781 100644
--- a/pytorch3d/ops/knn.py
+++ b/pytorch3d/ops/knn.py
@@ -147,7 +147,7 @@ def knn_points(
p2_nn = knn_gather(p2, p1_idx, lengths2)
which is a helper function that allows indexing any tensor of shape (N, P2, U) with
- the indices `p1_idx` returned by `knn_points`. The outout is a tensor
+ the indices `p1_idx` returned by `knn_points`. The output is a tensor
of shape (N, P1, K, U).
"""
diff --git a/pytorch3d/ops/perspective_n_points.py b/pytorch3d/ops/perspective_n_points.py
index 9a3f73f6..89faf8c8 100644
--- a/pytorch3d/ops/perspective_n_points.py
+++ b/pytorch3d/ops/perspective_n_points.py
@@ -184,7 +184,7 @@ def _gen_pairs(input, dim=-2, reducer=lambda a, b: ((a - b) ** 2).sum(dim=-1)):
def _kernel_vec_distances(v):
- """Computes the coefficients for linearisation of the quadratic system
+ """Computes the coefficients for linearization of the quadratic system
to match all pairwise distances between 4 control points (dim=1).
The last dimension corresponds to the coefficients for quadratic terms
Bij = Bi * Bj, where Bi and Bj correspond to kernel vectors.
diff --git a/pytorch3d/ops/points_normals.py b/pytorch3d/ops/points_normals.py
index 741465b3..0ec80780 100644
--- a/pytorch3d/ops/points_normals.py
+++ b/pytorch3d/ops/points_normals.py
@@ -28,7 +28,7 @@ def estimate_pointcloud_normals(
**neighborhood_size**: The size of the neighborhood used to estimate the
geometry around each point.
**disambiguate_directions**: If `True`, uses the algorithm from [1] to
- ensure sign consistency of the normals of neigboring points.
+ ensure sign consistency of the normals of neighboring points.
Returns:
**normals**: A tensor of normals for each input point
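A usage sketch of the estimator documented above, for a `Pointclouds` batch `pointclouds`:

```python
from pytorch3d.ops import estimate_pointcloud_normals

normals = estimate_pointcloud_normals(
    pointclouds, neighborhood_size=50, disambiguate_directions=True
)
```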
@@ -83,7 +83,7 @@ def estimate_pointcloud_local_coord_frames(
**neighborhood_size**: The size of the neighborhood used to estimate the
geometry around each point.
**disambiguate_directions**: If `True`, uses the algorithm from [1] to
- ensure sign consistency of the normals of neigboring points.
+ ensure sign consistency of the normals of neighboring points.
Returns:
**curvatures**: The three principal curvatures of each point
diff --git a/pytorch3d/ops/points_to_volumes.py b/pytorch3d/ops/points_to_volumes.py
index 29e0ed88..4356eb42 100644
--- a/pytorch3d/ops/points_to_volumes.py
+++ b/pytorch3d/ops/points_to_volumes.py
@@ -140,7 +140,7 @@ def add_points_features_to_volume_densities_features(
volume_features: Batch of input feature volumes of shape
`(minibatch, feature_dim, D, H, W)`
If set to `None`, the `volume_features` will be automatically
- instantiatied with a correct size and filled with 0s.
+ instantiated with a correct size and filled with 0s.
mode: The mode of the conversion of individual points into the volume.
Set either to `nearest` or `trilinear`:
`nearest`: Each 3D point is first rounded to the volumetric
@@ -310,7 +310,7 @@ def splat_points_to_volumes(
# minibatch x n_points x feature_dim -> minibatch x feature_dim x n_points
points_features = points_features.permute(0, 2, 1).contiguous()
- # XYZ = the upper-left volume index of the 8-neigborhood of every point
+ # XYZ = the upper-left volume index of the 8-neighborhood of every point
# grid_sizes is of the form (minibatch, depth-height-width)
grid_sizes_xyz = grid_sizes[:, [2, 1, 0]]
diff --git a/pytorch3d/ops/sample_points_from_meshes.py b/pytorch3d/ops/sample_points_from_meshes.py
index 9e6dac54..afc573f6 100644
--- a/pytorch3d/ops/sample_points_from_meshes.py
+++ b/pytorch3d/ops/sample_points_from_meshes.py
@@ -25,8 +25,9 @@ def sample_points_from_meshes(
Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
]:
"""
- Convert a batch of meshes to a pointcloud by uniformly sampling points on
- the surface of the mesh with probability proportional to the face area.
+ Convert a batch of meshes to a batch of pointclouds by uniformly sampling
+ points on the surface of the mesh with probability proportional to the
+ face area.
Args:
meshes: A Meshes object with a batch of N meshes.
@@ -54,7 +55,7 @@ def sample_points_from_meshes(
.. code-block:: python
- Poinclouds(samples, normals=normals, features=textures)
+ Pointclouds(samples, normals=normals, features=textures)
"""
if meshes.isempty():
raise ValueError("Meshes are empty.")
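A sketch combining the sampling call with the `Pointclouds` construction shown in the docstring:

```python
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.structures import Pointclouds

samples, normals = sample_points_from_meshes(
    meshes, num_samples=5000, return_normals=True
)
pcl = Pointclouds(points=samples, normals=normals)
```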
@@ -71,7 +72,7 @@ def sample_points_from_meshes(
num_meshes = len(meshes)
num_valid_meshes = torch.sum(meshes.valid) # Non empty meshes.
- # Intialize samples tensor with fill value 0 for empty meshes.
+ # Initialize samples tensor with fill value 0 for empty meshes.
samples = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)
# Only compute samples for non empty meshes
@@ -104,7 +105,7 @@ def sample_points_from_meshes(
samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c
if return_normals:
- # Intialize normals tensor with fill value 0 for empty meshes.
+ # Initialize normals tensor with fill value 0 for empty meshes.
# Normals for the sampled points are face normals computed from
# the vertices of the face in which the sampled point lies.
normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)
diff --git a/pytorch3d/ops/utils.py b/pytorch3d/ops/utils.py
index 3a6ba4a9..cbc7f3d5 100644
--- a/pytorch3d/ops/utils.py
+++ b/pytorch3d/ops/utils.py
@@ -27,7 +27,7 @@ def wmean(
the last (spatial) dimension are assumed same;
dim: dimension(s) in `x` to average over;
keepdim: tells whether to keep the resulting singleton dimension.
- eps: minumum clamping value in the denominator.
+ eps: minimum clamping value in the denominator.
Returns:
the mean tensor:
* if `weights` is None => `mean(x, dim)`,
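The clamped-denominator mean described above amounts to the following plain-torch sketch (with `weights` broadcast over the last, spatial dimension):

```python
w = weights[..., None]  # broadcast weights across the spatial dimension
mean = (x * w).sum(dim=dim, keepdim=True) / w.sum(dim=dim, keepdim=True).clamp(eps)
```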
diff --git a/pytorch3d/ops/vert_align.py b/pytorch3d/ops/vert_align.py
index c53a1fb7..078f9678 100644
--- a/pytorch3d/ops/vert_align.py
+++ b/pytorch3d/ops/vert_align.py
@@ -15,7 +15,7 @@ def vert_align(
) -> torch.Tensor:
"""
Sample vertex features from a feature map. This operation is called
- "perceptual feaure pooling" in [1] or "vert align" in [2].
+ "perceptual feature pooling" in [1] or "vert align" in [2].
[1] Wang et al, "Pixel2Mesh: Generating 3D Mesh Models from Single
RGB Images", ECCV 2018.
@@ -45,7 +45,7 @@ def vert_align(
Returns:
feats_sampled: FloatTensor of shape (N, V, C) giving sampled features for each
- vertex. If feats is a list, we return concatentated features in axis=2 of
+ vertex. If feats is a list, we return concatenated features in axis=2 of
shape (N, V, sum(C_n)) where C_n = feats[n].shape[1].
If return_packed = True, the features are transformed to a packed
representation of shape (sum(V), C)
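A minimal usage sketch of `vert_align`, sampling an `(N, C, H, W)` feature map `feats` at the vertex locations:

```python
from pytorch3d.ops import vert_align

feats_sampled = vert_align(feats, verts)  # (N, V, C)
```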
diff --git a/pytorch3d/renderer/cameras.py b/pytorch3d/renderer/cameras.py
index 22887b1e..35db1d29 100644
--- a/pytorch3d/renderer/cameras.py
+++ b/pytorch3d/renderer/cameras.py
@@ -30,7 +30,7 @@ class CamerasBase(TensorProperties):
The transformation from world -> view happens after applying a rotation (R)
and translation (T)
- NDC coordinate system: This is the normalized coordinate system that confines
- in a volume the renderered part of the object or scene. Also known as view volume.
+ in a volume the rendered part of the object or scene. Also known as view volume.
Given the PyTorch3D convention, (+1, +1, znear) is the top left near corner,
and (-1, -1, zfar) is the bottom right far corner of the volume.
The transformation from view -> NDC happens after applying the camera
@@ -78,7 +78,7 @@ class CamerasBase(TensorProperties):
def unproject_points(self):
"""
- Transform input points from NDC coodinates
+ Transform input points from NDC coordinates
to the world / camera coordinates.
Each of the input points `xy_depth` of shape (..., 3) is
@@ -210,7 +210,7 @@ class CamerasBase(TensorProperties):
For `CamerasBase.transform_points`, setting `eps > 0`
stabilizes gradients since it leads to avoiding division
- by excessivelly low numbers for points close to the
+ by excessively low numbers for points close to the
camera plane.
Returns
@@ -235,7 +235,7 @@ class CamerasBase(TensorProperties):
For `CamerasBase.transform_points`, setting `eps > 0`
stabilizes gradients since it leads to avoiding division
- by excessivelly low numbers for points close to the
+ by excessively low numbers for points close to the
camera plane.
Returns
@@ -318,7 +318,7 @@ def OpenGLPerspectiveCameras(
class FoVPerspectiveCameras(CamerasBase):
"""
A class which stores a batch of parameters to generate a batch of
- projection matrices by specifiying the field of view.
+ projection matrices by specifying the field of view.
The definition of the parameters follow the OpenGL perspective camera.
The extrinsics of the camera (R and T matrices) can also be set in the
@@ -405,7 +405,7 @@ class FoVPerspectiveCameras(CamerasBase):
degrees: bool, set to True if fov is specified in degrees.
Returns:
- torch.floatTensor of the calibration matrix with shape (N, 4, 4)
+ torch.FloatTensor of the calibration matrix with shape (N, 4, 4)
"""
K = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32)
ones = torch.ones((self._N), dtype=torch.float32, device=self.device)
@@ -421,7 +421,7 @@ class FoVPerspectiveCameras(CamerasBase):
min_x = -max_x
# NOTE: In OpenGL the projection matrix changes the handedness of the
- # coordinate frame. i.e the NDC space postive z direction is the
+    # coordinate frame. i.e. the NDC space positive z direction is the
# camera space negative z direction. This is because the sign of the z
# in the projection matrix is set to -1.0.
# In pytorch3d we maintain a right handed coordinate system throughout
@@ -444,7 +444,7 @@ class FoVPerspectiveCameras(CamerasBase):
def get_projection_transform(self, **kwargs) -> Transform3d:
"""
- Calculate the perpective projection matrix with a symmetric
+ Calculate the perspective projection matrix with a symmetric
viewing frustrum. Use column major order.
The viewing frustrum will be projected into ndc, s.t.
(max_x, max_y) -> (+1, +1)
@@ -586,7 +586,7 @@ def OpenGLOrthographicCameras(
class FoVOrthographicCameras(CamerasBase):
"""
A class which stores a batch of parameters to generate a batch of
- projection matrices by specifiying the field of view.
+ projection matrices by specifying the field of view.
The definition of the parameters follow the OpenGL orthographic camera.
"""
@@ -612,7 +612,7 @@ class FoVOrthographicCameras(CamerasBase):
max_y: maximum y coordinate of the frustrum.
min_y: minimum y coordinate of the frustrum.
max_x: maximum x coordinate of the frustrum.
- min_x: minumum x coordinage of the frustrum
+        min_x: minimum x coordinate of the frustrum.
scale_xyz: scale factors for each axis of shape (N, 3).
R: Rotation matrix of shape (N, 3, 3).
T: Translation of shape (N, 3).
@@ -649,7 +649,7 @@ class FoVOrthographicCameras(CamerasBase):
znear: near clipping plane of the view frustrum.
zfar: far clipping plane of the view frustrum.
max_x: maximum x coordinate of the frustrum.
- min_x: minumum x coordinage of the frustrum
+        min_x: minimum x coordinate of the frustrum.
max_y: maximum y coordinate of the frustrum.
min_y: minimum y coordinate of the frustrum.
scale_xyz: scale factors for each axis of shape (N, 3).
@@ -693,7 +693,7 @@ class FoVOrthographicCameras(CamerasBase):
scale_z = 2 / (far-near)
mid_x = (max_x + min_x) / (max_x - min_x)
mix_y = (max_y + min_y) / (max_y - min_y)
- mid_z = (far + near) / (far−near)
+ mid_z = (far + near) / (far - near)
K = [
[scale_x, 0, 0, -mid_x],
@@ -811,7 +811,7 @@ class PerspectiveCameras(CamerasBase):
If you wish to provide parameters in screen space, you NEED to provide
the image_size = (imwidth, imheight).
If you wish to provide parameters in NDC space, you should NOT provide
- image_size. Providing valid image_size will triger a screen space to
+ image_size. Providing valid image_size will trigger a screen space to
NDC space transformation in the camera.
For example, here is how to define cameras on the two spaces.
@@ -978,7 +978,7 @@ class OrthographicCameras(CamerasBase):
If you wish to provide parameters in screen space, you NEED to provide
the image_size = (imwidth, imheight).
If you wish to provide parameters in NDC space, you should NOT provide
- image_size. Providing valid image_size will triger a screen space to
+ image_size. Providing valid image_size will trigger a screen space to
NDC space transformation in the camera.
For example, here is how to define cameras on the two spaces.
@@ -1120,7 +1120,7 @@ def _get_sfm_calibration_matrix(
image_size=None,
) -> torch.Tensor:
"""
- Returns a calibration matrix of a perspective/orthograpic camera.
+ Returns a calibration matrix of a perspective/orthographic camera.
Args:
N: Number of cameras.
@@ -1355,7 +1355,7 @@ def look_at_view_transform(
Args:
dist: distance of the camera from the object
- elev: angle in degres or radians. This is the angle between the
+ elev: angle in degrees or radians. This is the angle between the
vector from the object to the camera, and the horizontal plane y = 0 (xz-plane).
azim: angle in degrees or radians. The vector from the object to
the camera is projected onto a horizontal plane y = 0.
@@ -1365,7 +1365,7 @@ def look_at_view_transform(
degrees: boolean flag to indicate if the elevation and azimuth
angles are specified in degrees or radians.
eye: the position of the camera(s) in world coordinates. If eye is not
- None, it will overide the camera position derived from dist, elev, azim.
+ None, it will override the camera position derived from dist, elev, azim.
up: the direction of the x axis in the world coordinate system.
at: the position of the object(s) in world coordinates.
eye, up and at can be of shape (1, 3) or (N, 3).
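A usage sketch of the convention documented above (angles in degrees by default):

```python
from pytorch3d.renderer import FoVPerspectiveCameras, look_at_view_transform

R, T = look_at_view_transform(dist=2.7, elev=10.0, azim=150.0)
cameras = FoVPerspectiveCameras(R=R, T=T)
```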
diff --git a/pytorch3d/renderer/implicit/raymarching.py b/pytorch3d/renderer/implicit/raymarching.py
index b5164dab..bafb7da9 100644
--- a/pytorch3d/renderer/implicit/raymarching.py
+++ b/pytorch3d/renderer/implicit/raymarching.py
@@ -67,13 +67,13 @@ class EmissionAbsorptionRaymarcher(torch.nn.Module):
rays_features: Per-ray feature values represented with a tensor
of shape `(..., n_points_per_ray, feature_dim)`.
eps: A lower bound added to `rays_densities` before computing
- the absorbtion function (cumprod of `1-rays_densities` along
+ the absorption function (cumprod of `1-rays_densities` along
            each ray). This prevents the cumprod from yielding an exact 0
which would inhibit any gradient-based learning.
Returns:
features_opacities: A tensor of shape `(..., feature_dim+1)`
- that concatenates two tensors alonng the last dimension:
+ that concatenates two tensors along the last dimension:
1) features: A tensor of per-ray renders
of shape `(..., feature_dim)`.
2) opacities: A tensor of per-ray opacity values
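A plain-torch sketch of the emission-absorption weights implied by the docstring above; an illustration, not the library's exact implementation:

```python
import torch

def ea_weights(rays_densities: torch.Tensor, eps: float = 1e-10) -> torch.Tensor:
    # w_i = d_i * prod_{j<i} (1 - d_j + eps); eps keeps the cumulative
    # product away from exact 0 so gradients can still flow.
    ones = torch.ones_like(rays_densities[..., :1])
    absorption = torch.cumprod(
        torch.cat([ones, 1.0 - rays_densities + eps], dim=-1), dim=-1
    )[..., :-1]
    return rays_densities * absorption
```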
diff --git a/pytorch3d/renderer/implicit/raysampling.py b/pytorch3d/renderer/implicit/raysampling.py
index 9536a42c..b1e40bd5 100644
--- a/pytorch3d/renderer/implicit/raysampling.py
+++ b/pytorch3d/renderer/implicit/raysampling.py
@@ -16,7 +16,7 @@ This file defines three raysampling techniques:
class GridRaysampler(torch.nn.Module):
"""
- Samples a fixed number of points along rays which are regulary distributed
+ Samples a fixed number of points along rays which are regularly distributed
in a batch of rectangular image grids. Points along each ray
have uniformly-spaced z-coordinates between a predefined
minimum and maximum depth.
@@ -129,7 +129,7 @@ class GridRaysampler(torch.nn.Module):
class NDCGridRaysampler(GridRaysampler):
"""
- Samples a fixed number of points along rays which are regulary distributed
+ Samples a fixed number of points along rays which are regularly distributed
in a batch of rectangular image grids. Points along each ray
have uniformly-spaced z-coordinates between a predefined minimum and maximum depth.
diff --git a/pytorch3d/renderer/implicit/renderer.py b/pytorch3d/renderer/implicit/renderer.py
index 9f2116bc..b8ef24e6 100644
--- a/pytorch3d/renderer/implicit/renderer.py
+++ b/pytorch3d/renderer/implicit/renderer.py
@@ -18,7 +18,7 @@ from .utils import _validate_ray_bundle_variables, ray_bundle_variables_to_ray_p
# 1) The raysampler:
# - samples rays from input cameras
# - transforms the rays to world coordinates
-# 2) The volumetric_function (which is a callable argument of the forwad pass)
+# 2) The volumetric_function (which is a callable argument of the forward pass)
# evaluates ray_densities and ray_features at the sampled ray-points.
# 3) The raymarcher takes ray_densities and ray_features and uses a raymarching
# algorithm to render each ray.
@@ -64,7 +64,7 @@ class ImplicitRenderer(torch.nn.Module):
        a feature vector for each ray point.
Note that, in order to increase flexibility of the API, we allow multiple
- other arguments to enter the volumentric function via additional
+ other arguments to enter the volumetric function via additional
(optional) keyword arguments `**kwargs`.
A typical use-case is passing a `CamerasBase` object as an additional
keyword argument, which can allow the volumetric function to adjust its
@@ -131,7 +131,7 @@ class ImplicitRenderer(torch.nn.Module):
Args:
cameras: A batch of cameras that render the scene. A `self.raysampler`
takes the cameras as input and samples rays that pass through the
- domain of the volumentric function.
+ domain of the volumetric function.
volumetric_function: A `Callable` that accepts the parametrizations
of the rendering rays and returns the densities and features
at the respective 3D of the rendering rays. Please refer to
@@ -229,7 +229,7 @@ class VolumeRenderer(torch.nn.Module):
Args:
cameras: A batch of cameras that render the scene. A `self.raysampler`
takes the cameras as input and samples rays that pass through the
- domain of the volumentric function.
+ domain of the volumetric function.
volumes: An instance of the `Volumes` class representing a
batch of volumes that are being rendered.
@@ -247,7 +247,7 @@ class VolumeRenderer(torch.nn.Module):
class VolumeSampler(torch.nn.Module):
"""
- A class that allows to sample a batch of volumes `Volumes`
+ A module to sample a batch of volumes `Volumes`
at 3D points sampled along projection rays.
"""
@@ -255,7 +255,7 @@ class VolumeSampler(torch.nn.Module):
"""
Args:
volumes: An instance of the `Volumes` class representing a
- batch if volumes that are being rendered.
+ batch of volumes that are being rendered.
sample_mode: Defines the algorithm used to sample the volumetric
voxel grid. Can be either "bilinear" or "nearest".
"""
@@ -300,7 +300,7 @@ class VolumeSampler(torch.nn.Module):
Returns:
rays_densities: A tensor of shape
`(minibatch, ..., num_points_per_ray, opacity_dim)` containing the
- densitity vectors sampled from the volume at the locations of
+ density vectors sampled from the volume at the locations of
the ray points.
rays_features: A tensor of shape
`(minibatch, ..., num_points_per_ray, feature_dim)` containing the
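A hedged end-to-end sketch of the raysampler/raymarcher pipeline described in this file (`cameras` and `volumes` assumed constructed elsewhere):

```python
from pytorch3d.renderer import (
    EmissionAbsorptionRaymarcher,
    NDCGridRaysampler,
    VolumeRenderer,
)

renderer = VolumeRenderer(
    raysampler=NDCGridRaysampler(
        image_width=64, image_height=64,
        n_pts_per_ray=64, min_depth=0.1, max_depth=3.0,
    ),
    raymarcher=EmissionAbsorptionRaymarcher(),
)
images, ray_bundle = renderer(cameras=cameras, volumes=volumes)
```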
diff --git a/pytorch3d/renderer/lighting.py b/pytorch3d/renderer/lighting.py
index 49285cc4..d4e198d2 100644
--- a/pytorch3d/renderer/lighting.py
+++ b/pytorch3d/renderer/lighting.py
@@ -44,7 +44,7 @@ def diffuse(normals, color, direction) -> torch.Tensor:
average/interpolated face coordinates.
"""
# TODO: handle multiple directional lights per batch element.
- # TODO: handle attentuation.
+ # TODO: handle attenuation.
# Ensure color and location have same batch dimension as normals
normals, color, direction = convert_to_tensors_and_broadcast(
@@ -107,7 +107,7 @@ def specular(
meshes.verts_packed_to_mesh_idx() or meshes.faces_packed_to_mesh_idx().
"""
# TODO: handle multiple directional lights
- # TODO: attentuate based on inverse squared distance to the light source
+ # TODO: attenuate based on inverse squared distance to the light source
if points.shape != normals.shape:
msg = "Expected points and normals to have the same shape: got %r, %r"
diff --git a/pytorch3d/renderer/mesh/rasterize_meshes.py b/pytorch3d/renderer/mesh/rasterize_meshes.py
index 5d37d1da..6af4a0f3 100644
--- a/pytorch3d/renderer/mesh/rasterize_meshes.py
+++ b/pytorch3d/renderer/mesh/rasterize_meshes.py
@@ -17,7 +17,7 @@ from .clip import (
# TODO make the epsilon user configurable
kEpsilon = 1e-8
-# Maxinum number of faces per bins for
+# Maximum number of faces per bin for
# coarse-to-fine rasterization
kMaxFacesPerBin = 22
@@ -68,7 +68,7 @@ def rasterize_meshes(
set it heuristically based on the shape of the input. This should not
affect the output, but can affect the speed of the forward pass.
faces_per_bin: Only applicable when using coarse-to-fine rasterization
- (bin_size > 0); this is the maxiumum number of faces allowed within each
+ (bin_size > 0); this is the maximum number of faces allowed within each
bin. If more than this many faces actually fall into a bin, an error
will be raised. This should not affect the output values, but can affect
the memory usage in the forward pass.
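A hedged configuration sketch for the coarse-to-fine options discussed above (`cameras` and `meshes` are assumed; on `RasterizationSettings` the bin capacity is exposed as `max_faces_per_bin`, and `None` defers to the heuristic):

```python
from pytorch3d.renderer import MeshRasterizer, RasterizationSettings

raster_settings = RasterizationSettings(
    image_size=256,
    blur_radius=0.0,
    faces_per_pixel=1,
    bin_size=None,           # None => set heuristically from the input shape
    max_faces_per_bin=None,  # None => heuristic; an overfull bin raises an error
)
rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
fragments = rasterizer(meshes)
```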
@@ -138,7 +138,7 @@ def rasterize_meshes(
num_faces_per_mesh = meshes.num_faces_per_mesh()
# In the case that H != W use the max image size to set the bin_size
- # to accommodate the num bins constraint in the coarse rasteizer.
+ # to accommodate the num bins constraint in the coarse rasterizer.
# If the ratio of H:W is large this might cause issues as the smaller
# dimension will have fewer bins.
# TODO: consider a better way of setting the bin size.
@@ -453,7 +453,7 @@ def rasterize_meshes_python(
mesh_to_face_first_idx = clipped_faces.mesh_to_face_first_idx
num_faces_per_mesh = clipped_faces.num_faces_per_mesh
- # Intialize output tensors.
+ # Initialize output tensors.
face_idxs = torch.full(
(N, H, W, K), fill_value=-1, dtype=torch.int64, device=device
)
@@ -662,7 +662,7 @@ def barycentric_coordinates_clip(bary):
Clip negative barycentric coordinates to 0.0 and renormalize so
the barycentric coordinates for a point sum to 1. When the blur_radius
is greater than 0, a face will still be recorded as overlapping a pixel
- if the pixel is outisde the face. In this case at least one of the
+ if the pixel is outside the face. In this case at least one of the
barycentric coordinates for the pixel relative to the face will be negative.
Clipping will ensure that the texture and z buffer are interpolated correctly.
diff --git a/pytorch3d/renderer/mesh/rasterizer.py b/pytorch3d/renderer/mesh/rasterizer.py
index 413a635e..e66acf3f 100644
--- a/pytorch3d/renderer/mesh/rasterizer.py
+++ b/pytorch3d/renderer/mesh/rasterizer.py
@@ -60,7 +60,7 @@ class RasterizationSettings:
class MeshRasterizer(nn.Module):
"""
- This class implements methods for rasterizing a batch of heterogenous
+ This class implements methods for rasterizing a batch of heterogeneous
Meshes.
"""
diff --git a/pytorch3d/renderer/mesh/textures.py b/pytorch3d/renderer/mesh/textures.py
index d3345296..2ad3db3b 100644
--- a/pytorch3d/renderer/mesh/textures.py
+++ b/pytorch3d/renderer/mesh/textures.py
@@ -240,7 +240,7 @@ class TexturesBase:
number of faces in the i-th mesh and C is the dimensional of
the feature (C = 3 for RGB textures).
You can use the utils function in structures.utils to convert the
- packed respresentation to a list or padded.
+ packed representation to a list or padded.
"""
raise NotImplementedError()
@@ -261,10 +261,10 @@ class TexturesBase:
def __getitem__(self, index):
"""
Each texture class should implement a method
- to get the texture properites for the
+ to get the texture properties for the
specified elements in the batch.
The TexturesBase._getitem(i) method
- can be used as a helper funtion to retrieve the
+ can be used as a helper function to retrieve the
class attributes for item i. Then, a new
instance of the child class can be created with
the attributes.
@@ -496,7 +496,7 @@ class TexturesAtlas(TexturesBase):
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
- the barycentric coordianates of each pixel
+ the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
@@ -536,7 +536,7 @@ class TexturesAtlas(TexturesBase):
For N meshes with {Fi} number of faces, it returns a
tensor of shape sum(Fi)x3xD (D = 3 for RGB).
You can use the utils function in structures.utils to convert the
- packed respresentation to a list or padded.
+ packed representation to a list or padded.
"""
atlas_packed = self.atlas_packed()
# assume each face consists of (v0, v1, v2).
@@ -892,7 +892,7 @@ class TexturesUV(TexturesBase):
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
- the barycentric coordianates of each pixel
+ the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
@@ -1233,7 +1233,7 @@ class TexturesVertex(TexturesBase):
Args:
verts_features: list of (Vi, D) or (N, V, D) tensor giving a feature
- vector with artbitrary dimensions for each vertex.
+ vector with arbitrary dimensions for each vertex.
"""
if isinstance(verts_features, (tuple, list)):
correct_shape = all(
@@ -1356,7 +1356,7 @@ class TexturesVertex(TexturesBase):
def sample_textures(self, fragments, faces_packed=None) -> torch.Tensor:
"""
- Detemine the color for each rasterized face. Interpolate the colors for
+ Determine the color for each rasterized face. Interpolate the colors for
vertices which form the face using the barycentric coordinates.
Args:
fragments:
@@ -1366,7 +1366,7 @@ class TexturesVertex(TexturesBase):
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
- the barycentric coordianates of each pixel
+ the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
@@ -1389,7 +1389,7 @@ class TexturesVertex(TexturesBase):
For N meshes with {Fi} number of faces, it returns a
tensor of shape sum(Fi)x3xC (C = 3 for RGB).
You can use the utils function in structures.utils to convert the
- packed respresentation to a list or padded.
+ packed representation to a list or padded.
"""
verts_features_packed = self.verts_features_packed()
faces_verts_features = verts_features_packed[faces_packed]
diff --git a/pytorch3d/renderer/mesh/utils.py b/pytorch3d/renderer/mesh/utils.py
index 3faf8f7a..3e9eb998 100644
--- a/pytorch3d/renderer/mesh/utils.py
+++ b/pytorch3d/renderer/mesh/utils.py
@@ -44,7 +44,7 @@ def _interpolate_zbuf(
of the faces (in the packed representation) which
overlap each pixel in the image.
barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
- the barycentric coordianates of each pixel
+ the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
meshes: Meshes object representing a batch of meshes.
@@ -98,7 +98,7 @@ def _try_place_rectangle(
Example:
(We always have placed the first rectangle horizontally and other
rectangles above it.)
- Let's say the placed boxes 1-4 are layed out like this.
+ Let's say the placed boxes 1-4 are laid out like this.
The coordinates of the points marked X are stored in occupied.
It is to the right of the X's that we seek to place rect.
diff --git a/pytorch3d/renderer/points/rasterize_points.py b/pytorch3d/renderer/points/rasterize_points.py
index 0500e750..83265f40 100644
--- a/pytorch3d/renderer/points/rasterize_points.py
+++ b/pytorch3d/renderer/points/rasterize_points.py
@@ -8,7 +8,7 @@ from pytorch3d import _C # pyre-fixme[21]: Could not find name `_C` in `pytorch
from pytorch3d.renderer.mesh.rasterize_meshes import pix_to_non_square_ndc
-# Maxinum number of faces per bins for
+# Maximum number of points per bin for
# coarse-to-fine rasterization
kMaxPointsPerBin = 22
@@ -59,7 +59,7 @@ def rasterize_points(
set it heuristically based on the shape of the input. This should not
affect the output, but can affect the speed of the forward pass.
points_per_bin: Only applicable when using coarse-to-fine rasterization
- (bin_size > 0); this is the maxiumum number of points allowed within each
+ (bin_size > 0); this is the maximum number of points allowed within each
bin. If more than this many points actually fall into a bin, an error
will be raised. This should not affect the output values, but can affect
the memory usage in the forward pass.
@@ -95,7 +95,7 @@ def rasterize_points(
radius = _format_radius(radius, pointclouds)
# In the case that H != W use the max image size to set the bin_size
- # to accommodate the num bins constraint in the coarse rasteizer.
+ # to accommodate the num bins constraint in the coarse rasterizer.
# If the ratio of H:W is large this might cause issues as the smaller
# dimension will have fewer bins.
# TODO: consider a better way of setting the bin size.
@@ -276,7 +276,7 @@ def rasterize_points_python(
# Support variable size radius for each point in the batch
radius = _format_radius(radius, pointclouds)
- # Intialize output tensors.
+ # Initialize output tensors.
point_idxs = torch.full(
(N, H, W, K), fill_value=-1, dtype=torch.int32, device=device
)
diff --git a/pytorch3d/renderer/utils.py b/pytorch3d/renderer/utils.py
index b940bd21..e714e563 100644
--- a/pytorch3d/renderer/utils.py
+++ b/pytorch3d/renderer/utils.py
@@ -76,7 +76,7 @@ class TensorAccessor(nn.Module):
if hasattr(self.class_object, name):
return self.class_object.__dict__[name][self.index]
else:
- msg = "Attribue %s not found on %r"
+ msg = "Attribute %s not found on %r"
return AttributeError(msg % (name, self.class_object.__name__))
diff --git a/pytorch3d/structures/meshes.py b/pytorch3d/structures/meshes.py
index 445264c0..b5fc1baf 100644
--- a/pytorch3d/structures/meshes.py
+++ b/pytorch3d/structures/meshes.py
@@ -22,13 +22,13 @@ class Meshes(object):
- has specific batch dimension.
Packed
- no batch dimension.
- - has auxillary variables used to index into the padded representation.
+ - has auxiliary variables used to index into the padded representation.
Example:
Input list of verts V_n = [[V_1], [V_2], ... , [V_N]]
where V_1, ... , V_N are the number of verts in each mesh and N is the
- numer of meshes.
+ number of meshes.
Input list of faces F_n = [[F_1], [F_2], ... , [F_N]]
where F_1, ... , F_N are the number of faces in each mesh.
@@ -100,7 +100,7 @@ class Meshes(object):
| ]) |
-----------------------------------------------------------------------------
- Auxillary variables for packed representation
+ Auxiliary variables for packed representation
Name | Size | Example from above
-------------------------------|---------------------|-----------------------
@@ -139,7 +139,7 @@ class Meshes(object):
# SPHINX IGNORE
From the faces, edges are computed and have packed and padded
- representations with auxillary variables.
+ representations with auxiliary variables.
E_n = [[E_1], ... , [E_N]]
where E_1, ... , E_N are the number of unique edges in each mesh.
@@ -894,7 +894,7 @@ class Meshes(object):
def _compute_packed(self, refresh: bool = False):
"""
Computes the packed version of the meshes from verts_list and faces_list
- and sets the values of auxillary tensors.
+ and sets the values of auxiliary tensors.
Args:
refresh: Set to True to force recomputation of packed representations.
@@ -1022,7 +1022,7 @@ class Meshes(object):
# Remove duplicate edges: convert each edge (v0, v1) into an
# integer hash = V * v0 + v1; this allows us to use the scalar version of
# unique which is much faster than edges.unique(dim=1) which is very slow.
- # After finding the unique elements reconstruct the vertex indicies as:
+ # After finding the unique elements reconstruct the vertex indices as:
# (v0, v1) = (hash / V, hash % V)
# The inverse maps from unique_edges back to edges:
# unique_edges[inverse_idxs] == edges
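A plain-torch sketch of the hashing trick in the comment above, with `edges` an assumed `(E, 2)` LongTensor of vertex ids in `[0, V)`:

```python
hashes = V * edges[:, 0] + edges[:, 1]
unique_hashes, inverse_idxs = torch.unique(hashes, return_inverse=True)
unique_edges = torch.stack([unique_hashes // V, unique_hashes % V], dim=1)
# The inverse map satisfies: unique_edges[inverse_idxs] == edges
```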
diff --git a/pytorch3d/structures/pointclouds.py b/pytorch3d/structures/pointclouds.py
index e5194d24..929f6605 100644
--- a/pytorch3d/structures/pointclouds.py
+++ b/pytorch3d/structures/pointclouds.py
@@ -18,7 +18,7 @@ class Pointclouds(object):
- has specific batch dimension.
Packed
- no batch dimension.
- - has auxillary variables used to index into the padded representation.
+ - has auxiliary variables used to index into the padded representation.
Example
@@ -61,7 +61,7 @@ class Pointclouds(object):
| ]) |
-----------------------------------------------------------------------------
- Auxillary variables for packed representation
+ Auxiliary variables for packed representation
Name | Size | Example from above
-------------------------------|---------------------|-----------------------
@@ -265,7 +265,7 @@ class Pointclouds(object):
)
if d.device != self.device:
raise ValueError(
- "All auxillary inputs must be on the same device as the points."
+ "All auxiliary inputs must be on the same device as the points."
)
if p > 0:
if d.dim() != 2:
@@ -291,7 +291,7 @@ class Pointclouds(object):
)
if aux_input.device != self.device:
raise ValueError(
- "All auxillary inputs must be on the same device as the points."
+ "All auxiliary inputs must be on the same device as the points."
)
aux_input_C = aux_input.shape[2]
return None, aux_input, aux_input_C
@@ -508,7 +508,7 @@ class Pointclouds(object):
def padded_to_packed_idx(self):
"""
Return a 1D tensor x with length equal to the total number of points
- such that points_packed()[i] is element x[i] of the flattened padded
+ such that points_packed()[i] is element x[i] of the flattened padded
representation.
The packed representation can be calculated as follows.
@@ -573,7 +573,7 @@ class Pointclouds(object):
def _compute_packed(self, refresh: bool = False):
"""
Computes the packed version from points_list, normals_list and
- features_list and sets the values of auxillary tensors.
+ features_list and sets the values of auxiliary tensors.
Args:
refresh: Set to True to force recomputation of packed
@@ -910,7 +910,7 @@ class Pointclouds(object):
**neighborhood_size**: The size of the neighborhood used to estimate the
geometry around each point.
**disambiguate_directions**: If `True`, uses the algorithm from [1] to
- ensure sign consistency of the normals of neigboring points.
+ ensure sign consistency of the normals of neighboring points.
**normals**: A tensor of normals for each input point
of shape `(minibatch, num_point, 3)`.
If `pointclouds` are of `Pointclouds` class, returns a padded tensor.
@@ -985,7 +985,7 @@ class Pointclouds(object):
Args:
new_points_padded: FloatTensor of shape (N, P, 3)
new_normals_padded: (optional) FloatTensor of shape (N, P, 3)
- new_features_padded: (optional) FloatTensors of shape (N, P, C)
+ new_features_padded: (optional) FloatTensor of shape (N, P, C)
Returns:
Pointcloud with updated padded representations
diff --git a/pytorch3d/structures/volumes.py b/pytorch3d/structures/volumes.py
index 928c9fac..7e4e633c 100644
--- a/pytorch3d/structures/volumes.py
+++ b/pytorch3d/structures/volumes.py
@@ -77,7 +77,7 @@ class Volumes(object):
World coordinates:
- These define the locations of the centers of the volume cells
in the world coordinates.
- - They are specifiied with the following mapping that converts
+ - They are specified with the following mapping that converts
points `x_local` in the local coordinates to points `x_world`
in the world coordinates:
```
diff --git a/pytorch3d/transforms/rotation_conversions.py b/pytorch3d/transforms/rotation_conversions.py
index f2bfaa1b..b9050089 100644
--- a/pytorch3d/transforms/rotation_conversions.py
+++ b/pytorch3d/transforms/rotation_conversions.py
@@ -511,7 +511,7 @@ def quaternion_to_axis_angle(quaternions):
def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
"""
Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
- using Gram--Schmidt orthogonalisation per Section B of [1].
+ using Gram--Schmidt orthogonalization per Section B of [1].
Args:
d6: 6D rotation representation, of size (*, 6)
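A usage sketch of the conversion documented above:

```python
import torch
from pytorch3d.transforms import rotation_6d_to_matrix

d6 = torch.randn(8, 6)
R = rotation_6d_to_matrix(d6)  # (8, 3, 3) valid rotation matrices
```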
diff --git a/pytorch3d/transforms/transform3d.py b/pytorch3d/transforms/transform3d.py
index 8cbb3e9d..c954e749 100644
--- a/pytorch3d/transforms/transform3d.py
+++ b/pytorch3d/transforms/transform3d.py
@@ -190,7 +190,7 @@ class Transform3d:
def compose(self, *others):
"""
- Return a new Transform3d with the tranforms to compose stored as
+ Return a new Transform3d with the transforms to compose stored as
an internal list.
Args:
@@ -254,7 +254,7 @@ class Transform3d:
independently without composing them.
Returns:
- A new Transform3D object contaning the inverse of the original
+        A new Transform3d object containing the inverse of the original
transformation.
"""
@@ -302,7 +302,7 @@ class Transform3d:
Args:
points: Tensor of shape (P, 3) or (N, P, 3)
eps: If eps!=None, the argument is used to clamp the
- last coordinate before peforming the final division.
+ last coordinate before performing the final division.
The clamping corresponds to:
last_coord := (last_coord.sign() + (last_coord==0)) *
torch.clamp(last_coord.abs(), eps),
@@ -681,7 +681,7 @@ def _broadcast_bmm(a, b):
b: torch tensor of shape (N, K, K)
Returns:
- a and b broadcast multipled. The output batch dimension is max(N, M).
+ a and b broadcast multiplied. The output batch dimension is max(N, M).
To broadcast transforms across a batch dimension if M != N then
expect that either M = 1 or N = 1. The tensor with batch dimension 1 is
diff --git a/pytorch3d/vis/plotly_vis.py b/pytorch3d/vis/plotly_vis.py
index 22e510e5..45d91c82 100644
--- a/pytorch3d/vis/plotly_vis.py
+++ b/pytorch3d/vis/plotly_vis.py
@@ -567,7 +567,7 @@ def _add_pointcloud_trace(
pointclouds: Pointclouds object to render. It can be batched.
trace_name: name to label the trace with.
subplot_idx: identifies the subplot, with 0 being the top left.
- ncols: the number of sublpots per row.
+ ncols: the number of subplots per row.
max_points_per_pointcloud: the number of points to render, which are randomly sampled.
marker_size: the size of the rendered points
"""
@@ -648,7 +648,7 @@ def _add_camera_trace(
cameras: the Cameras object to render. It can be batched.
trace_name: name to label the trace with.
subplot_idx: identifies the subplot, with 0 being the top left.
- ncols: the number of sublpots per row.
+ ncols: the number of subplots per row.
camera_scale: the size of the wireframe used to render the Cameras object.
"""
cam_wires = get_camera_wireframe(camera_scale).to(cameras.device)
diff --git a/tests/test_io_obj.py b/tests/test_io_obj.py
index f0e6a2d7..d5b331de 100644
--- a/tests/test_io_obj.py
+++ b/tests/test_io_obj.py
@@ -328,7 +328,9 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
with self.assertRaises(ValueError) as err:
load_obj(obj_file)
- self.assertTrue("Face vertices can ony have 3 properties" in str(err.exception))
+ self.assertTrue(
+ "Face vertices can only have 3 properties" in str(err.exception)
+ )
def test_load_obj_error_invalid_vertex_indices(self):
obj_file = "\n".join(
@@ -631,7 +633,7 @@ class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
self.assertTrue(aux.normals is None)
self.assertTrue(aux.verts_uvs is None)
- def test_load_obj_mlt_no_image(self):
+ def test_load_obj_mtl_no_image(self):
obj_filename = "obj_mtl_no_image/model.obj"
filename = os.path.join(DATA_DIR, obj_filename)
R = 8
diff --git a/tests/test_io_ply.py b/tests/test_io_ply.py
index b60482fd..90efb1ff 100644
--- a/tests/test_io_ply.py
+++ b/tests/test_io_ply.py
@@ -317,7 +317,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
file.close()
self.assertLess(lengths[False], lengths[True], "ascii should be longer")
- def test_heterogenous_property(self):
+ def test_heterogeneous_property(self):
ply_file_ascii = "\n".join(
[
"ply",
@@ -670,7 +670,7 @@ class TestMeshPlyIO(TestCaseMixin, unittest.TestCase):
with self.assertRaisesRegex(ValueError, msg):
_load_ply_raw(StringIO("\n".join(lines2)))
- # Heterogenous cases
+ # Heterogeneous cases
lines2 = lines.copy()
lines2.insert(4, "property double y")
diff --git a/tests/test_points_alignment.py b/tests/test_points_alignment.py
index b4377319..31da12e9 100644
--- a/tests/test_points_alignment.py
+++ b/tests/test_points_alignment.py
@@ -155,7 +155,7 @@ class TestICP(TestCaseMixin, unittest.TestCase):
self.assertClose(s_init, s, atol=atol)
self.assertClose(Xt_init, Xt, atol=atol)
- def test_heterogenous_inputs(self, batch_size=10):
+ def test_heterogeneous_inputs(self, batch_size=10):
"""
Tests whether we get the same result when running ICP on
a set of randomly-sized Pointclouds and on their padded versions.
diff --git a/tests/test_points_to_volumes.py b/tests/test_points_to_volumes.py
index c72deeb9..e169f585 100644
--- a/tests/test_points_to_volumes.py
+++ b/tests/test_points_to_volumes.py
@@ -230,9 +230,9 @@ class TestPointsToVolumes(TestCaseMixin, unittest.TestCase):
def test_from_point_cloud(self, interp_mode="trilinear"):
"""
Generates a volume from a random point cloud sampled from faces
- of a 3D cube. Since each side of the cube is homogenously colored with
+ of a 3D cube. Since each side of the cube is homogeneously colored with
a different color, this should result in a volume with a
- predefined homogenous color of the cells along its borders
+ predefined homogeneous color of the cells along its borders
and black interior. The test is run for both cube and non-cube shaped
volumes.
"""
diff --git a/tests/test_rasterize_points.py b/tests/test_rasterize_points.py
index 77d2f84e..0ffb94fe 100644
--- a/tests/test_rasterize_points.py
+++ b/tests/test_rasterize_points.py
@@ -511,7 +511,7 @@ class TestRasterizePoints(TestCaseMixin, unittest.TestCase):
)
# Note that the order is only deterministic here for CUDA if all points
    # fit in one chunk. This will be the case for this small example, but
- # to properly exercise coordianted writes among multiple chunks we need
+ # to properly exercise coordinated writes among multiple chunks we need
# to use a bigger test case.
bin_points_expected[0, 0, 1, :2] = torch.tensor([0, 3])
bin_points_expected[0, 1, 0, 0] = torch.tensor([2])
diff --git a/tests/test_rasterize_rectangle_images.py b/tests/test_rasterize_rectangle_images.py
index f732db29..4c9e4cae 100644
--- a/tests/test_rasterize_rectangle_images.py
+++ b/tests/test_rasterize_rectangle_images.py
@@ -62,7 +62,7 @@ verts0 = torch.tensor(
)
faces0 = torch.tensor([[1, 0, 2], [4, 3, 5]], dtype=torch.int64)
-# Points for a simple pointcloud. Get the vertices from a
+# Points for a simple point cloud. Get the vertices from a
# torus and apply rotations such that the points are no longer
# symmetrical in X/Y.
torus_mesh = torus(r=0.25, R=1.0, sides=5, rings=2 * 5)
@@ -771,7 +771,7 @@ class TestRasterizeRectangleImagesPointclouds(TestCaseMixin, unittest.TestCase):
def test_render_pointcloud(self):
"""
- Test a textured poincloud is rendered correctly in a non square image.
+ Test a textured point cloud is rendered correctly in a non square image.
"""
device = torch.device("cuda:0")
pointclouds = Pointclouds(
diff --git a/tests/test_raymarching.py b/tests/test_raymarching.py
index 9b77b01b..93952349 100644
--- a/tests/test_raymarching.py
+++ b/tests/test_raymarching.py
@@ -16,7 +16,7 @@ class TestRaymarching(TestCaseMixin, unittest.TestCase):
n_rays=10, n_pts_per_ray=9, device="cuda", dtype=torch.float32
):
"""
- Generate a batch of ray points with features, densities, and z-coodinates
+ Generate a batch of ray points with features, densities, and z-coordinates
        such that their EmissionAbsorption rendering results in
feature renders `features_gt`, depth renders `depths_gt`,
and opacity renders `opacities_gt`.
diff --git a/tests/test_render_meshes.py b/tests/test_render_meshes.py
index 8f8bd975..c585ecd1 100644
--- a/tests/test_render_meshes.py
+++ b/tests/test_render_meshes.py
@@ -1052,7 +1052,7 @@ class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
images[0, ...].sum().backward()
fragments = rasterizer(mesh, raster_settings=raster_settings)
- # Some of the bary coordinates are outisde the
+ # Some of the bary coordinates are outside the
# [0, 1] range as expected because the blur is > 0
self.assertTrue(fragments.bary_coords.ge(1.0).any())
self.assertIsNotNone(atlas.grad)
diff --git a/tests/test_render_volumes.py b/tests/test_render_volumes.py
index 5ff22fc7..0dac3e51 100644
--- a/tests/test_render_volumes.py
+++ b/tests/test_render_volumes.py
@@ -531,7 +531,7 @@ class TestRenderVolumes(TestCaseMixin, unittest.TestCase):
# get the EA raymarcher
raymarcher = EmissionAbsorptionRaymarcher()
- # intialize the renderer
+ # initialize the renderer
renderer = VolumeRenderer(
raysampler=raysampler,
raymarcher=raymarcher,
@@ -574,8 +574,8 @@ class TestRenderVolumes(TestCaseMixin, unittest.TestCase):
def test_rotating_cube_volume_render(self):
"""
Generates 4 renders of 4 sides of a volume representing a 3D cube.
- Since each side of the cube is homogenously colored with
- a different color, this should result in 4 images of homogenous color
+ Since each side of the cube is homogeneously colored with
+ a different color, this should result in 4 images of homogeneous color
with the depth of each pixel equal to a constant.
"""
diff --git a/tests/test_so3.py b/tests/test_so3.py
index 315e11da..b7958e4f 100644
--- a/tests/test_so3.py
+++ b/tests/test_so3.py
@@ -148,8 +148,8 @@ class TestSO3(TestCaseMixin, unittest.TestCase):
`so3_exponential_map(so3_log_map(so3_exponential_map(log_rot)))
== so3_exponential_map(log_rot)`
for a randomly generated batch of rotation matrix logarithms `log_rot`.
- Unlike `test_so3_log_to_exp_to_log`, this test allows to check the
- correctness of converting `log_rot` which contains values > math.pi.
+ Unlike `test_so3_log_to_exp_to_log`, this test checks the
+ correctness of converting a `log_rot` which contains values > math.pi.
"""
log_rot = 2.0 * TestSO3.init_log_rot(batch_size=batch_size)
# check also the singular cases where rot. angle = {0, pi, 2pi, 3pi}
diff --git a/website/pages/tutorials/index.js b/website/pages/tutorials/index.js
index 21a3a54c..bdd5dcd4 100644
--- a/website/pages/tutorials/index.js
+++ b/website/pages/tutorials/index.js
@@ -28,7 +28,7 @@ class TutorialHome extends React.Component {
Here you can learn about the structure and applications of
- Pytorch3D from examples which are in the form of ipython
+ PyTorch3D from examples which are in the form of ipython
notebooks.
Run interactively