more readthedocs

Summary: Quote formats, spelling

Reviewed By: shapovalov

Differential Revision: D40913734

fbshipit-source-id: d6dea65d5204b3c463c656a07ef9b447b7be6a0a
Jeremy Reizenstein 2022-11-02 05:19:15 -07:00 committed by Facebook GitHub Bot
parent f7ac7b604a
commit c54e048666
12 changed files with 80 additions and 83 deletions
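
The recurring change below replaces Markdown-style ``` fences inside docstrings with reStructuredText literal blocks: Sphinx (which readthedocs runs) renders an indented block introduced by `::` as preformatted text, while backtick fences are passed through verbatim. A minimal sketch of the convention (the function name is a placeholder, not part of this commit):

```
def example():
    """
    Sphinx renders the indented block after the double colon literally::

        {"train": [(sequence_name, frame_number, image_path), ...]}

    A Markdown-style triple-backtick fence here would show up as literal
    backtick characters in the generated HTML.
    """
```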

View File

@@ -49,7 +49,7 @@ def iterate_directory(directory_path, dest):
toc = []
if not dest.exists():
dest.mkdir()
for file in directory_path.glob("*.py"):
for file in sorted(directory_path.glob("*.py")):
if file.stem.startswith("_"):
continue
module = paths_to_modules([file])
@@ -121,7 +121,7 @@ basic_dataset = [
]
basic_dataset_modules = [f"pytorch3d.implicitron.dataset.{i}" for i in basic_dataset]
create_one_file(
"pytorch3d.implicitron.dataset",
"pytorch3d.implicitron.dataset in general",
"Basics of data for implicitron",
basic_dataset_modules,
DEST_DIR / "data_basics.rst",
@@ -131,7 +131,7 @@ specific_dataset_files = [
i for i in dataset_files if i.stem.find("_dataset_map_provider") != -1
]
create_one_file(
"pytorch3d.impliciton.dataset",
"pytorch3d.implicitron.dataset specific datasets",
"specific datasets",
paths_to_modules(specific_dataset_files),
DEST_DIR / "datasets.rst",
@@ -139,7 +139,7 @@ create_one_file(
evaluation_files = sorted(ROOT_DIR.glob("pytorch3d/implicitron/evaluation/*.py"))
create_one_file(
"pytorch3d.impliciton.evaluation",
"pytorch3d.implicitron.evaluation",
"evaluation",
paths_to_modules(evaluation_files),
DEST_DIR / "evaluation.rst",
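
The `sorted()` fix in the first hunk makes the generated docs deterministic, since `Path.glob` returns files in filesystem order. The body of `create_one_file` is not shown in this diff; the following is only a sketch of what such a helper might emit, inferred from the generated .rst files below (the `:members:` option is an assumption):

```
from pathlib import Path

def create_one_file(title, description, modules, dest: Path) -> None:
    # Title underlined with '=' to exactly the title's length,
    # matching the headings in the .rst files below.
    lines = [title, "=" * len(title), "", description, ""]
    for module in modules:
        # One automodule directive per documented module.
        lines += [f".. automodule:: {module}", "   :members:", ""]
    dest.write_text("\n".join(lines))
```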

View File

@@ -1,5 +1,5 @@
pytorch3d.implicitron.dataset
=============================
pytorch3d.implicitron.dataset in general
========================================
Basics of data for implicitron

View File

@@ -1,5 +1,5 @@
pytorch3d.impliciton.dataset
============================
pytorch3d.implicitron.dataset specific datasets
===============================================
specific datasets

View File

@@ -1,5 +1,5 @@
pytorch3d.impliciton.evaluation
===============================
pytorch3d.implicitron.evaluation
================================
evaluation

View File

@@ -4,10 +4,10 @@ pytorch3d.implicitron.models.implicit_function
.. toctree::
base
decoding_functions
idr_feature_field
neural_radiance_field
scene_representation_networks
utils
decoding_functions
voxel_grid
voxel_grid_implicit_function

View File

@@ -57,8 +57,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
Generates the training, validation, and testing dataset objects for
a dataset laid out on disk like CO3Dv2, with annotations in gzipped json files.
The dataset is organized in the filesystem as follows:
```
The dataset is organized in the filesystem as follows::
self.dataset_root
<category_0>
<sequence_name_0>
@@ -90,7 +90,6 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
<category_1>
...
<category_K>
```
The dataset contains sequences named `<sequence_name_i>` from `K` categories with
names `<category_j>`. Each category comprises sequence folders
@@ -106,8 +105,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
the list of all frames and sequences of the given category stored as lists of
`FrameAnnotation` and `SequenceAnnotation` objects respectively.
Each `set_lists_<subset_name_l>.json` file contains the following dictionary:
```
Each `set_lists_<subset_name_l>.json` file contains the following dictionary::
{
"train": [
(sequence_name: str, frame_number: int, image_path: str),
@@ -122,7 +121,7 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
...
],
]
```
defining the list of frames (identified with their `sequence_name` and `frame_number`)
in the "train", "val", and "test" subsets of the dataset.
Note that `frame_number` can be obtained only from `frame_annotations.jgz` and
@@ -131,8 +130,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
have its frame number set to `20`, not 5).
Each `eval_batches_<subset_name_l>.json` file contains a list of evaluation examples
in the following form:
```
in the following form::
[
[ # batch 1
(sequence_name: str, frame_number: int, image_path: str),
@@ -143,7 +142,7 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
...
],
]
```
Note that the evaluation examples always come from the `"test"` subset of the
dataset (test frames can repeat across batches).
@@ -341,14 +340,13 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
Returns:
category_to_subset_name_list: A dictionary containing subset names available
per category of the following form:
```
per category of the following form::
{
category_0: [category_0_subset_name_0, category_0_subset_name_1, ...],
category_1: [category_1_subset_name_0, category_1_subset_name_1, ...],
...
}
```
"""
category_to_subset_name_list_json = "category_to_subset_name_list.json"
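
As a concrete illustration of the `set_lists_<subset_name_l>.json` layout documented above, one such file could be loaded and iterated like this (the file name is a hypothetical example, not taken from this diff):

```
import json

# Hypothetical subset name; real names depend on the dataset release.
with open("set_lists_fewview_dev.json") as f:
    set_lists = json.load(f)

# JSON stores the documented (sequence_name, frame_number, image_path)
# tuples as 3-element lists, so they unpack directly.
for sequence_name, frame_number, image_path in set_lists["train"]:
    print(sequence_name, frame_number, image_path)
```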

View File

@@ -554,8 +554,8 @@ def _get_flat_nvs_metric_key(result, metric_name) -> str:
def flatten_nvs_results(results):
"""
Takes input `results` list of dicts of the form:
```
Takes input `results` list of dicts of the form::
[
{
'subset':'train/test/...',
@@ -564,12 +564,14 @@ def flatten_nvs_results(results):
},
...
]
```
And converts to a flat dict as follows:
{
'subset=train/test/...|subsubset=src=1/src=2/...': nvs_eval_metrics,
...
}
And converts to a flat dict as follows::
{
'subset=train/test/...|subsubset=src=1/src=2/...': nvs_eval_metrics,
...
}
"""
results_flat = {}
for result in results:
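
A minimal sketch of the flattening the docstring describes; the key format mirrors the documented `subset=...|subsubset=...` pattern, but the `"metrics"` field name is an assumption since the docstring elides it:

```
def flatten_nvs_results_sketch(results):
    # Build one 'subset=...|subsubset=...' key per input dict.
    flat = {}
    for result in results:
        key = f"subset={result['subset']}|subsubset={result['subsubset']}"
        flat[key] = result["metrics"]  # assumed key holding nvs_eval_metrics
    return flat
```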

View File

@@ -29,21 +29,21 @@ class MultiPassEmissionAbsorptionRenderer( # pyre-ignore: 13
During each ray marching pass, features, depth map, and masks
are integrated: Let o_i be the opacity estimated by the implicit function,
and d_i be the offset between points `i` and `i+1` along the respective ray.
Ray marching is performed using the following equations:
```
ray_opacity_n = cap_fn(sum_i=1^n cap_fn(d_i * o_i)),
weight_n = weight_fn(cap_fn(d_i * o_i), 1 - ray_opacity_{n-1}),
```
Ray marching is performed using the following equations::
ray_opacity_n = cap_fn(sum_i=1^n cap_fn(d_i * o_i)),
weight_n = weight_fn(cap_fn(d_i * o_i), 1 - ray_opacity_{n-1}),
and the final rendered quantities are computed by a dot-product of ray values
with the weights, e.g. `features = sum_n(weight_n * ray_features_n)`.
By default, for the EA raymarcher from [1] (
activated with `self.raymarcher_class_type="EmissionAbsorptionRaymarcher"`
):
```
)::
cap_fn(x) = 1 - exp(-x),
weight_fn(x) = w * x.
```
Note that the latter can be altered by changing `self.raymarcher_class_type`,
e.g. to "CumsumRaymarcher", which implements the cumulative-sum raymarcher
from NeuralVolumes [2].
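
A toy numerical sketch of the emission-absorption equations above, for a single ray with three samples (all values are made up for illustration):

```
import torch

o = torch.tensor([0.3, 0.8, 1.5])  # opacities o_i along the ray
d = torch.tensor([0.1, 0.1, 0.1])  # offsets d_i between samples

def cap_fn(x):
    return 1.0 - torch.exp(-x)  # default EA cap function

capped = cap_fn(d * o)                             # cap_fn(d_i * o_i)
ray_opacity = cap_fn(torch.cumsum(capped, dim=0))  # ray_opacity_n
# weight_fn(x, w) = w * x with w = 1 - ray_opacity_{n-1}; the opacity
# before the first sample is 0, so the first factor is 1.
transmittance = torch.cat([torch.ones(1), 1.0 - ray_opacity[:-1]])
weights = capped * transmittance                   # weight_n
```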

View File

@@ -250,12 +250,11 @@ class AngleWeightedReductionFeatureAggregator(torch.nn.Module, FeatureAggregator
Performs a weighted aggregation using a set of predefined `reduction_functions`
and concatenates the results of each aggregation function along the
channel dimension. The weights are proportional to the cosine of the
angle between the target ray and the source ray:
```
weight = (
dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
)**self.weight_by_ray_angle_gamma
```
angle between the target ray and the source ray::
weight = (
dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
)**self.weight_by_ray_angle_gamma
The reduction functions collapse the second dimension
of the sampled features, which stacks the source views.
@@ -359,12 +358,11 @@ class AngleWeightedIdentityFeatureAggregator(torch.nn.Module, FeatureAggregatorB
"""
This aggregator does not perform any feature aggregation. It only weights
the features by the weights proportional to the cosine of the
angle between the target ray and the source ray:
```
weight = (
dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
)**self.weight_by_ray_angle_gamma
```
angle between the target ray and the source ray::
weight = (
dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
)**self.weight_by_ray_angle_gamma
Settings:
min_ray_angle_weight: The minimum possible aggregation weight
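
A small sketch of the angle-based weight shared by both aggregators in this file; rays are assumed to be unit-normalized direction vectors, and the argument defaults are placeholders rather than the library's:

```
import torch

def ray_angle_weight(target_ray, source_ray, min_ray_angle_weight=0.1, gamma=1.0):
    # dot(target_ray, source_ray) over the trailing xyz dimension
    cos_angle = (target_ray * source_ray).sum(dim=-1)
    return (cos_angle * 0.5 + 0.5 + min_ray_angle_weight) ** gamma
```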

View File

@@ -218,8 +218,8 @@ def cameras_points_cartesian_product(
) -> Tuple[CamerasBase, torch.Tensor]:
"""
Generates all pairs of elements from 'camera' and 'pts' and returns
`camera_rep` and `pts_rep` such that:
```
`camera_rep` and `pts_rep` such that::
camera_rep = [ pts_rep = [
camera[0] pts[0],
camera[0] pts[1],
@@ -235,15 +235,14 @@ def cameras_points_cartesian_product(
camera[n_cameras-1] ...,
... pts[batch_pts-1],
] ]
```
Args:
camera: A batch of `n_cameras` cameras.
pts: A batch of `batch_pts` points of shape `(batch_pts, ..., dim)`
Returns:
camera_rep: A batch of batch_pts*n_cameras cameras such that:
```
camera_rep: A batch of batch_pts*n_cameras cameras such that::
camera_rep = [
camera[0]
camera[0]
@@ -258,11 +257,11 @@ def cameras_points_cartesian_product(
camera[n_cameras-1]
camera[n_cameras-1]
]
```
pts_rep: Repeated `pts` of shape `(batch_pts*n_cameras, ..., dim)`,
such that:
```
such that::
pts_rep = [
pts[0],
pts[1],
@@ -278,7 +277,7 @@ def cameras_points_cartesian_product(
...,
pts[batch_pts-1],
]
```
"""
n_cameras = camera.R.shape[0]
batch_pts = pts.shape[0]
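
The repetition pattern documented above can be sketched with plain index tensors; joining real `CamerasBase` objects takes more machinery, so this only illustrates the ordering:

```
import torch

n_cameras, batch_pts = 3, 4
# Each camera is repeated batch_pts times in a row...
camera_idx = torch.arange(n_cameras).repeat_interleave(batch_pts)
# ...while the full pts batch is tiled once per camera.
pts_idx = torch.arange(batch_pts).repeat(n_cameras)
# camera_idx -> tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
# pts_idx    -> tensor([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3])
```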

View File

@@ -73,29 +73,29 @@ class Stats(object):
# TODO: update this with context manager
"""
Stats logging object, useful for gathering statistics when training a deep net in PyTorch.
Example:
```
# init stats structure that logs statistics 'objective' and 'top1e'
stats = Stats( ('objective','top1e') )
network = init_net() # init a pytorch module (=neural network)
dataloader = init_dataloader() # init a dataloader
for epoch in range(10):
# start of epoch -> call new_epoch
stats.new_epoch()
Example::
# iterate over batches
for batch in dataloader:
# init stats structure that logs statistics 'objective' and 'top1e'
stats = Stats( ('objective','top1e') )
network = init_net() # init a pytorch module (=neural network)
dataloader = init_dataloader() # init a dataloader
for epoch in range(10):
# start of epoch -> call new_epoch
stats.new_epoch()
output = network(batch) # run and save into a dict of output variables "output"
# iterate over batches
for batch in dataloader:
output = network(batch) # run and save into a dict of output variables
# stats.update() automatically parses the 'objective' and 'top1e' from
# the "output" dict and stores this into the db
stats.update(output)
stats.print() # prints the averages over given epoch
# stores the training plots into '/tmp/epoch_stats.pdf'
# and plots into a visdom server running at localhost (if running)
stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
# stats.update() automatically parses the 'objective' and 'top1e' from
# the "output" dict and stores this into the db
stats.update(output)
stats.print() # prints the averages over given epoch
# stores the training plots into '/tmp/epoch_stats.pdf'
# and plots into a visdom server running at localhost (if running)
stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
```
"""
def __init__(

View File

@@ -181,11 +181,11 @@ class Timer:
"""
A simple class for timing execution.
Example:
```
Example::
with Timer():
print("This print statement is timed.")
```
"""
def __init__(self, name="timer", quiet=False):