Mirror of https://github.com/facebookresearch/pytorch3d.git, synced 2025-08-02 03:42:50 +08:00.
Summary: LLFF (and most/all non-synth datasets) will have no background/foreground distinction. Add support for data with no fg mask. Also, we had a bug in stats loading, like this: * Load stats * One of the stats has a history of length 0 * That's fine, e.g. maybe it's fg_error but the dataset has no notion of fg/bg. So leave it as len 0 * Check whether all the stats have the same history length as an arbitrarily chosen "reference-stat" * Oops, the reference-stat happened to be the stat with length 0 * assert (legit_stat_len == reference_stat_len (=0)) ---> failed assert Also some minor fixes (from Jeremy's other diff) to support LLFF Reviewed By: davnov134 Differential Revision: D38475272 fbshipit-source-id: 5b35ac86d1d5239759f537621f41a3aa4eb3bd68
48 lines
1.6 KiB
YAML
# Implicitron repro config: single-sequence NeRF trained on a Blender
# synthetic scene. Extends repro_singleseq_base; the scene is selected at
# runtime via the BLENDER_SINGLESEQ_CLASS environment variable, and the
# dataset root via BLENDER_DATASET_ROOT (OmegaConf `oc.env` interpolation).
defaults:
- repro_singleseq_base
- _self_
exp_dir: "./data/nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}"
data_source_ImplicitronDataSource_args:
  data_loader_map_provider_SequenceDataLoaderMapProvider_args:
    dataset_length_train: 100
  dataset_map_provider_class_type: BlenderDatasetMapProvider
  dataset_map_provider_BlenderDatasetMapProvider_args:
    base_dir: ${oc.env:BLENDER_DATASET_ROOT}
    # null: do not hold out known frames for the test split.
    n_known_frames_for_test: null
    object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
    path_manager_factory_class_type: PathManagerFactory
    path_manager_factory_PathManagerFactory_args:
      silence_logs: true

model_factory_ImplicitronModelFactory_args:
  model_GenericModel_args:
    # Blender scenes are rendered on a clean background; do not mask inputs.
    mask_images: false
    raysampler_class_type: NearFarRaySampler
    raysampler_NearFarRaySampler_args:
      n_rays_per_image_sampled_from_mask: 4096
      # Near/far bounds of the synthetic Blender scenes.
      min_depth: 2
      max_depth: 6
    renderer_MultiPassEmissionAbsorptionRenderer_args:
      density_noise_std_train: 1.0
      n_pts_per_ray_fine_training: 128
      n_pts_per_ray_fine_evaluation: 128
      raymarcher_EmissionAbsorptionRaymarcher_args:
        blend_output: false
    loss_weights:
      loss_rgb_mse: 1.0
      loss_prev_stage_rgb_mse: 1.0
      # Mask losses disabled: no foreground/background supervision is used.
      loss_mask_bce: 0.0
      loss_prev_stage_mask_bce: 0.0
      loss_autodecoder_norm: 0.0

optimizer_factory_ImplicitronOptimizerFactory_args:
  exponential_lr_step_size: 2500
  lr_policy: Exponential

training_loop_ImplicitronTrainingLoop_args:
  max_epochs: 2000
  metric_print_interval: 100
  # Keep only the 3 most recent checkpoints on disk.
  store_checkpoints_purge: 3
  test_when_finished: true
  validation_interval: 100