pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml
Jeremy Reizenstein a39cad40f4 LinearExponential LR
Summary: Linear followed by exponential LR progression. Needed for making Blender scenes converge.

Reviewed By: kjchalup

Differential Revision: D38557007

fbshipit-source-id: ad630dbc5b8fabcb33eeb5bdeed5e4f31360bac2
2022-08-09 18:18:46 -07:00
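
To picture the schedule named in the summary: the learning-rate multiplier ramps linearly up to its base value over the first linear_exponential_lr_milestone epochs (200 in this config) and then decays exponentially on a scale of exponential_lr_step_size epochs (3001). The sketch below is illustrative only, not Implicitron's implementation; the decay factor gamma and the base learning rate are assumed stand-ins for values inherited from the base config.

import torch

def linear_exponential_factor(epoch, milestone=200, step_size=3001, gamma=0.1):
    # Linear warm-up: the multiplier grows from ~0 to 1 over `milestone` epochs.
    if epoch < milestone:
        return (epoch + 1) / milestone
    # Exponential decay afterwards: the multiplier shrinks by a factor of
    # `gamma` every `step_size` epochs.
    return gamma ** ((epoch - milestone) / step_size)

# Toy usage with a dummy parameter; lr=5e-4 is an assumed base value.
optimizer = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=5e-4)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, linear_exponential_factor)
for epoch in range(3201):
    optimizer.step()
    scheduler.step()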


defaults:
- repro_singleseq_base
- _self_
exp_dir: "./data/nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}"
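# Data: load one NeRF-synthetic (Blender) scene with BlenderDatasetMapProvider.
# The dataset root and scene name come from the BLENDER_DATASET_ROOT and
# BLENDER_SINGLESEQ_CLASS environment variables.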
data_source_ImplicitronDataSource_args:
  data_loader_map_provider_SequenceDataLoaderMapProvider_args:
    dataset_length_train: 100
  dataset_map_provider_class_type: BlenderDatasetMapProvider
  dataset_map_provider_BlenderDatasetMapProvider_args:
    base_dir: ${oc.env:BLENDER_DATASET_ROOT}
    n_known_frames_for_test: null
    object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
    path_manager_factory_class_type: PathManagerFactory
    path_manager_factory_PathManagerFactory_args:
      silence_logs: true
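# Model: a NeRF-style setup for the synthetic scenes. Masks are ignored
# (mask_images: false, mask losses zeroed below); 4096 rays per image are
# sampled between the fixed near/far bounds of 2 and 6 commonly used for the
# Blender scenes, with density noise during training and 128 fine points per
# ray in the multi-pass emission-absorption renderer.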
model_factory_ImplicitronModelFactory_args:
  model_GenericModel_args:
    mask_images: false
    raysampler_class_type: NearFarRaySampler
    raysampler_NearFarRaySampler_args:
      n_rays_per_image_sampled_from_mask: 4096
      min_depth: 2
      max_depth: 6
    renderer_MultiPassEmissionAbsorptionRenderer_args:
      density_noise_std_train: 1.0
      n_pts_per_ray_fine_training: 128
      n_pts_per_ray_fine_evaluation: 128
      raymarcher_EmissionAbsorptionRaymarcher_args:
        blend_output: false
    loss_weights:
      loss_rgb_mse: 1.0
      loss_prev_stage_rgb_mse: 1.0
      loss_mask_bce: 0.0
      loss_prev_stage_mask_bce: 0.0
      loss_autodecoder_norm: 0.00
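# Optimizer: the LinearExponential LR policy described in the commit summary,
# i.e. a linear ramp over the first 200 epochs (linear_exponential_lr_milestone)
# followed by exponential decay with a step size of 3001 epochs; the decay
# factor itself is inherited from the base config.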
optimizer_factory_ImplicitronOptimizerFactory_args:
  exponential_lr_step_size: 3001
  lr_policy: LinearExponential
  linear_exponential_lr_milestone: 200
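# Training loop: 3201 epochs, validation every 100 epochs, metric printing
# every 10 steps, only the most recent checkpoints kept
# (store_checkpoints_purge: 3), and the test set evaluated at the end.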
training_loop_ImplicitronTrainingLoop_args:
  max_epochs: 3201
  metric_print_interval: 10
  store_checkpoints_purge: 3
  test_when_finished: true
  validation_interval: 100