diff --git a/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml b/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml
index ab77215a..6a3729f8 100644
--- a/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml
+++ b/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml
@@ -36,12 +36,13 @@ model_factory_ImplicitronModelFactory_args:
     loss_autodecoder_norm: 0.00
 optimizer_factory_ImplicitronOptimizerFactory_args:
-  exponential_lr_step_size: 2500
-  lr_policy: Exponential
+  exponential_lr_step_size: 3001
+  lr_policy: LinearExponential
+  linear_exponential_lr_milestone: 200
 training_loop_ImplicitronTrainingLoop_args:
-  max_epochs: 2000
-  metric_print_interval: 100
+  max_epochs: 3201
+  metric_print_interval: 10
   store_checkpoints_purge: 3
   test_when_finished: true
   validation_interval: 100
diff --git a/projects/implicitron_trainer/impl/optimizer_factory.py b/projects/implicitron_trainer/impl/optimizer_factory.py
index 88547481..184adb92 100644
--- a/projects/implicitron_trainer/impl/optimizer_factory.py
+++ b/projects/implicitron_trainer/impl/optimizer_factory.py
@@ -72,6 +72,8 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
     momentum: float = 0.9
     multistep_lr_milestones: tuple = ()
     weight_decay: float = 0.0
+    linear_exponential_lr_milestone: int = 200
+    linear_exponential_start_gamma: float = 0.1
 
     def __post_init__(self):
         run_auto_creation(self)
@@ -156,6 +158,23 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
                 lambda epoch: self.gamma ** (epoch / self.exponential_lr_step_size),
                 verbose=False,
             )
+        elif self.lr_policy.casefold() == "LinearExponential".casefold():
+            # Linear learning rate growth from epoch 0 to
+            # self.linear_exponential_lr_milestone, followed by exponential
+            # lr decay for the remaining epochs.
+            def _get_lr(epoch: int):
+                m = self.linear_exponential_lr_milestone
+                if epoch < m:
+                    w = (m - epoch) / m
+                    gamma = w * self.linear_exponential_start_gamma + (1 - w)
+                else:
+                    epoch_rest = epoch - m
+                    gamma = self.gamma ** (epoch_rest / self.exponential_lr_step_size)
+                return gamma
+
+            scheduler = torch.optim.lr_scheduler.LambdaLR(
+                optimizer, _get_lr, verbose=False
+            )
         else:
             raise ValueError("no such lr policy %s" % self.lr_policy)
 
diff --git a/projects/implicitron_trainer/tests/experiment.yaml b/projects/implicitron_trainer/tests/experiment.yaml
index e9b57ba1..f70a3c23 100644
--- a/projects/implicitron_trainer/tests/experiment.yaml
+++ b/projects/implicitron_trainer/tests/experiment.yaml
@@ -402,6 +402,8 @@ optimizer_factory_ImplicitronOptimizerFactory_args:
   momentum: 0.9
   multistep_lr_milestones: []
   weight_decay: 0.0
+  linear_exponential_lr_milestone: 200
+  linear_exponential_start_gamma: 0.1
 training_loop_ImplicitronTrainingLoop_args:
   eval_only: false
   evaluator_class_type: ImplicitronEvaluator
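
For reference, below is a minimal standalone sketch of the LinearExponential policy added in optimizer_factory.py. The MILESTONE, START_GAMMA, and STEP_SIZE constants mirror values that appear in this diff; GAMMA and BASE_LR are illustrative assumptions, since the factory's `gamma` field and base learning rate are not shown here.

```python
import torch

# Values mirroring this diff; GAMMA and BASE_LR are assumed for illustration.
MILESTONE = 200    # linear_exponential_lr_milestone
START_GAMMA = 0.1  # linear_exponential_start_gamma
STEP_SIZE = 3001   # exponential_lr_step_size
GAMMA = 0.1        # assumed value of the factory's `gamma` field
BASE_LR = 5e-4     # assumed base learning rate


def linear_exponential(epoch: int) -> float:
    # LR multiplier: linear ramp from START_GAMMA up to 1.0 over the first
    # MILESTONE epochs, then exponential decay by GAMMA every STEP_SIZE epochs.
    if epoch < MILESTONE:
        w = (MILESTONE - epoch) / MILESTONE
        return w * START_GAMMA + (1 - w)
    return GAMMA ** ((epoch - MILESTONE) / STEP_SIZE)


# Attach the multiplier to any optimizer via LambdaLR, as the factory does.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=BASE_LR)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, linear_exponential)

for epoch in (0, 100, 200, 1700, 3200):
    print(f"epoch {epoch:4d}: lr multiplier = {linear_exponential(epoch):.4f}")
```

Under these assumed settings the multiplier ramps from 0.1 to 1.0 over the first 200 epochs, then decays back to roughly 0.1 by epoch 3200, which lines up with the new `max_epochs: 3201` in the config.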