LinearExponential LR

Summary: Adds a LinearExponential LR policy: a linear warm-up followed by exponential LR decay. Needed to make Blender scenes converge.
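For intuition, a minimal standalone sketch of the schedule's multiplier (the helper name is made up here; the parameter names mirror the new config fields in the diff below, and gamma = 0.1 is an assumed decay factor, not a value fixed by this commit):

def linear_exponential_multiplier(
    epoch: int,
    milestone: int = 200,      # linear_exponential_lr_milestone
    start_gamma: float = 0.1,  # linear_exponential_start_gamma
    gamma: float = 0.1,        # exponential decay factor (assumed)
    step_size: int = 3001,     # exponential_lr_step_size
) -> float:
    if epoch < milestone:
        # Linear ramp from start_gamma at epoch 0 up to 1.0 at the milestone.
        w = (milestone - epoch) / milestone
        return w * start_gamma + (1.0 - w)
    # Afterwards, decay by a factor of gamma every step_size epochs.
    return gamma ** ((epoch - milestone) / step_size)

for e in (0, 100, 200, 3201):
    print(e, round(linear_exponential_multiplier(e), 3))
# 0 0.1 / 100 0.55 / 200 1.0 / 3201 0.1: ramp up, then exactly one full
# decay step over the 3001 epochs used in the Blender config below.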

Reviewed By: kjchalup

Differential Revision: D38557007

fbshipit-source-id: ad630dbc5b8fabcb33eeb5bdeed5e4f31360bac2
Jeremy Reizenstein, 2022-08-09 18:18:46 -07:00 (committed by Facebook GitHub Bot)
parent 65e5bb3ea1
commit a39cad40f4
3 changed files with 26 additions and 4 deletions


@@ -36,12 +36,13 @@ model_factory_ImplicitronModelFactory_args:
   loss_autodecoder_norm: 0.00
 optimizer_factory_ImplicitronOptimizerFactory_args:
-  exponential_lr_step_size: 2500
-  lr_policy: Exponential
+  exponential_lr_step_size: 3001
+  lr_policy: LinearExponential
+  linear_exponential_lr_milestone: 200
 training_loop_ImplicitronTrainingLoop_args:
-  max_epochs: 2000
-  metric_print_interval: 100
+  max_epochs: 3201
+  metric_print_interval: 10
   store_checkpoints_purge: 3
   test_when_finished: true
   validation_interval: 100


@@ -72,6 +72,8 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
     momentum: float = 0.9
     multistep_lr_milestones: tuple = ()
     weight_decay: float = 0.0
+    linear_exponential_lr_milestone: int = 200
+    linear_exponential_start_gamma: float = 0.1
 
     def __post_init__(self):
         run_auto_creation(self)
@@ -156,6 +158,23 @@ class ImplicitronOptimizerFactory(OptimizerFactoryBase):
                 lambda epoch: self.gamma ** (epoch / self.exponential_lr_step_size),
                 verbose=False,
             )
+        elif self.lr_policy.casefold() == "LinearExponential".casefold():
+            # Linear learning-rate progression between epoch 0 and
+            # self.linear_exponential_lr_milestone, followed by exponential
+            # lr decay for the remaining epochs.
+            def _get_lr(epoch: int) -> float:
+                m = self.linear_exponential_lr_milestone
+                if epoch < m:
+                    w = (m - epoch) / m
+                    gamma = w * self.linear_exponential_start_gamma + (1 - w)
+                else:
+                    epoch_rest = epoch - m
+                    gamma = self.gamma ** (epoch_rest / self.exponential_lr_step_size)
+                return gamma
+
+            scheduler = torch.optim.lr_scheduler.LambdaLR(
+                optimizer, _get_lr, verbose=False
+            )
         else:
             raise ValueError("no such lr policy %s" % self.lr_policy)
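For context, a minimal usage sketch of the new branch (the optimizer, base learning rate, and standalone _get_lr below are placeholders mirroring the diff, not code from the commit):

import torch

milestone, start_gamma = 200, 0.1  # values mirroring the new defaults
gamma, step_size = 0.1, 3001       # gamma is an assumed decay factor

def _get_lr(epoch: int) -> float:
    if epoch < milestone:
        w = (milestone - epoch) / milestone
        return w * start_gamma + (1.0 - w)
    return gamma ** ((epoch - milestone) / step_size)

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=5e-4)  # placeholder base lr
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, _get_lr)

for epoch in range(300):
    optimizer.step()
    scheduler.step()  # lr = base_lr * _get_lr(current epoch)
print(scheduler.get_last_lr())  # ~0.93 * base lr: past warm-up, early decay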


@@ -402,6 +402,8 @@ optimizer_factory_ImplicitronOptimizerFactory_args:
   momentum: 0.9
   multistep_lr_milestones: []
   weight_decay: 0.0
+  linear_exponential_lr_milestone: 200
+  linear_exponential_start_gamma: 0.1
 training_loop_ImplicitronTrainingLoop_args:
   eval_only: false
   evaluator_class_type: ImplicitronEvaluator