Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-16 20:00:36 +08:00
support layerwise galore
```diff
@@ -64,7 +64,7 @@ def run_ppo(
     )
 
     # Create optimizer and scheduler
-    optimizer = create_custom_optimzer(model, training_args, finetuning_args)
+    optimizer = create_custom_optimzer(model, dataset, training_args, finetuning_args)
     if optimizer is None:
         optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=training_args.learning_rate)
```
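
For context, here is a minimal sketch of the layerwise GaLore technique this commit enables. The idea, as implemented in the GaLore project's `galore_torch` package, is to give each trainable parameter its own optimizer and step it from a post-accumulate-grad hook, so every layer is updated and its gradient freed during the backward pass rather than keeping a full optimizer step at the end. The helper name `attach_layerwise_galore` and the hyperparameter values below are illustrative assumptions, not LLaMA-Factory's actual code; the new `dataset` argument to `create_custom_optimzer` presumably lets the real implementation derive the total number of update steps from the dataset size.

```python
# Illustrative sketch of layerwise GaLore, assuming PyTorch >= 2.1
# (for register_post_accumulate_grad_hook) and the galore_torch package.
import torch
from galore_torch import GaLoreAdamW  # pip install galore-torch


def attach_layerwise_galore(model: torch.nn.Module, lr: float = 1e-4, rank: int = 128):
    # One optimizer per parameter, so optimizer state for a layer can be
    # updated (and its gradient released) as soon as backward reaches it.
    optimizer_dict = {}
    for param in model.parameters():
        if not param.requires_grad:
            continue
        if param.dim() == 2:
            # GaLore projects 2D weight gradients into a low-rank subspace;
            # the group keys follow galore_torch's conventions.
            groups = [{"params": [param], "rank": rank, "update_proj_gap": 200,
                       "scale": 0.25, "proj_type": "std"}]
        else:
            # Non-matrix parameters fall back to a plain AdamW group.
            groups = [{"params": [param]}]
        optimizer_dict[param] = GaLoreAdamW(groups, lr=lr)

    def make_hook(p):
        def hook(*_):
            optimizer_dict[p].step()       # update this layer immediately
            optimizer_dict[p].zero_grad()  # drop its gradient right away
        return hook

    # Step each per-parameter optimizer as soon as that parameter's
    # gradient has been accumulated during backward.
    for param in optimizer_dict:
        param.register_post_accumulate_grad_hook(make_hook(param))
    return optimizer_dict
```

Because the updates happen inside backward hooks, the optimizer object handed back to the trainer can be a no-op shell; the training loop's usual `optimizer.step()` then has nothing left to do. Note that this scheme steps on every backward pass, so it does not compose with gradient accumulation as written.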