Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-16 11:50:35 +08:00
support layerwise galore
@@ -44,7 +44,7 @@ def run_dpo(
     training_args.remove_unused_columns = False # important for pairwise dataset
 
     # Initialize our Trainer
-    optimizer = create_custom_optimzer(model, training_args, finetuning_args)
+    optimizer = create_custom_optimzer(model, dataset, training_args, finetuning_args)
     trainer = CustomDPOTrainer(
         beta=finetuning_args.dpo_beta,
         loss_type=finetuning_args.dpo_loss,
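
The diff threads the training `dataset` into `create_custom_optimzer`, presumably so the factory can derive step counts from the dataset size when building per-layer optimizers and schedulers. Below is a minimal sketch of how layer-wise GaLore is commonly wired up with the galore-torch package: each trainable parameter gets its own single-parameter optimizer that steps inside a post-accumulate-grad hook, so full-model gradients and optimizer state are never held at once. The helper name and hyperparameter values here are illustrative assumptions, not LLaMA-Factory's actual API.

# Minimal sketch of layer-wise GaLore; assumes `pip install galore-torch`
# and PyTorch >= 2.1 (for Tensor.register_post_accumulate_grad_hook).
# `create_layerwise_galore_optimizer` is a hypothetical helper name.
import torch
from galore_torch import GaLoreAdamW


def create_layerwise_galore_optimizer(model, lr=1e-4, rank=128,
                                      update_proj_gap=200, scale=0.25):
    """Give every trainable parameter its own GaLore optimizer.

    Updates run inside gradient hooks, so only one parameter's gradient
    and optimizer state need to be live at any moment during backward.
    """
    optimizer_dict = {}
    for param in model.parameters():
        if not param.requires_grad:
            continue
        if param.dim() >= 2:  # GaLore projects 2-D weight matrices
            groups = [{"params": [param], "rank": rank,
                       "update_proj_gap": update_proj_gap,
                       "scale": scale, "proj_type": "std"}]
        else:  # biases / norm weights fall back to plain AdamW updates
            groups = [{"params": [param]}]
        optimizer_dict[param] = GaLoreAdamW(groups, lr=lr)

    def optimizer_hook(param):
        # Step and drop this parameter's gradient as soon as it is ready.
        optimizer_dict[param].step()
        optimizer_dict[param].zero_grad()

    for param in optimizer_dict:
        param.register_post_accumulate_grad_hook(optimizer_hook)

    return optimizer_dict

Because the real updates happen in the hooks, a Trainer-based setup typically receives only a dummy no-op optimizer; note also that hook-driven stepping fires on every backward pass, so this scheme does not compose with gradient accumulation across micro-batches.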