From 857696ed9c3b193ed1986f3c7cc708ec053c623c Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Tue, 7 Nov 2023 16:36:06 +0800
Subject: [PATCH] fix args

Former-commit-id: 44d0fa2ac6a6423c7ddaf91eb8998c1b9248c04e
---
 src/llmtuner/tuner/dpo/workflow.py | 4 ++--
 src/llmtuner/tuner/pt/workflow.py  | 4 ++--
 src/llmtuner/tuner/rm/workflow.py  | 4 ++--
 src/llmtuner/tuner/sft/workflow.py | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/llmtuner/tuner/dpo/workflow.py b/src/llmtuner/tuner/dpo/workflow.py
index 63968604..c4acb331 100644
--- a/src/llmtuner/tuner/dpo/workflow.py
+++ b/src/llmtuner/tuner/dpo/workflow.py
@@ -60,9 +60,9 @@ def run_dpo(
             plot_loss(training_args.output_dir, keys=["loss", "eval_loss"])
 
     if training_args.push_to_hub:
-        trainer.push_to_hub(**generate_model_card())
+        trainer.push_to_hub(**generate_model_card(model_args, data_args, finetuning_args))
     else:
-        trainer.create_model_card(**generate_model_card())
+        trainer.create_model_card(**generate_model_card(model_args, data_args, finetuning_args))
 
     # Evaluation
     if training_args.do_eval:
diff --git a/src/llmtuner/tuner/pt/workflow.py b/src/llmtuner/tuner/pt/workflow.py
index 002d2dd1..c7edff21 100644
--- a/src/llmtuner/tuner/pt/workflow.py
+++ b/src/llmtuner/tuner/pt/workflow.py
@@ -46,9 +46,9 @@ def run_pt(
             plot_loss(training_args.output_dir, keys=["loss", "eval_loss"])
 
     if training_args.push_to_hub:
-        trainer.push_to_hub(**generate_model_card())
+        trainer.push_to_hub(**generate_model_card(model_args, data_args, finetuning_args))
     else:
-        trainer.create_model_card(**generate_model_card())
+        trainer.create_model_card(**generate_model_card(model_args, data_args, finetuning_args))
 
     # Evaluation
     if training_args.do_eval:
diff --git a/src/llmtuner/tuner/rm/workflow.py b/src/llmtuner/tuner/rm/workflow.py
index c95f1cb6..eedec5e7 100644
--- a/src/llmtuner/tuner/rm/workflow.py
+++ b/src/llmtuner/tuner/rm/workflow.py
@@ -54,9 +54,9 @@ def run_rm(
             plot_loss(training_args.output_dir, keys=["loss", "eval_loss"])
 
     if training_args.push_to_hub:
-        trainer.push_to_hub(**generate_model_card())
+        trainer.push_to_hub(**generate_model_card(model_args, data_args, finetuning_args))
     else:
-        trainer.create_model_card(**generate_model_card())
+        trainer.create_model_card(**generate_model_card(model_args, data_args, finetuning_args))
 
     # Evaluation
     if training_args.do_eval:
diff --git a/src/llmtuner/tuner/sft/workflow.py b/src/llmtuner/tuner/sft/workflow.py
index dc22904b..04b37ac7 100644
--- a/src/llmtuner/tuner/sft/workflow.py
+++ b/src/llmtuner/tuner/sft/workflow.py
@@ -73,9 +73,9 @@ def run_sft(
             plot_loss(training_args.output_dir, keys=["loss", "eval_loss"])
 
     if training_args.push_to_hub:
-        trainer.push_to_hub(**generate_model_card())
+        trainer.push_to_hub(**generate_model_card(model_args, data_args, finetuning_args))
     else:
-        trainer.create_model_card(**generate_model_card())
+        trainer.create_model_card(**generate_model_card(model_args, data_args, finetuning_args))
 
     # Evaluation
     if training_args.do_eval: