From f0766a2ab0b2ca302cd40c0cb233004b2a4282dd Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Fri, 10 Nov 2023 14:38:18 +0800
Subject: [PATCH] add todo

Former-commit-id: 0bd884feb11736d0ab24ca19885151cb47d9dcd3
---
 src/llmtuner/hparams/model_args.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py
index 62404d9e..4b17c272 100644
--- a/src/llmtuner/hparams/model_args.py
+++ b/src/llmtuner/hparams/model_args.py
@@ -54,11 +54,11 @@ class ModelArguments:
         default=False,
         metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."}
     )
-    reward_model: Optional[str] = field(
+    reward_model: Optional[str] = field( # TODO: move it to FinetuningArguments
         default=None,
         metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
     )
-    plot_loss: Optional[bool] = field(
+    plot_loss: Optional[bool] = field( # TODO: move it to FinetuningArguments
         default=False,
         metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
     )
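
Note: for context, below is a minimal sketch of what resolving these two TODOs might look like. The field names, defaults, and help strings are taken verbatim from the patch above; the FinetuningArguments destination comes from the TODO comments themselves, but the exact placement within that class and its surrounding fields are assumptions, not part of this commit.

# Hypothetical sketch of the eventual move into FinetuningArguments
# (e.g. in src/llmtuner/hparams/finetuning_args.py); other fields of the
# class are omitted here, and this layout is an assumption about the refactor.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class FinetuningArguments:
    # Moved from ModelArguments per the TODOs in this patch.
    reward_model: Optional[str] = field(
        default=None,
        metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
    )
    plot_loss: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
    )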