Former-commit-id: a0c31c68c4909637b86c90c319c321fd887c4910
hiyouga 2023-11-10 14:38:18 +08:00
parent 0fbaa42752
commit 55e097aaac

@@ -54,11 +54,11 @@ class ModelArguments:
         default=False,
         metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."}
     )
-    reward_model: Optional[str] = field(
+    reward_model: Optional[str] = field( # TODO: move it to FinetuningArguments
         default=None,
         metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
     )
-    plot_loss: Optional[bool] = field(
+    plot_loss: Optional[bool] = field( # TODO: move it to FinetuningArguments
         default=False,
         metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
     )
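
The TODO comments mark these two fields as belonging with the fine-tuning options rather than the model options. A minimal sketch of what that move might look like, assuming a FinetuningArguments dataclass exists in the same hparams package; the field definitions below are copied verbatim from the diff above and the surrounding class body is illustrative only:

    from dataclasses import dataclass, field
    from typing import Optional


    @dataclass
    class FinetuningArguments:
        # Fields relocated from ModelArguments per the TODO comments.
        reward_model: Optional[str] = field(
            default=None,
            metadata={"help": "Path to the directory containing the checkpoints of the reward model."}
        )
        plot_loss: Optional[bool] = field(
            default=False,
            metadata={"help": "Whether to plot the training loss after fine-tuning or not."}
        )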