From 67f7034a212cdc7611457a4dc27f6edafc905d48 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 21 Dec 2023 17:33:01 +0800
Subject: [PATCH] fix param type

Former-commit-id: 11b99f344416ade1cdac52e11ba7f36fcf689221
---
 src/llmtuner/hparams/finetuning_args.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llmtuner/hparams/finetuning_args.py b/src/llmtuner/hparams/finetuning_args.py
index 7af896fa..1730a6b9 100644
--- a/src/llmtuner/hparams/finetuning_args.py
+++ b/src/llmtuner/hparams/finetuning_args.py
@@ -157,7 +157,7 @@ class ExportArguments:
         default=128,
         metadata={"help": "The number of samples used for quantization."}
     )
-    export_quantization_maxlen: Optional[str] = field(
+    export_quantization_maxlen: Optional[int] = field(
         default=1024,
         metadata={"help": "The maximum length of the model inputs used for quantization."}
     )
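
Note (not part of the patch): a minimal sketch of why the annotation fix matters, assuming ExportArguments is parsed with transformers' HfArgumentParser, which derives argparse option types from the dataclass field annotations; llmtuner's actual argument-parsing code is not shown in this patch, so that consumption path is an assumption.

    # Minimal sketch; assumes the dataclass is fed to HfArgumentParser as in transformers docs.
    from dataclasses import dataclass, field
    from typing import Optional

    from transformers import HfArgumentParser


    @dataclass
    class ExportArguments:
        export_quantization_maxlen: Optional[int] = field(
            default=1024,
            metadata={"help": "The maximum length of the model inputs used for quantization."}
        )


    # With Optional[int], a CLI value like "--export_quantization_maxlen 2048" is converted
    # to the integer 2048; under the old Optional[str] annotation it would stay the string "2048".
    parser = HfArgumentParser(ExportArguments)
    (export_args,) = parser.parse_args_into_dataclasses(["--export_quantization_maxlen", "2048"])
    print(type(export_args.export_quantization_maxlen))  # <class 'int'>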