From ce79528bb1e589812cf21f6f67682ef24101efa4 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 21 Dec 2023 17:33:01 +0800
Subject: [PATCH] fix param type

Former-commit-id: ba69378841778410f8004385df3fd4c41e5fa573
---
 src/llmtuner/hparams/finetuning_args.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llmtuner/hparams/finetuning_args.py b/src/llmtuner/hparams/finetuning_args.py
index 7af896fa..1730a6b9 100644
--- a/src/llmtuner/hparams/finetuning_args.py
+++ b/src/llmtuner/hparams/finetuning_args.py
@@ -157,7 +157,7 @@ class ExportArguments:
         default=128,
         metadata={"help": "The number of samples used for quantization."}
     )
-    export_quantization_maxlen: Optional[str] = field(
+    export_quantization_maxlen: Optional[int] = field(
         default=1024,
         metadata={"help": "The maximum length of the model inputs used for quantization."}
     )
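
Not part of the patch: a minimal sketch of why the annotation matters, assuming the hparams dataclasses are parsed with transformers.HfArgumentParser as in the rest of llmtuner. DemoExportArguments below is a hypothetical standalone copy of the field, not the real ExportArguments class. With the old Optional[str] annotation, a CLI value such as "--export_quantization_maxlen 2048" would be kept as the string "2048" while the default remained the int 1024, breaking length comparisons downstream; Optional[int] makes the parser coerce the value to int.

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class DemoExportArguments:
    # Mirrors the corrected field from ExportArguments (hypothetical standalone copy).
    export_quantization_maxlen: Optional[int] = field(
        default=1024,
        metadata={"help": "The maximum length of the model inputs used for quantization."}
    )


parser = HfArgumentParser(DemoExportArguments)
# Parse an explicit argv to show the type coercion; normally sys.argv is used.
(demo_args,) = parser.parse_args_into_dataclasses(args=["--export_quantization_maxlen", "2048"])
print(type(demo_args.export_quantization_maxlen), demo_args.export_quantization_maxlen)
# With the Optional[int] annotation this prints: <class 'int'> 2048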