diff --git a/README.md b/README.md
index cdcb2046..36351a65 100644
--- a/README.md
+++ b/README.md
@@ -479,7 +479,9 @@ python src/export_model.py \
     --adapter_name_or_path path_to_checkpoint \
     --template default \
     --finetuning_type lora \
-    --export_dir path_to_export
+    --export_dir path_to_export \
+    --export_size 2 \
+    --export_legacy_format False
 ```

 > [!WARNING]
diff --git a/README_zh.md b/README_zh.md
index 2adef5af..a5e13e3b 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -479,7 +479,9 @@ python src/export_model.py \
     --adapter_name_or_path path_to_checkpoint \
     --template default \
     --finetuning_type lora \
-    --export_dir path_to_export
+    --export_dir path_to_export \
+    --export_size 2 \
+    --export_legacy_format False
 ```

 > [!WARNING]
diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py
index 3f415bee..36ff1e3f 100644
--- a/src/llmtuner/hparams/model_args.py
+++ b/src/llmtuner/hparams/model_args.py
@@ -102,7 +102,7 @@ class ModelArguments:
         default=1024,
         metadata={"help": "The maximum length of the model inputs used for quantization."}
     )
-    export_lecacy_format: Optional[bool] = field(
+    export_legacy_format: Optional[bool] = field(
         default=False,
         metadata={"help": "Whether or not to save the `.bin` files instead of `.safetensors`."}
     )
diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py
index 033c20f5..63813edd 100644
--- a/src/llmtuner/train/tuner.py
+++ b/src/llmtuner/train/tuner.py
@@ -50,7 +50,7 @@ def export_model(args: Optional[Dict[str, Any]] = None):
     model.save_pretrained(
         save_directory=model_args.export_dir,
         max_shard_size="{}GB".format(model_args.export_size),
-        safe_serialization=(not model_args.export_lecacy_format)
+        safe_serialization=(not model_args.export_legacy_format)
     )

     try:
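
For context on what the renamed flag controls: `export_legacy_format` simply inverts `safe_serialization` when the merged model is saved, so the README default of `--export_legacy_format False` yields sharded `.safetensors` files capped at `--export_size` GB. Below is a minimal sketch of the equivalent `save_pretrained` call using plain `transformers`; the model path and output directory are placeholders for illustration, not part of this change.

```python
from transformers import AutoModelForCausalLM

# Placeholder paths, used only to illustrate the export call.
model = AutoModelForCausalLM.from_pretrained("path_to_model")

export_size = 2               # --export_size 2 (max shard size in GB)
export_legacy_format = False  # --export_legacy_format False

# Mirrors the call in src/llmtuner/train/tuner.py after the rename:
# safe_serialization=True writes .safetensors shards instead of .bin files.
model.save_pretrained(
    save_directory="path_to_export",
    max_shard_size="{}GB".format(export_size),
    safe_serialization=(not export_legacy_format),
)
```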