diff --git a/README.md b/README.md index 1a7ee616..bc7b3c4b 100644 --- a/README.md +++ b/README.md @@ -368,8 +368,7 @@ python src/export_model.py \ --template default \ --finetuning_type lora \ --checkpoint_dir path_to_checkpoint \ - --export_dir path_to_export \ - --fp16 + --export_dir path_to_export ``` ### API Demo diff --git a/README_zh.md b/README_zh.md index 6a893084..c0722d5c 100644 --- a/README_zh.md +++ b/README_zh.md @@ -367,8 +367,7 @@ python src/export_model.py \ --template default \ --finetuning_type lora \ --checkpoint_dir path_to_checkpoint \ - --output_dir path_to_export \ - --fp16 + --export_dir path_to_export ``` ### API 服务 diff --git a/src/llmtuner/tuner/core/loader.py b/src/llmtuner/tuner/core/loader.py index 8f35183c..ef0e5d01 100644 --- a/src/llmtuner/tuner/core/loader.py +++ b/src/llmtuner/tuner/core/loader.py @@ -240,4 +240,7 @@ def load_model_and_tokenizer( trainable_params, all_param, 100 * trainable_params / all_param )) + if not is_trainable: + logger.info("It is expected that the trainable params is 0 if you are using the model for inference only.") + return model, tokenizer