From 3a45cfb6040a4533bad4bfd36ca88a1521b5ec41 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Fri, 15 Dec 2023 23:52:50 +0800
Subject: [PATCH] update tips

Former-commit-id: 4432cbda6b7535bcbb40ba77df069fca631b4be8
---
 src/llmtuner/train/tuner.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py
index 984a28d0..66b53877 100644
--- a/src/llmtuner/train/tuner.py
+++ b/src/llmtuner/train/tuner.py
@@ -36,6 +36,10 @@ def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["Tra
 
 def export_model(args: Optional[Dict[str, Any]] = None):
     model_args, _, finetuning_args, _ = get_infer_args(args)
+
+    if model_args.adapter_name_or_path is not None and finetuning_args.export_quantization_bit is not None:
+        raise ValueError("Please merge adapters before quantizing the model.")
+
     model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args)
 
     if getattr(model, "quantization_method", None) and model_args.adapter_name_or_path is not None:
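
For context, a minimal standalone sketch of the guard this patch adds. The dataclasses below are hypothetical stand-ins for the project's `model_args` and `finetuning_args` objects; only the attribute names (`adapter_name_or_path`, `export_quantization_bit`) and the error message come from the diff above. The intent: quantized export is only allowed on a model with no unmerged adapters, so adapters must be merged in a separate export step first.

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class ModelArgs:  # hypothetical stand-in for model_args
    adapter_name_or_path: Optional[str] = None


@dataclass
class FinetuningArgs:  # hypothetical stand-in for finetuning_args
    export_quantization_bit: Optional[int] = None


def check_export(model_args: ModelArgs, finetuning_args: FinetuningArgs) -> None:
    # Same condition as the patch: refuse to quantize while adapters are still unmerged.
    if model_args.adapter_name_or_path is not None and finetuning_args.export_quantization_bit is not None:
        raise ValueError("Please merge adapters before quantizing the model.")


# Allowed: quantizing an already-merged model (no adapter path given).
check_export(ModelArgs(), FinetuningArgs(export_quantization_bit=4))

# Rejected: requesting adapter merge and quantization in the same export step.
try:
    check_export(
        ModelArgs(adapter_name_or_path="path/to/lora_adapter"),  # hypothetical path
        FinetuningArgs(export_quantization_bit=4),
    )
except ValueError as err:
    print(err)  # Please merge adapters before quantizing the model.
```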