mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-10-14 23:58:11 +08:00)
update tips
Former-commit-id: 4432cbda6b7535bcbb40ba77df069fca631b4be8
This commit is contained in: parent 393e4b0f5a, commit 3a45cfb604
@@ -36,6 +36,10 @@ def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["TrainerCallback"]] = None):


def export_model(args: Optional[Dict[str, Any]] = None):
    model_args, _, finetuning_args, _ = get_infer_args(args)

    if model_args.adapter_name_or_path is not None and finetuning_args.export_quantization_bit is not None:
        raise ValueError("Please merge adapters before quantizing the model.")

    model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args)

    if getattr(model, "quantization_method", None) and model_args.adapter_name_or_path is not None:
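For context, the guard in this hunk makes export_model() refuse to quantize a model while LoRA adapters are still unmerged. Below is a minimal usage sketch of this entry point, which per its signature accepts an argument dict; the import path and every key except adapter_name_or_path and export_quantization_bit are assumptions about this LLaMA-Factory version's hyperparameter names, not something confirmed by the diff.

    # Minimal sketch: export with adapters attached, leaving
    # export_quantization_bit unset so the ValueError above is not raised.
    from llmtuner import export_model  # import path is an assumption

    export_model({
        "model_name_or_path": "meta-llama/Llama-2-7b-hf",    # hypothetical base model
        "adapter_name_or_path": "saves/llama2-7b/lora/sft",  # hypothetical adapter dir
        "template": "default",
        "export_dir": "exported_model",                      # hypothetical output dir
    })

To produce a quantized export under this check, one would first merge and export the adapters as above, then rerun the export on the merged model with export_quantization_bit set and adapter_name_or_path omitted.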