Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-08-04 12:42:51 +08:00

Commit message: update tips

Commit: 7db6fe4754 (parent: 9a88387b91)
Former-commit-id: 3551171d49f0f6aa5f745d80f71939408c9bb3a7
@@ -36,6 +36,10 @@ def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["Tra
 
 def export_model(args: Optional[Dict[str, Any]] = None):
     model_args, _, finetuning_args, _ = get_infer_args(args)
+
+    if model_args.adapter_name_or_path is not None and finetuning_args.export_quantization_bit is not None:
+        raise ValueError("Please merge adapters before quantizing the model.")
+
     model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args)
 
     if getattr(model, "quantization_method", None) and model_args.adapter_name_or_path is not None:
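The new guard rejects configurations that load a LoRA adapter and quantize in the same export pass, pointing users at a two-step workflow: merge first, then quantize. The sketch below illustrates that workflow under stated assumptions; only adapter_name_or_path and export_quantization_bit appear in the diff above, while the import path and the other argument keys (model_name_or_path, template, export_dir, export_quantization_dataset, finetuning_type) are illustrative guesses about the surrounding LLaMA-Factory config and may differ in your version.

# Hypothetical two-step export sketch; keys other than adapter_name_or_path
# and export_quantization_bit are assumptions, not taken from this diff.
from llmtuner.train.tuner import export_model  # assumed import path

# Step 1: merge the LoRA adapter into the base model without quantization,
# so the new ValueError guard is not triggered.
export_model({
    "model_name_or_path": "path/to/base_model",   # assumed key
    "adapter_name_or_path": "path/to/lora_adapter",
    "finetuning_type": "lora",                     # assumed key
    "template": "default",                         # assumed key
    "export_dir": "path/to/merged_model",          # assumed key
})

# Step 2: quantize the already-merged model in a separate pass; no adapter
# is passed, so export_quantization_bit is now accepted.
export_model({
    "model_name_or_path": "path/to/merged_model",  # assumed key
    "template": "default",                         # assumed key
    "export_dir": "path/to/quantized_model",       # assumed key
    "export_quantization_bit": 4,
    "export_quantization_dataset": "path/to/calibration_data",  # assumed key
})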