From 654116c0b1938097d6e8b73920e6077f53ccc6a6 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Wed, 26 Jun 2024 19:43:16 +0800
Subject: [PATCH] fix #4556

Former-commit-id: 59e0b4f616736ede37cc37a13346b547f5a2d4e7
---
 scripts/loftq_init.py             | 8 ++++++--
 scripts/pissa_init.py             | 8 ++++++--
 src/llamafactory/data/template.py | 2 +-
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/scripts/loftq_init.py b/scripts/loftq_init.py
index b9506fa3..4d2c01b9 100644
--- a/scripts/loftq_init.py
+++ b/scripts/loftq_init.py
@@ -36,15 +36,19 @@ def quantize_loftq(
     lora_alpha: int = None,
     lora_rank: int = 16,
     lora_dropout: float = 0,
-    lora_target: str = "q_proj,v_proj",
+    lora_target: tuple = ("q_proj", "v_proj"),
     save_safetensors: bool = True,
 ):
     r"""
     Initializes LoRA weights with LoRA-fine-tuning-aware Quantization (LoftQ)
     Usage: python loftq_init.py --model_name_or_path path_to_model --output_dir output_dir
     """
+    if isinstance(lora_target, str):
+        lora_target = [name.strip() for name in lora_target.split(",")]
+
     tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
     model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True, torch_dtype="auto")
+
     loftq_config = LoftQConfig(loftq_bits=loftq_bits, loftq_iter=loftq_iter)
     lora_config = LoraConfig(
         task_type=TaskType.CAUSAL_LM,
@@ -52,7 +56,7 @@
         r=lora_rank,
         lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2,
         lora_dropout=lora_dropout,
-        target_modules=[name.strip() for name in lora_target.split(",")],
+        target_modules=lora_target,
         init_lora_weights="loftq",
         loftq_config=loftq_config,
     )
diff --git a/scripts/pissa_init.py b/scripts/pissa_init.py
index 50239727..ad9d161c 100644
--- a/scripts/pissa_init.py
+++ b/scripts/pissa_init.py
@@ -35,21 +35,25 @@ def quantize_pissa(
     lora_alpha: int = None,
     lora_rank: int = 16,
     lora_dropout: float = 0,
-    lora_target: str = "q_proj,v_proj",
+    lora_target: tuple = ("q_proj", "v_proj"),
     save_safetensors: bool = True,
 ):
     r"""
     Initializes LoRA weights with Principal Singular values and Singular vectors Adaptation (PiSSA)
     Usage: python pissa_init.py --model_name_or_path path_to_model --output_dir output_dir
     """
+    if isinstance(lora_target, str):
+        lora_target = [name.strip() for name in lora_target.split(",")]
+
     tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
     model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True, torch_dtype="auto")
+
     lora_config = LoraConfig(
         task_type=TaskType.CAUSAL_LM,
         r=lora_rank,
         lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2,
         lora_dropout=lora_dropout,
-        target_modules=[name.strip() for name in lora_target.split(",")],
+        target_modules=lora_target,
         init_lora_weights="pissa" if pissa_iter == -1 else "pissa_niter_{}".format(pissa_iter),
     )
 
diff --git a/src/llamafactory/data/template.py b/src/llamafactory/data/template.py
index 193ff482..53f16df4 100644
--- a/src/llamafactory/data/template.py
+++ b/src/llamafactory/data/template.py
@@ -618,7 +618,7 @@ _register_template(
 
 _register_template(
     name="default",
-    format_user=StringFormatter(slots=["Human: {{content}}\nAssistant: "]),
+    format_user=StringFormatter(slots=["Human: {{content}}\nAssistant:"]),
     format_system=StringFormatter(slots=["{{content}}\n"]),
     format_separator=EmptyFormatter(slots=["\n"]),
 )
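
Note on the `isinstance` guard in both scripts: they are exposed through `fire.Fire`, which parses a CLI value such as `--lora_target q_proj,v_proj` into a tuple of strings rather than a single string, so the old `lora_target.split(",")` would raise `AttributeError` on a tuple. The patch changes the default to a tuple and normalizes any remaining string input. A minimal sketch of that normalization, using a hypothetical `normalize_lora_target` helper that mirrors the patched logic:

```python
# Sketch of the lora_target normalization this patch introduces.
# normalize_lora_target is a hypothetical helper, for illustration only.
from typing import List, Sequence, Union


def normalize_lora_target(lora_target: Union[str, Sequence[str]]) -> List[str]:
    """Accept either "q_proj,v_proj" (string) or ("q_proj", "v_proj") (tuple)."""
    if isinstance(lora_target, str):
        # String form: split on commas and trim whitespace, as the patch does.
        return [name.strip() for name in lora_target.split(",")]
    # Tuple/list form: fire already split the value into module names.
    return list(lora_target)


assert normalize_lora_target("q_proj, v_proj") == ["q_proj", "v_proj"]
assert normalize_lora_target(("q_proj", "v_proj")) == ["q_proj", "v_proj"]
```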
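
The template.py hunk drops the trailing space after `Assistant:` in the `default` template. A plausible rationale, not stated in the commit message: BPE-style tokenizers typically fold a leading space into the following token, so a prompt already ending in `"Assistant: "` tokenizes the model's first word differently from the `"Assistant:" + " reply"` pattern seen in training. A tiny before/after check, with plain `str.replace` standing in for llamafactory's `StringFormatter`:

```python
# Before/after rendering of the "default" template's user slot.
before = "Human: {{content}}\nAssistant: ".replace("{{content}}", "Hi")
after = "Human: {{content}}\nAssistant:".replace("{{content}}", "Hi")

assert before.endswith(" ")      # old prompt ends with a dangling space
assert not after.endswith(" ")   # new prompt lets the model emit the space itself
```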