Former-commit-id: 59e0b4f616736ede37cc37a13346b547f5a2d4e7
hiyouga 2024-06-26 19:43:16 +08:00
parent 2300fb616b
commit 654116c0b1
3 changed files with 13 additions and 5 deletions

loftq_init.py

@@ -36,15 +36,19 @@ def quantize_loftq(
     lora_alpha: int = None,
     lora_rank: int = 16,
     lora_dropout: float = 0,
-    lora_target: str = "q_proj,v_proj",
+    lora_target: tuple = ("q_proj", "v_proj"),
     save_safetensors: bool = True,
 ):
     r"""
     Initializes LoRA weights with LoRA-fine-tuning-aware Quantization (LoftQ)
     Usage: python loftq_init.py --model_name_or_path path_to_model --output_dir output_dir
     """
+    if isinstance(lora_target, str):
+        lora_target = [name.strip() for name in lora_target.split(",")]
+
     tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
     model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True, torch_dtype="auto")
     loftq_config = LoftQConfig(loftq_bits=loftq_bits, loftq_iter=loftq_iter)
     lora_config = LoraConfig(
         task_type=TaskType.CAUSAL_LM,
@@ -52,7 +56,7 @@ def quantize_loftq(
         r=lora_rank,
         lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2,
         lora_dropout=lora_dropout,
-        target_modules=[name.strip() for name in lora_target.split(",")],
+        target_modules=lora_target,
         init_lora_weights="loftq",
         loftq_config=loftq_config,
     )
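
Context for the change above: these scripts are launched from the command line (per the Usage strings), and a fire-style CLI parses a bare comma-separated argument such as --lora_target q_proj,v_proj into the tuple ("q_proj", "v_proj") rather than a string, so the old lora_target.split(",") call would crash. The isinstance guard keeps the legacy string form working. A minimal sketch of the normalization in isolation, assuming a hypothetical normalize_target helper (the commit inlines this logic instead):

from typing import List, Sequence, Union

def normalize_target(lora_target: Union[str, Sequence[str]]) -> List[str]:
    """Hypothetical helper mirroring the guard added in this commit."""
    if isinstance(lora_target, str):
        # Legacy form: one comma-separated string, e.g. "q_proj, v_proj".
        return [name.strip() for name in lora_target.split(",")]
    # New form: an already-split sequence, e.g. ("q_proj", "v_proj").
    return list(lora_target)

assert normalize_target("q_proj, v_proj") == ["q_proj", "v_proj"]
assert normalize_target(("q_proj", "v_proj")) == ["q_proj", "v_proj"]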

pissa_init.py

@@ -35,21 +35,25 @@ def quantize_pissa(
     lora_alpha: int = None,
     lora_rank: int = 16,
     lora_dropout: float = 0,
-    lora_target: str = "q_proj,v_proj",
+    lora_target: tuple = ("q_proj", "v_proj"),
     save_safetensors: bool = True,
 ):
     r"""
     Initializes LoRA weights with Principal Singular values and Singular vectors Adaptation (PiSSA)
     Usage: python pissa_init.py --model_name_or_path path_to_model --output_dir output_dir
     """
+    if isinstance(lora_target, str):
+        lora_target = [name.strip() for name in lora_target.split(",")]
+
     tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
     model = AutoModelForCausalLM.from_pretrained(model_name_or_path, trust_remote_code=True, torch_dtype="auto")
     lora_config = LoraConfig(
         task_type=TaskType.CAUSAL_LM,
         r=lora_rank,
         lora_alpha=lora_alpha if lora_alpha is not None else lora_rank * 2,
         lora_dropout=lora_dropout,
-        target_modules=[name.strip() for name in lora_target.split(",")],
+        target_modules=lora_target,
         init_lora_weights="pissa" if pissa_iter == -1 else "pissa_niter_{}".format(pissa_iter),
     )
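
The unchanged init_lora_weights line above selects between the two PiSSA initialization modes PEFT accepts: "pissa" runs a full SVD, while "pissa_niter_N" runs faster randomized SVD with N subspace iterations. A small sketch of how that string is formed; the iteration values below are illustrative, not from the commit:

# Illustrative values only; mirrors the expression in the hunk above.
for pissa_iter in (-1, 4, 16):
    init = "pissa" if pissa_iter == -1 else "pissa_niter_{}".format(pissa_iter)
    print(pissa_iter, "->", init)
# -1 -> pissa            (exact SVD initialization)
#  4 -> pissa_niter_4    (fast randomized SVD, 4 iterations)
# 16 -> pissa_niter_16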