Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-04 20:52:59 +08:00)
Update adapter.py

Former-commit-id: 2111b586b648caa150a8e41877c7fede75911da8
Parent: 3dccd3c67e
Commit: e5b4cb62e0
@@ -139,20 +139,18 @@ def init_adapter(
             "lora_alpha": finetuning_args.lora_alpha,
             "lora_dropout": finetuning_args.lora_dropout,
             "use_rslora": finetuning_args.use_rslora,
+            "modules_to_save": finetuning_args.additional_target,
         }

         if model_args.use_unsloth:
             from unsloth import FastLanguageModel  # type: ignore

             unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
-            if finetuning_args.additional_target:
-                unsloth_peft_kwargs["modules_to_save"] = finetuning_args.additional_target
             model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
         else:
             lora_config = LoraConfig(
                 task_type=TaskType.CAUSAL_LM,
                 inference_mode=False,
-                modules_to_save=finetuning_args.additional_target,
                 use_dora=finetuning_args.use_dora,
                 **peft_kwargs,
             )
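For context, a minimal sketch of how this branch of init_adapter reads after the change. Only the lines in the hunk above are confirmed by the diff; the "r" and "target_modules" entries, the wrapper name build_lora_model, and the final get_peft_model(model, lora_config) call are illustrative assumptions about the surrounding file.

    # Hypothetical wrapper around the hunk above; only the diffed lines are
    # confirmed, the rest is a sketch of the assumed surrounding code.
    from peft import LoraConfig, TaskType, get_peft_model

    def build_lora_model(model, model_args, finetuning_args, target_modules):
        peft_kwargs = {
            "r": finetuning_args.lora_rank,    # assumed entry, not in the hunk
            "target_modules": target_modules,  # assumed entry, not in the hunk
            "lora_alpha": finetuning_args.lora_alpha,
            "lora_dropout": finetuning_args.lora_dropout,
            "use_rslora": finetuning_args.use_rslora,
            # After this commit the extra trainable modules are set once here
            # and flow to both backends; LoraConfig accepts modules_to_save=None,
            # so no conditional is needed when additional_target is unset.
            "modules_to_save": finetuning_args.additional_target,
        }

        if model_args.use_unsloth:
            from unsloth import FastLanguageModel  # type: ignore

            # modules_to_save now reaches unsloth through **peft_kwargs,
            # replacing the removed conditional assignment.
            unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
            model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
        else:
            lora_config = LoraConfig(
                task_type=TaskType.CAUSAL_LM,
                inference_mode=False,
                use_dora=finetuning_args.use_dora,
                **peft_kwargs,
            )
            model = get_peft_model(model, lora_config)  # assumed call, not in the hunk

        return model

The net effect is a small deduplication: the unsloth path and the plain PEFT path previously each carried their own additional_target handling, and both now read modules_to_save from the shared peft_kwargs dict.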