Update adapter.py

Former-commit-id: 2111b586b648caa150a8e41877c7fede75911da8
Author: hoshi-hiyouga (committed by GitHub)
Date: 2024-04-10 00:57:30 +08:00
Parent: 3dccd3c67e
Commit: e5b4cb62e0


@@ -139,20 +139,18 @@ def init_adapter(
             "lora_alpha": finetuning_args.lora_alpha,
             "lora_dropout": finetuning_args.lora_dropout,
             "use_rslora": finetuning_args.use_rslora,
+            "modules_to_save": finetuning_args.additional_target,
         }
 
         if model_args.use_unsloth:
             from unsloth import FastLanguageModel  # type: ignore
 
-            unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
-            if finetuning_args.additional_target:
-                unsloth_peft_kwargs["modules_to_save"] = finetuning_args.additional_target
-
+            unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
             model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
         else:
             lora_config = LoraConfig(
                 task_type=TaskType.CAUSAL_LM,
                 inference_mode=False,
-                modules_to_save=finetuning_args.additional_target,
                 use_dora=finetuning_args.use_dora,
                 **peft_kwargs,
            )
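For reference, a minimal sketch of the pattern the new version adopts, assuming the Hugging Face peft API; the values below are placeholders, not LLaMA-Factory's defaults. The point of the change: modules_to_save is set once in the shared peft_kwargs dict, so both the Unsloth branch and the plain-PEFT branch receive it from one place instead of each wiring finetuning_args.additional_target separately.

# Minimal sketch (hedged): peft's LoraConfig, with placeholder values.
from peft import LoraConfig, TaskType

additional_target = None  # stands in for finetuning_args.additional_target

peft_kwargs = {
    "r": 8,
    "target_modules": ["q_proj", "v_proj"],
    "lora_alpha": 16,
    "lora_dropout": 0.05,
    "use_rslora": False,
    "modules_to_save": additional_target,  # now set once, in the shared dict
}

# Plain-PEFT branch: unpacking peft_kwargs already supplies modules_to_save,
# so keeping the explicit keyword argument the commit removes would raise
# "TypeError: got multiple values for keyword argument 'modules_to_save'".
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,
    use_dora=False,  # requires a peft release that supports DoRA
    **peft_kwargs,
)

On the Unsloth side, FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs) now picks up modules_to_save from the same shared dict, which is why the per-branch conditional could be dropped.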