From e5b4cb62e02a8abe870adc958f1da2b07074cd0e Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Wed, 10 Apr 2024 00:57:30 +0800
Subject: [PATCH] Update adapter.py

Former-commit-id: 2111b586b648caa150a8e41877c7fede75911da8
---
 src/llmtuner/model/adapter.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py
index 861b8008..bf206907 100644
--- a/src/llmtuner/model/adapter.py
+++ b/src/llmtuner/model/adapter.py
@@ -139,20 +139,18 @@ def init_adapter(
                 "lora_alpha": finetuning_args.lora_alpha,
                 "lora_dropout": finetuning_args.lora_dropout,
                 "use_rslora": finetuning_args.use_rslora,
+                "modules_to_save": finetuning_args.additional_target,
             }

             if model_args.use_unsloth:
                 from unsloth import FastLanguageModel  # type: ignore

-                unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
-                if finetuning_args.additional_target:
-                    unsloth_peft_kwargs["modules_to_save"] = finetuning_args.additional_target
+                unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
                 model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
             else:
                 lora_config = LoraConfig(
                     task_type=TaskType.CAUSAL_LM,
                     inference_mode=False,
-                    modules_to_save=finetuning_args.additional_target,
                     use_dora=finetuning_args.use_dora,
                     **peft_kwargs,
                 )
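
Below is a minimal sketch (not part of the patch) of the control flow after this
change, assuming the public peft API (LoraConfig, TaskType, get_peft_model) and
unsloth's FastLanguageModel.get_peft_model as already used in the original file;
the function name attach_lora and the model_args / finetuning_args objects are
illustrative stand-ins for the project's argument dataclasses. The point of the
change: modules_to_save (taken from finetuning_args.additional_target) now lives
in the shared peft_kwargs dict, so the unsloth branch no longer needs its own
three-line special case and the plain LoRA branch no longer passes it separately
to LoraConfig.

from peft import LoraConfig, TaskType, get_peft_model


def attach_lora(model, model_args, finetuning_args):
    # Shared kwargs consumed by both the unsloth and the plain PEFT path.
    # (The real code also resolves and passes r / target_modules here.)
    peft_kwargs = {
        "lora_alpha": finetuning_args.lora_alpha,
        "lora_dropout": finetuning_args.lora_dropout,
        "use_rslora": finetuning_args.use_rslora,
        "modules_to_save": finetuning_args.additional_target,  # now shared by both branches
    }

    if model_args.use_unsloth:
        from unsloth import FastLanguageModel  # type: ignore

        # unsloth-specific arguments stay in their own dict; the rest is shared.
        unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
        return FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)

    lora_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        inference_mode=False,
        use_dora=finetuning_args.use_dora,
        **peft_kwargs,
    )
    return get_peft_model(model, lora_config)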