From 95a4589bbf8c05df47751e07865b0458617ded17 Mon Sep 17 00:00:00 2001
From: Erich Schubert
Date: Tue, 9 Apr 2024 17:53:40 +0200
Subject: [PATCH 1/3] Pass additional_target to unsloth

Fixes #3200

Former-commit-id: f8f87f5b0549cba6a011749c42064047f82ba577
---
 src/llmtuner/model/adapter.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py
index eb6d3878..861b8008 100644
--- a/src/llmtuner/model/adapter.py
+++ b/src/llmtuner/model/adapter.py
@@ -145,6 +145,8 @@ def init_adapter(
             from unsloth import FastLanguageModel  # type: ignore
 
             unsloth_peft_kwargs = {"model": model, "max_seq_length": model_args.model_max_length}
+            if finetuning_args.additional_target:
+                unsloth_peft_kwargs["modules_to_save"] = finetuning_args.additional_target
             model = FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
         else:
             lora_config = LoraConfig(