From 9d9f8c6531fdc581d9ac8c26047597341bbb2d01 Mon Sep 17 00:00:00 2001
From: ancv
Date: Sat, 15 Jun 2024 23:00:55 +0700
Subject: [PATCH] remove some unused params

Former-commit-id: 04315c3d92ecc25537e45d5807cb38bc290dcb16
---
 src/llamafactory/data/processors/supervised.py | 2 +-
 src/llamafactory/model/model_utils/packing.py  | 2 +-
 src/llamafactory/train/kto/workflow.py         | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/llamafactory/data/processors/supervised.py b/src/llamafactory/data/processors/supervised.py
index 3406576b..35640174 100644
--- a/src/llamafactory/data/processors/supervised.py
+++ b/src/llamafactory/data/processors/supervised.py
@@ -10,7 +10,7 @@ if TYPE_CHECKING:
     from transformers import ProcessorMixin
     from transformers.tokenization_utils import PreTrainedTokenizer
 
-    from ...hparams import DataArguments, FinetuningArguments
+    from ...hparams import DataArguments
     from ..template import Template
 
diff --git a/src/llamafactory/model/model_utils/packing.py b/src/llamafactory/model/model_utils/packing.py
index fe718ebb..9b7359be 100644
--- a/src/llamafactory/model/model_utils/packing.py
+++ b/src/llamafactory/model/model_utils/packing.py
@@ -239,7 +239,7 @@ def configure_packing(config: "PretrainedConfig") -> None:
     attn_implementation = getattr(config, "_attn_implementation", None)
     if attn_implementation != "flash_attention_2":
-        raise ValueError("Efficient packing only supports for flash_attention_2. Please set config `flash_attn` is fa2" + " " + attn_implementation)
+        raise ValueError("Efficient packing only supports for flash_attention_2. Please set config `flash_attn` is fa2")
 
 
 logger = get_logger(__name__)
diff --git a/src/llamafactory/train/kto/workflow.py b/src/llamafactory/train/kto/workflow.py
index f003e157..c79b160b 100644
--- a/src/llamafactory/train/kto/workflow.py
+++ b/src/llamafactory/train/kto/workflow.py
@@ -24,7 +24,7 @@ def run_kto(
 ):
     tokenizer_module = load_tokenizer(model_args)
     tokenizer = tokenizer_module["tokenizer"]
-    dataset = get_dataset(model_args, data_args, training_args, finetuning_args, stage="kto", **tokenizer_module)
+    dataset = get_dataset(model_args, data_args, training_args, stage="kto", **tokenizer_module)
     model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train)
     data_collator = KTODataCollatorWithPadding(