Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-24 23:02:49 +08:00)
remove some unused params
Former-commit-id: 04315c3d92ecc25537e45d5807cb38bc290dcb16
parent 045eb155a2
commit 9d9f8c6531
@@ -10,7 +10,7 @@ if TYPE_CHECKING:
     from transformers import ProcessorMixin
     from transformers.tokenization_utils import PreTrainedTokenizer
 
-    from ...hparams import DataArguments, FinetuningArguments
+    from ...hparams import DataArguments
     from ..template import Template
 
 
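The dropped name lives in an `if TYPE_CHECKING:` block, which static type checkers evaluate but the interpreter never executes, so once no annotation in the module references `FinetuningArguments` the import is dead code. A minimal sketch of the pattern, reusing the `ProcessorMixin` import from this hunk:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Evaluated only by type checkers; TYPE_CHECKING is False at runtime,
        # so an unused name here is pure dead weight and safe to remove.
        from transformers import ProcessorMixin


    def describe(processor: "ProcessorMixin") -> str:
        # The string annotation is resolved lazily, so no runtime import is needed.
        return type(processor).__name__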
@@ -239,7 +239,7 @@ def configure_packing(config: "PretrainedConfig") -> None:
     attn_implementation = getattr(config, "_attn_implementation", None)
 
     if attn_implementation != "flash_attention_2":
-        raise ValueError("Efficient packing only supports for flash_attention_2. Please set config `flash_attn` is fa2" + " " + attn_implementation)
+        raise ValueError("Efficient packing only supports for flash_attention_2. Please set config `flash_attn` is fa2")
 
 logger = get_logger(__name__)
 
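Beyond trimming the message, dropping the `+ " " + attn_implementation` suffix has a useful side effect: the `getattr` call defaults the variable to `None`, and concatenating `None` onto a `str` raises `TypeError`, so the old construction could itself crash while trying to report the error. A quick illustration (my inference, not stated in the commit):

    attn_implementation = None  # the getattr(..., None) default when the attribute is unset

    try:
        # Mirrors the removed message construction with an unset implementation.
        message = "Efficient packing only supports for flash_attention_2." + " " + attn_implementation
    except TypeError as exc:
        print(exc)  # can only concatenate str (not "NoneType") to str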
@@ -24,7 +24,7 @@ def run_kto(
 ):
     tokenizer_module = load_tokenizer(model_args)
     tokenizer = tokenizer_module["tokenizer"]
-    dataset = get_dataset(model_args, data_args, training_args, finetuning_args, stage="kto", **tokenizer_module)
+    dataset = get_dataset(model_args, data_args, training_args, stage="kto", **tokenizer_module)
     model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train)
 
     data_collator = KTODataCollatorWithPadding(
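Mechanically, the extra positional had to go once `get_dataset` no longer accepts `finetuning_args`: the stray argument would bind to the next parameter and collide with the `stage` keyword. A toy stand-in (hypothetical names, not the real signature) showing why the old call shape fails against the new parameter list:

    def get_dataset_stub(model_args, data_args, training_args, stage, **tokenizer_module):
        # Stand-in with the post-commit parameter list.
        return f"dataset for stage={stage}"

    try:
        get_dataset_stub("m", "d", "t", "f", stage="kto")  # old call shape
    except TypeError as exc:
        print(exc)  # got multiple values for argument 'stage'

    print(get_dataset_stub("m", "d", "t", stage="kto"))  # new call shape

Note that `finetuning_args` is still live in this function: it is passed to `load_model` two lines later, so only the `get_dataset` call site changes.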