fix flashattn + packing

hiyouga
2024-07-21 17:07:45 +08:00
parent ad71296a7c
commit 4135e69406
4 changed files with 25 additions and 20 deletions

@@ -37,7 +37,7 @@ def _encode_unsupervised_example(
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
-    data_args: "DataArguments",
+    cutoff_len: int,
 ) -> Tuple[List[int], List[int]]:
     if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
         prompt[0]["content"] = template.image_token + prompt[0]["content"]
@@ -55,7 +55,7 @@ def _encode_unsupervised_example(
         image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
         input_ids = [image_token_id] * getattr(processor, "image_seq_length") + input_ids
-    source_len, target_len = infer_seqlen(len(input_ids), len(labels), data_args.cutoff_len)
+    source_len, target_len = infer_seqlen(len(input_ids), len(labels), cutoff_len)
     input_ids = input_ids[:source_len]
     labels = labels[:target_len]
     return input_ids, labels
@@ -88,7 +88,7 @@ def preprocess_unsupervised_dataset(
             template=template,
             tokenizer=tokenizer,
             processor=processor,
-            data_args=data_args,
+            cutoff_len=data_args.cutoff_len,
         )
         model_inputs["input_ids"].append(input_ids)
         model_inputs["attention_mask"].append([1] * len(input_ids))