# mirror of https://github.com/hiyouga/LLaMA-Factory.git
# synced 2025-09-12 16:12:48 +08:00
import bisect
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple

from ...extras.constants import IGNORE_INDEX
from ...extras.logging import get_logger
from .mm_utils import get_paligemma_token_type_ids, get_pixel_values


if TYPE_CHECKING:
    from transformers import ProcessorMixin
    from transformers.tokenization_utils import PreTrainedTokenizer

    from ...hparams import DataArguments
    from ..template import Template


logger = get_logger(__name__)

def search_for_fit(numbers: Sequence[int], capacity: int) -> int:
    r"""
    Finds the index of the largest number that fits into the knapsack with the given capacity.
    """
    index = bisect.bisect(numbers, capacity)
    return -1 if index == 0 else (index - 1)

def greedy_knapsack(numbers: List[int], capacity: int) -> List[List[int]]:
    r"""
    An efficient greedy algorithm with binary search for the knapsack problem.
    """
    numbers.sort()  # sort numbers in ascending order for binary search
    knapsacks = []

    while numbers:
        current_knapsack = []
        remaining_capacity = capacity

        while True:
            index = search_for_fit(numbers, remaining_capacity)
            if index == -1:
                break  # no more numbers fit in this knapsack

            remaining_capacity -= numbers[index]  # update the remaining capacity
            current_knapsack.append(numbers.pop(index))  # add the number to knapsack

        knapsacks.append(current_knapsack)

    return knapsacks

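# A minimal usage sketch of the packing helpers above, with made-up lengths
# (not taken from a real run):
#
#     >>> greedy_knapsack([3, 5, 2, 7, 1], capacity=8)
#     [[7, 1], [5, 3], [2]]
#
# Each inner list is one bin whose total stays within the capacity. Note that
# `greedy_knapsack` sorts and consumes its input list in place.
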
def _encode_supervised_example(
    prompt: Sequence[Dict[str, str]],
    response: Sequence[Dict[str, str]],
    system: Optional[str],
    tools: Optional[str],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"],
    data_args: "DataArguments",
) -> Tuple[List[int], List[int]]:
    if processor is not None and not hasattr(processor, "image_seq_length"):  # llava-like models
        prompt[0]["content"] = template.image_token + prompt[0]["content"]

    messages = prompt + response
    input_ids, labels = [], []

    if processor is not None and hasattr(processor, "image_seq_length"):  # paligemma models
        image_token_id = tokenizer.convert_tokens_to_ids(template.image_token)
        input_ids += [image_token_id] * getattr(processor, "image_seq_length")
        labels += [IGNORE_INDEX] * getattr(processor, "image_seq_length")

    encoded_pairs = template.encode_multiturn(
        tokenizer, messages, system, tools, data_args.cutoff_len, data_args.reserved_label_len
    )
    for turn_idx, (source_ids, target_ids) in enumerate(encoded_pairs):
        if data_args.train_on_prompt:
            source_mask = source_ids
        elif turn_idx != 0 and template.efficient_eos:
            source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1)
        else:
            source_mask = [IGNORE_INDEX] * len(source_ids)

        input_ids += source_ids + target_ids
        labels += source_mask + target_ids

    if template.efficient_eos:
        input_ids += [tokenizer.eos_token_id]
        labels += [tokenizer.eos_token_id]

    return input_ids, labels

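# To illustrate the masking above: for a single-turn example whose prompt
# encodes to [p1, p2, p3] and whose response encodes to [r1, r2, eos] (the
# exact token layout depends on the template), the default path
# (train_on_prompt=False, efficient_eos=False) produces:
#
#     input_ids: [p1, p2, p3, r1, r2, eos]
#     labels:    [IGNORE_INDEX, IGNORE_INDEX, IGNORE_INDEX, r1, r2, eos]
#
# so the loss is computed only on the response tokens.
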
def preprocess_supervised_dataset(
    examples: Dict[str, List[Any]],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"],
    data_args: "DataArguments",
) -> Dict[str, List[List[int]]]:
    # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
    # for multiturn examples, we only mask the prompt part in each prompt-response pair.
    model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
    if processor is not None:
        model_inputs["pixel_values"] = []
        if hasattr(processor, "image_seq_length"):  # paligemma models
            model_inputs["token_type_ids"] = []

    for i in range(len(examples["prompt"])):
        if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1:
            logger.warning("Dropped invalid example: {}".format(examples["prompt"][i] + examples["response"][i]))
            continue

        input_ids, labels = _encode_supervised_example(
            prompt=examples["prompt"][i],
            response=examples["response"][i],
            system=examples["system"][i],
            tools=examples["tools"][i],
            template=template,
            tokenizer=tokenizer,
            processor=processor,
            data_args=data_args,
        )
        model_inputs["input_ids"].append(input_ids)
        model_inputs["attention_mask"].append([1] * len(input_ids))
        model_inputs["labels"].append(labels)
        if processor is not None:
            model_inputs["pixel_values"].append(get_pixel_values(examples["images"][i], processor))
            if hasattr(processor, "image_seq_length"):  # paligemma models
                model_inputs["token_type_ids"].append(get_paligemma_token_type_ids(len(input_ids), processor))

    return model_inputs

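# A minimal sketch of how this function is typically wired into a Hugging Face
# `datasets.Dataset` pipeline; the `dataset`, `template`, and `data_args`
# objects are assumed to come from the surrounding training setup:
#
#     from functools import partial
#
#     preprocess_func = partial(
#         preprocess_supervised_dataset,
#         template=template,
#         tokenizer=tokenizer,
#         processor=None,
#         data_args=data_args,
#     )
#     dataset = dataset.map(preprocess_func, batched=True, remove_columns=dataset.column_names)
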
def preprocess_packed_supervised_dataset(
    examples: Dict[str, List[Any]],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    data_args: "DataArguments",
) -> Dict[str, List[List[int]]]:
    # build inputs with format `<bos> X1 Y1 <eos> <bos> X2 Y2 <eos>`
    # and labels with format `<ignore> ... <ignore> Y1 <eos> <ignore> ... <ignore> Y2 <eos>`
    valid_num = 0
    batch_input_ids, batch_labels = [], []
    lengths = []
    length2indexes = defaultdict(list)
    for i in range(len(examples["prompt"])):
        if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1:
            logger.warning("Dropped invalid example: {}".format(examples["prompt"][i] + examples["response"][i]))
            continue

        input_ids, labels = _encode_supervised_example(
            prompt=examples["prompt"][i],
            response=examples["response"][i],
            system=examples["system"][i],
            tools=examples["tools"][i],
            template=template,
            tokenizer=tokenizer,
            processor=None,
            data_args=data_args,
        )
        length = len(input_ids)
        if length > data_args.cutoff_len:
            logger.warning("Dropped lengthy example with length {} > {}.".format(length, data_args.cutoff_len))
        else:
            lengths.append(length)
            length2indexes[length].append(valid_num)
            batch_input_ids.append(input_ids)
            batch_labels.append(labels)
            valid_num += 1

    model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
    knapsacks = greedy_knapsack(lengths, data_args.cutoff_len)
    for knapsack in knapsacks:
        packed_input_ids, packed_labels = [], []
        for length in knapsack:
            index = length2indexes[length].pop()
            packed_input_ids += batch_input_ids[index]
            packed_labels += batch_labels[index]

        if len(packed_input_ids) <= data_args.cutoff_len:
            pad_length = data_args.cutoff_len - len(packed_input_ids)
            packed_input_ids += [tokenizer.pad_token_id] * pad_length
            packed_labels += [IGNORE_INDEX] * pad_length
        else:
            raise ValueError("The length of packed example exceeds the cutoff length.")

        model_inputs["input_ids"].append(packed_input_ids)
        model_inputs["attention_mask"].append([1] * len(packed_input_ids))
        model_inputs["labels"].append(packed_labels)

    return model_inputs

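# Packing illustrated with made-up numbers: with cutoff_len=8 and valid encoded
# lengths [3, 5, 2, 7, 1], greedy_knapsack returns [[7, 1], [5, 3], [2]], so the
# first packed sequence concatenates the length-7 and length-1 examples (8
# tokens, no padding), the second packs the length-5 and length-3 examples, and
# the last holds the length-2 example padded with pad tokens to 8 tokens.
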
def print_supervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None:
    valid_labels = list(filter(lambda x: x != IGNORE_INDEX, example["labels"]))
    print("input_ids:\n{}".format(example["input_ids"]))
    print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
    print("label_ids:\n{}".format(example["labels"]))
    print("labels:\n{}".format(tokenizer.decode(valid_labels, skip_special_tokens=False)))
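
# Typical debugging usage, assuming `dataset` has already been preprocessed by
# one of the functions above:
#
#     print_supervised_dataset_example(dataset[0], tokenizer)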