mirror of https://github.com/hiyouga/LLaMA-Factory.git
fix mixed mm inputs and rlhf-v
@@ -21,6 +21,7 @@ from .processor_utils import infer_seqlen

 if TYPE_CHECKING:
+    from PIL.Image import Image
     from transformers import PreTrainedTokenizer, ProcessorMixin

     from ...hparams import DataArguments
@@ -35,25 +36,30 @@ def _encode_unsupervised_example(
     response: Sequence[Dict[str, str]],
     system: Optional[str],
     tools: Optional[str],
+    images: Sequence["Image"],
     template: "Template",
     tokenizer: "PreTrainedTokenizer",
     processor: Optional["ProcessorMixin"],
     cutoff_len: int,
-) -> Tuple[List[int], List[int]]:
+) -> Tuple[List[int], List[int], Dict[str, Any]]:
     if len(response) == 1:
         messages = prompt + response
     else:
         messages = prompt + [{"role": Role.ASSISTANT.value, "content": ""}]

+    messages = template.mm_plugin.process_messages(messages, images, processor)
     input_ids, labels = template.encode_oneturn(tokenizer, messages, system, tools)
     if template.efficient_eos:
         labels += [tokenizer.eos_token_id]

-    input_ids, _ = template.mm_plugin.process_token_ids(input_ids, None, tokenizer, processor)
+    input_ids, _ = template.mm_plugin.process_token_ids(input_ids, None, images, tokenizer, processor)
     source_len, target_len = infer_seqlen(len(input_ids), len(labels), cutoff_len)
     input_ids = input_ids[:source_len]
     labels = labels[:target_len]
-    return input_ids, labels
+    extra_inputs = template.mm_plugin.get_mm_inputs(
+        images=images, feature_seqlens={"token_type_ids": len(input_ids)}, processor=processor
+    )
+    return input_ids, labels, extra_inputs


 def preprocess_unsupervised_dataset(
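For context, the shape of the new contract can be sketched in isolation. The toy plugin below is hypothetical (it stands in for the real `mm_plugin`, whose implementation is not part of this diff); it only illustrates that `_encode_unsupervised_example` now returns a third value: a dict of extra multimodal features produced by `get_mm_inputs`.

```python
# Hypothetical stand-in for template.mm_plugin -- illustrates the interface
# the diff relies on, not the actual implementation.
from typing import Any, Dict, List, Sequence, Tuple


class ToyMMPlugin:
    def get_mm_inputs(
        self, images: Sequence[Any], feature_seqlens: Dict[str, int], processor: Any
    ) -> Dict[str, Any]:
        # One dummy feature per image, plus token_type_ids sized to the
        # prompt, mirroring feature_seqlens={"token_type_ids": ...}.
        return {
            "pixel_values": [[0.0, 0.0] for _ in images],
            "token_type_ids": [0] * feature_seqlens["token_type_ids"],
        }


def encode_example(
    input_ids: List[int], labels: List[int], images: Sequence[Any]
) -> Tuple[List[int], List[int], Dict[str, Any]]:
    plugin = ToyMMPlugin()
    extra_inputs = plugin.get_mm_inputs(
        images=images, feature_seqlens={"token_type_ids": len(input_ids)}, processor=None
    )
    return input_ids, labels, extra_inputs


ids, labs, extra = encode_example([1, 2, 3], [-100, -100, 3], images=["img0"])
assert set(extra) == {"pixel_values", "token_type_ids"}
```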
@@ -70,12 +76,12 @@ def preprocess_unsupervised_dataset(
             logger.warning("Dropped invalid example: {}".format(examples["prompt"][i] + examples["response"][i]))
             continue

-        prompt = template.mm_plugin.process_messages(examples["prompt"][i], examples["images"][i], processor)
-        input_ids, labels = _encode_unsupervised_example(
-            prompt=prompt,
+        input_ids, labels, extra_inputs = _encode_unsupervised_example(
+            prompt=examples["prompt"][i],
             response=examples["response"][i],
             system=examples["system"][i],
             tools=examples["tools"][i],
+            images=examples["images"][i],
             template=template,
             tokenizer=tokenizer,
             processor=processor,
@@ -84,12 +90,8 @@ def preprocess_unsupervised_dataset(
         model_inputs["input_ids"].append(input_ids)
         model_inputs["attention_mask"].append([1] * len(input_ids))
         model_inputs["labels"].append(labels)
-        template.mm_plugin.process_model_inputs(
-            model_inputs=model_inputs,
-            images=examples["images"][i],
-            feature_seqlens={"token_type_ids": len(input_ids)},
-            processor=processor,
-        )
+        for key, value in extra_inputs.items():
+            model_inputs[key].append(value)

     return model_inputs

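The replacement for `process_model_inputs` relies on `model_inputs` behaving like a dict of lists, so each per-example multimodal feature is appended under its own key right next to `input_ids` and `labels`. A minimal sketch of that pattern follows, assuming a `defaultdict(list)`; how `model_inputs` is actually initialized is not shown in this hunk.

```python
from collections import defaultdict

# model_inputs maps feature name -> list of per-example values; a defaultdict
# lets multimodal keys such as "pixel_values" appear on first append.
model_inputs = defaultdict(list)

# Pretend these tuples came from two calls to _encode_unsupervised_example.
encoded = [
    ([1, 2, 3], [-100, -100, 3], {"pixel_values": [[0.1, 0.2]]}),
    ([4, 5], [-100, 5], {"pixel_values": [[0.3, 0.4]]}),
]

for input_ids, labels, extra_inputs in encoded:
    model_inputs["input_ids"].append(input_ids)
    model_inputs["attention_mask"].append([1] * len(input_ids))
    model_inputs["labels"].append(labels)
    for key, value in extra_inputs.items():
        model_inputs[key].append(value)

# Every feature list stays aligned with input_ids, one entry per example.
assert len(model_inputs["pixel_values"]) == len(model_inputs["input_ids"])
```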