From 4583c534f96c2eeb3c0cd13d803dfff507200fc6 Mon Sep 17 00:00:00 2001
From: BUAADreamer <1428195643@qq.com>
Date: Mon, 13 May 2024 23:28:28 +0800
Subject: [PATCH] add yi-vl

Former-commit-id: 64dac4085e3949f20ab66e507cfb199b09189ead
---
 src/llmtuner/data/template.py      | 14 ++++++++++++++
 src/llmtuner/model/patcher.py      |  3 ++-
 src/llmtuner/model/utils/visual.py | 26 +++++++++++++++++++++++++-
 src/llmtuner/train/sft/trainer.py  | 12 ++++++++++--
 src/llmtuner/train/sft/workflow.py |  2 ++
 5 files changed, 53 insertions(+), 4 deletions(-)

diff --git a/src/llmtuner/data/template.py b/src/llmtuner/data/template.py
index ada6cfcd..7ab31147 100644
--- a/src/llmtuner/data/template.py
+++ b/src/llmtuner/data/template.py
@@ -856,6 +856,20 @@ _register_template(
 )
 
 
+_register_template(
+    name="yi-vl",
+    format_user=StringFormatter(slots=["### Human:\n{{content}}\n### Assistant: "]),
+    stop_words=["###"],
+    default_system=(
+        "This is a chat between an inquisitive human and an AI assistant. "
+        "Assume the role of the AI assistant. "
+        "Read all the images carefully, and respond to the human's questions with informative, helpful, detailed and polite answers."
+        "这是一个好奇的人类和一个人工智能助手之间的对话。"
+        "假设你扮演这个AI助手的角色。仔细阅读所有的图像,并对人类的问题做出信息丰富、有帮助、详细的和礼貌的回答。"
+    ),
+)
+
+
 _register_template(
     name="yuan",
     format_user=StringFormatter(slots=["{{content}}", {"token": "<sep>"}]),
diff --git a/src/llmtuner/model/patcher.py b/src/llmtuner/model/patcher.py
index fd99bd3b..b7cad67c 100644
--- a/src/llmtuner/model/patcher.py
+++ b/src/llmtuner/model/patcher.py
@@ -16,7 +16,7 @@ from .utils.moe import add_z3_leaf_module, configure_moe
 from .utils.quantization import configure_quantization
 from .utils.rope import configure_rope
 from .utils.valuehead import prepare_valuehead_model
-from .utils.visual import autocast_projector_dtype, configure_hidden_size
+from .utils.visual import autocast_projector_dtype, configure_hidden_size, configure_visual
 
 
 if TYPE_CHECKING:
@@ -50,6 +50,7 @@ def patch_config(
     configure_quantization(config, tokenizer, model_args, init_kwargs)
     configure_moe(config, model_args, is_trainable)
     configure_hidden_size(config)
+    configure_visual(config, model_args)
 
     if model_args.use_cache and not is_trainable:
         setattr(config, "use_cache", True)
diff --git a/src/llmtuner/model/utils/visual.py b/src/llmtuner/model/utils/visual.py
index b29a9ba5..d1556bb3 100644
--- a/src/llmtuner/model/utils/visual.py
+++ b/src/llmtuner/model/utils/visual.py
@@ -1,12 +1,14 @@
 from typing import TYPE_CHECKING, Tuple
 
 import torch
+import transformers
+from torch import nn
 
 from ...extras.logging import get_logger
 
 
 if TYPE_CHECKING:
-    from transformers import PretrainedConfig, PreTrainedModel
+    from transformers import PretrainedConfig, PreTrainedModel, LlavaConfig
 
     from ...hparams import ModelArguments
 
@@ -31,3 +33,25 @@ def autocast_projector_dtype(
     logger.info("Casting multimodal projector outputs in {}.".format(model_args.compute_dtype))
     mm_projector: "torch.nn.Module" = getattr(model, mm_projector_name)
     mm_projector.register_forward_hook(_mm_projector_forward_post_hook)
+
+
+class LlavaMultiModalProjectorYiVL(nn.Module):
+    def __init__(self, config: "LlavaConfig"):
+        super().__init__()
+        self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
+        self.linear_2 = nn.LayerNorm(config.text_config.hidden_size, bias=True)
+        self.linear_3 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
+        self.linear_4 = nn.LayerNorm(config.text_config.hidden_size, bias=True)
+        self.act = nn.GELU()
+        self.proj = nn.Sequential(*[self.linear_1, self.linear_2, self.act, self.linear_3, self.linear_4])
+
+    def forward(self, image_features):
+        hidden_states = self.proj(image_features)
+        return hidden_states
+
+
+def configure_visual(config: "PretrainedConfig", model_args: "ModelArguments") -> None:
+    logger = get_logger(__name__)
+    if model_args.visual_inputs and "Yi" in getattr(config.text_config, "_name_or_path", ""):
+        transformers.models.llava.modeling_llava.LlavaMultiModalProjector = LlavaMultiModalProjectorYiVL
+        logger.info("Patched Multimodal Projector for Yi-VL.")
diff --git a/src/llmtuner/train/sft/trainer.py b/src/llmtuner/train/sft/trainer.py
index def427fd..1b456e50 100644
--- a/src/llmtuner/train/sft/trainer.py
+++ b/src/llmtuner/train/sft/trainer.py
@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import torch
-from transformers import Seq2SeqTrainer
+from transformers import Seq2SeqTrainer, ProcessorMixin
 
 from ...extras.constants import IGNORE_INDEX
 from ...extras.logging import get_logger
@@ -26,9 +26,10 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
     Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE.
     """
 
-    def __init__(self, finetuning_args: "FinetuningArguments", **kwargs) -> None:
+    def __init__(self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs) -> None:
         super().__init__(**kwargs)
         self.finetuning_args = finetuning_args
+        self.processor = processor
 
         if finetuning_args.use_badam:
             from badam import clip_grad_norm_for_sparse_tensor
@@ -120,3 +121,10 @@ class CustomSeq2SeqTrainer(Seq2SeqTrainer):
         for label, pred in zip(decoded_labels, decoded_preds):
             res.append(json.dumps({"label": label, "predict": pred}, ensure_ascii=False))
         writer.write("\n".join(res))
+
+    def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False) -> None:
+        super().save_model(output_dir, _internal_call)
+        if self.processor is not None:
+            if output_dir is None:
+                output_dir = self.args.output_dir
+            getattr(self.processor, "image_processor").save_pretrained(output_dir)
\ No newline at end of file
diff --git a/src/llmtuner/train/sft/workflow.py b/src/llmtuner/train/sft/workflow.py
index 4a9775b4..3b7b909a 100644
--- a/src/llmtuner/train/sft/workflow.py
+++ b/src/llmtuner/train/sft/workflow.py
@@ -30,6 +30,7 @@ def run_sft(
 ):
     tokenizer_module = load_tokenizer(model_args)
     tokenizer = tokenizer_module["tokenizer"]
+    processor = tokenizer_module["processor"]
     dataset = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module)
     model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train)
 
@@ -55,6 +56,7 @@ def run_sft(
         model=model,
         args=training_args,
         finetuning_args=finetuning_args,
+        processor=processor,
         tokenizer=tokenizer,
         data_collator=data_collator,
         callbacks=callbacks,
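
Note (outside the patch above): the Yi-VL support relies on configure_visual() swapping the projector class at the transformers module level, so the swap must happen before the Llava model is instantiated, which is why it is called from patch_config(). Below is a minimal sketch of that mechanism under stated assumptions: the checkpoint path "BUAADreamer/Yi-VL-6B-hf" is only an assumed example of a Llava-format Yi-VL checkpoint, and llmtuner is assumed to be importable (e.g. src/ on PYTHONPATH).

import transformers
from transformers import LlavaForConditionalGeneration

from llmtuner.model.utils.visual import LlavaMultiModalProjectorYiVL

# The same swap that configure_visual() performs: replace the module-level class so that
# LlavaForConditionalGeneration.__init__ builds the Yi-VL-style projector
# (Linear -> LayerNorm -> GELU -> Linear -> LayerNorm) instead of the default one.
transformers.models.llava.modeling_llava.LlavaMultiModalProjector = LlavaMultiModalProjectorYiVL

model = LlavaForConditionalGeneration.from_pretrained("BUAADreamer/Yi-VL-6B-hf")  # assumed example path

# The projector attached to the loaded model is now the patched Yi-VL variant.
assert isinstance(model.multi_modal_projector, LlavaMultiModalProjectorYiVL)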