try to pass tests

Former-commit-id: 7b4ba0efb658422fd29dca63bac1e9cee8e82af8
This commit is contained in:
BUAADreamer 2024-09-10 13:12:51 +08:00
parent 677d57b7c7
commit 16c7326bc5
4 changed files with 54 additions and 43 deletions

View File

@ -161,7 +161,7 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
## Supported Models
| Model | Model size | Template |
|-------------------------------------------------------------------| -------------------------------- |------------------|
| ----------------------------------------------------------------- | -------------------------------- | --------- |
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |

View File

@ -379,6 +379,7 @@ class LlavaNextVideoPlugin(BasePlugin):
res.update(video_res)
return res
class PaliGemmaPlugin(BasePlugin):
@override
def process_messages(

View File

@ -120,9 +120,12 @@ def load_config(model_args: "ModelArguments") -> "PretrainedConfig":
"""
init_kwargs = _get_init_kwargs(model_args)
if "LLaVA-NeXT-Video" in model_args.model_name_or_path:
from transformers import PretrainedConfig, LlavaNextVideoConfig, CLIPVisionConfig, LlamaConfig
from transformers import CLIPVisionConfig, LlamaConfig, LlavaNextVideoConfig, PretrainedConfig
official_config = PretrainedConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
config = LlavaNextVideoConfig(CLIPVisionConfig(**official_config.vision_config), LlamaConfig(**official_config.text_config))
config = LlavaNextVideoConfig(
CLIPVisionConfig(**official_config.vision_config), LlamaConfig(**official_config.text_config)
)
setattr(config, "visual_inputs", True)
return config
return AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
@ -163,6 +166,7 @@ def load_model(
load_class = AutoModelForCausalLM
if "llava_next_video" == getattr(config, "model_type"):
from transformers import LlavaNextVideoForConditionalGeneration
load_class = LlavaNextVideoForConditionalGeneration
if model_args.train_from_scratch:

View File

@ -108,7 +108,13 @@ def configure_visual_model(config: "PretrainedConfig") -> None:
Patches VLMs before loading them.
"""
model_type = getattr(config, "model_type", None)
if model_type in ["llava", "llava_next", "video_llava", "idefics2", "llava_next_video"]: # required for ds zero3 and valuehead models
if model_type in [
"llava",
"llava_next",
"video_llava",
"idefics2",
"llava_next_video",
]: # required for ds zero3 and valuehead models
setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))
if getattr(config, "is_yi_vl_derived_model", None):