mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-15 03:10:35 +08:00)
add llava-next/llava-next-video/video-llava
Former-commit-id: 6642cd501d
@@ -119,15 +119,6 @@ def load_config(model_args: "ModelArguments") -> "PretrainedConfig":
     Loads model config.
     """
     init_kwargs = _get_init_kwargs(model_args)
-    if "LLaVA-NeXT-Video" in model_args.model_name_or_path:
-        from transformers import CLIPVisionConfig, LlamaConfig, LlavaNextVideoConfig, PretrainedConfig
-
-        official_config = PretrainedConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
-        config = LlavaNextVideoConfig(
-            CLIPVisionConfig(**official_config.vision_config), LlamaConfig(**official_config.text_config)
-        )
-        setattr(config, "visual_inputs", True)
-        return config
     return AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)

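The removed branch above predates native LLaVA-NeXT-Video support in transformers. A minimal sketch of the path that replaces it, assuming a transformers release (>= 4.42) that ships LlavaNextVideoConfig and registers the llava_next_video model type (the checkpoint name is illustrative):

# Sketch: AutoConfig resolves LLaVA-NeXT-Video checkpoints directly,
# so the manual CLIPVisionConfig/LlamaConfig assembly is no longer needed.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
print(config.model_type)  # "llava_next_video"
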
@@ -164,11 +155,6 @@ def load_model(
             load_class = AutoModelForVision2Seq
         else:
             load_class = AutoModelForCausalLM
-        if "llava_next_video" == getattr(config, "model_type"):
-            from transformers import LlavaNextVideoForConditionalGeneration
-
-            load_class = LlavaNextVideoForConditionalGeneration
-
         if model_args.train_from_scratch:
             model = load_class.from_config(config)
         else:

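Likewise, the special-cased import above becomes unnecessary once the model type is registered with the auto classes. A hedged sketch, assuming the installed transformers lists llava_next_video in the AutoModelForVision2Seq mapping (checkpoint name illustrative):

# Sketch: the auto class dispatches to LlavaNextVideoForConditionalGeneration
# based on config.model_type, with no model-specific import at the call site.
from transformers import AutoConfig, AutoModelForVision2Seq

config = AutoConfig.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
model = AutoModelForVision2Seq.from_config(config)  # random init; from_pretrained loads weights
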
@@ -92,7 +92,7 @@ def autocast_projector_dtype(model: "PreTrainedModel", model_args: "ModelArgumen
 
     if getattr(model, "quantization_method", None):
         model_type = getattr(model.config, "model_type", None)
-        if model_type in ["llava", "paligemma"]:
+        if model_type in ["llava", "llava_next", "llava_next_video", "paligemma", "video_llava"]:
             mm_projector: "torch.nn.Module" = getattr(model, "multi_modal_projector")
         elif model_type == "qwen2_vl":
             mm_projector: "torch.nn.Module" = getattr(getattr(model, "visual"), "merger")

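For context on autocast_projector_dtype: the projector fetched here is typically wrapped with a forward hook that casts its output back to the training compute dtype when the backbone is quantized. A minimal sketch of that pattern (the hook body and the bfloat16 choice are illustrative, not the exact implementation):

import torch

def _cast_output_hook(module: torch.nn.Module, args: tuple, output: torch.Tensor) -> torch.Tensor:
    # Cast the projector output so it matches the dtype the quantized LM expects.
    return output.to(torch.bfloat16)

# mm_projector.register_forward_hook(_cast_output_hook)
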
@@ -111,9 +111,8 @@ def configure_visual_model(config: "PretrainedConfig") -> None:
     if model_type in [
         "llava",
         "llava_next",
-        "video_llava",
-        "idefics2",
+        "llava_next_video",
         "video_llava",
     ]:  # required for ds zero3 and valuehead models
         setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))

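The hidden_size copy matters because composite VLM configs keep that field on the nested text_config, while DeepSpeed ZeRO-3 and valuehead models read it from the top level. A small illustration (checkpoint name illustrative):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("llava-hf/llava-1.5-7b-hf")
setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))
print(config.hidden_size)  # 4096 for a 7B Llama text backbone
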
@@ -128,7 +127,7 @@ def get_forbidden_modules(config: "PretrainedConfig", finetuning_args: "Finetuni
     """
     model_type = getattr(config, "model_type", None)
     forbidden_modules = set()
-    if model_type in ["llava", "paligemma"]:
+    if model_type in ["llava", "llava_next", "llava_next_video", "paligemma", "video_llava"]:
         if finetuning_args.freeze_vision_tower:
             forbidden_modules.add("vision_tower")

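For reference, a forbidden-module set like this is typically consumed by name matching over the model's parameters; a minimal sketch of that pattern (the function name is illustrative):

import torch

def freeze_forbidden_modules(model: torch.nn.Module, forbidden_modules: set) -> None:
    # Freeze any parameter whose qualified name mentions a forbidden module,
    # e.g. "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight".
    for name, param in model.named_parameters():
        if any(module_name in name for module_name in forbidden_modules):
            param.requires_grad_(False)
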
@@ -170,7 +169,7 @@ def patch_target_modules(
     """
     model_type = getattr(config, "model_type", None)
     if finetuning_args.freeze_vision_tower:
-        if model_type in ["llava", "paligemma"]:
+        if model_type in ["llava", "llava_next", "llava_next_video", "paligemma", "video_llava"]:
             return "^(?!.*vision_tower).*(?:{}).*".format("|".join(target_modules))
         elif model_type == "qwen2_vl":
             return "^(?!.*visual).*(?:{}).*".format("|".join(target_modules))

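The updated branch relies on a negative lookahead so LoRA target matching never selects vision-tower modules. A quick check of the pattern's behavior:

import re

target_modules = ["q_proj", "v_proj"]
pattern = "^(?!.*vision_tower).*(?:{}).*".format("|".join(target_modules))

print(bool(re.match(pattern, "language_model.model.layers.0.self_attn.q_proj")))  # True
print(bool(re.match(pattern, "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj")))  # False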