[model] support InternVL3_5 (#9028)

Kingsley 2025-08-28 17:12:00 +08:00 committed by GitHub
parent 50780499ed
commit d6394546a9
4 changed files with 29 additions and 3 deletions


@@ -280,7 +280,7 @@ Choose your path:
 | [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
 | [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
 | [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
-| [InternVL 2.5-3](https://huggingface.co/OpenGVLab) | 1B/2B/8B/14B/38B/78B | intern_vl |
+| [InternVL 2.5-3.5](https://huggingface.co/OpenGVLab) | 1B/2B/4B/8B/14B/30B/38B/78B/241B | intern_vl |
 | [InternLM/Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 |
 | [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
 | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |


@@ -282,7 +282,7 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
 | [Hunyuan](https://huggingface.co/tencent/) | 7B | hunyuan |
 | [Index](https://huggingface.co/IndexTeam) | 1.9B | index |
 | [InternLM 2-3](https://huggingface.co/internlm) | 7B/8B/20B | intern2 |
-| [InternVL 2.5-3](https://huggingface.co/OpenGVLab) | 1B/2B/8B/14B/38B/78B | intern_vl |
+| [InternVL 2.5-3.5](https://huggingface.co/OpenGVLab) | 1B/2B/4B/8B/14B/30B/38B/78B/241B | intern_vl |
 | [InternLM/Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 |
 | [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
 | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
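The two hunks above extend the supported-model tables (the same table appears in the English and Chinese READMEs): the InternVL row now covers the 2.5-3.5 series and gains the 4B/30B/241B sizes, while the intern_vl template key is unchanged. As a quick sanity check, the new -hf checkpoints load through the standard Transformers auto classes the framework builds on; a minimal sketch, assuming a recent transformers release with InternVL support (the model id is the 8B entry registered later in this commit):

# Minimal load sketch -- not part of the commit. Downloads the full
# checkpoint, so run it on a machine with enough disk and memory.
from transformers import AutoModelForImageTextToText, AutoProcessor

model_id = "llamafactory/InternVL3_5-8B-hf"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForImageTextToText.from_pretrained(model_id)
print(model.config.model_type)  # expected: "internvl"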


@@ -1260,6 +1260,18 @@ register_model_group(
             DownloadSource.DEFAULT: "OpenGVLab/InternVL3-78B-hf",
             DownloadSource.MODELSCOPE: "OpenGVLab/InternVL3-78B-hf",
         },
+        "InternVL3_5-1B-hf": {
+            DownloadSource.DEFAULT: "llamafactory/InternVL3_5-1B-hf",
+        },
+        "InternVL3_5-2B-hf": {
+            DownloadSource.DEFAULT: "llamafactory/InternVL3_5-2B-hf",
+        },
+        "InternVL3_5-4B-hf": {
+            DownloadSource.DEFAULT: "llamafactory/InternVL3_5-4B-hf",
+        },
+        "InternVL3_5-8B-hf": {
+            DownloadSource.DEFAULT: "llamafactory/InternVL3_5-8B-hf",
+        },
     },
     template="intern_vl",
     multimodal=True,
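Two things stand out in this registry hunk: the InternVL3_5 entries point at the llamafactory organization rather than OpenGVLab, and they register only a DEFAULT download source with no MODELSCOPE mirror. A rough sketch of the fallback semantics such a registry implies (simplified and illustrative; the real register_model_group records more metadata):

from enum import Enum, unique

@unique
class DownloadSource(str, Enum):
    DEFAULT = "hf"     # Hugging Face Hub
    MODELSCOPE = "ms"  # ModelScope mirror

def resolve_model_id(sources: dict, prefer: DownloadSource) -> str:
    # Use the preferred hub when a mirror is registered; otherwise fall
    # back to DEFAULT, which is all the InternVL3_5 entries above provide.
    return sources.get(prefer, sources[DownloadSource.DEFAULT])

internvl3_5_8b = {DownloadSource.DEFAULT: "llamafactory/InternVL3_5-8B-hf"}
assert resolve_model_id(internvl3_5_8b, DownloadSource.MODELSCOPE) == "llamafactory/InternVL3_5-8B-hf"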


@@ -39,6 +39,9 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:
         return

     model_type = getattr(model.config, "model_type", None)
+    text_config = getattr(model.config, "text_config", None)
+    text_architectures = getattr(text_config, "architectures", None)
+
     if model_type == "dbrx":
         from transformers.models.dbrx.modeling_dbrx import DbrxFFN
@@ -102,7 +105,7 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:
         _set_z3_leaf_modules(model, [Qwen2MoeSparseMoeBlock])

-    if model_type == "qwen3_moe":
+    if model_type == "qwen3_moe" or text_architectures == "Qwen3MoeForCausalLM":  # for internvl_3_5
         from transformers.models.qwen3_moe.modeling_qwen3_moe import Qwen3MoeSparseMoeBlock

         _set_z3_leaf_modules(model, [Qwen3MoeSparseMoeBlock])
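For context: under DeepSpeed ZeRO-3, parameters are partitioned across ranks and gathered on demand, and a sparse MoE block only exercises a few experts per step, so marking the whole block as a ZeRO-3 leaf module gathers its parameters together instead of per expert branch. One caveat on the new condition: in Transformers, a config's architectures field is normally a list of class names (e.g. ["Qwen3MoeForCausalLM"]), so the equality test against a bare string only fires if the sub-config happens to store a string. A more defensive variant (illustrative only, not the commit's code):

# Normalize architectures to a list before testing membership.
text_config = getattr(model.config, "text_config", None)
text_architectures = getattr(text_config, "architectures", None) or []
if isinstance(text_architectures, str):
    text_architectures = [text_architectures]

if model_type == "qwen3_moe" or "Qwen3MoeForCausalLM" in text_architectures:
    from transformers.models.qwen3_moe.modeling_qwen3_moe import Qwen3MoeSparseMoeBlock

    _set_z3_leaf_modules(model, [Qwen3MoeSparseMoeBlock])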
@@ -113,6 +116,8 @@ def configure_moe(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
         return

     model_type = getattr(config, "model_type", None)
+    text_config = getattr(config, "text_config", None)  # for multimodal model
+
     if model_type in [
         "dbrx",
         "granitemoe",
@@ -127,9 +132,18 @@ def configure_moe(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
     ]:
         setattr(config, "output_router_logits", True)

+    if text_config and getattr(text_config, "model_type", None) in [
+        "glm4v_moe_text",  # glmv4_5
+        "qwen3_moe",  # internvl_3_5
+    ]:
+        setattr(text_config, "output_router_logits", True)
+
     if model_type in ["granitemoe", "jamba", "llama4", "mixtral", "olmoe", "phimoe", "qwen2_moe", "qwen3_moe"]:
         setattr(config, "router_aux_loss_coef", model_args.moe_aux_loss_coef)
+    elif text_config and getattr(text_config, "model_type", None) in ["qwen3_moe"]:
+        setattr(text_config, "router_aux_loss_coef", model_args.moe_aux_loss_coef)
     elif model_type == "deepseek":
         setattr(config, "aux_loss_alpha", model_args.moe_aux_loss_coef)