[misc] fix omni thinker load (#9552)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Author: Kingsley
Date: 2025-11-30 09:36:36 +08:00
Committed by: GitHub
Parent: d1f585f80a
Commit: 22be45c78c


@@ -127,7 +127,7 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:
         _set_z3_leaf_modules(model, [Qwen3VLMoeTextSparseMoeBlock])
-    if model_type == "qwen3_omni_moe":
+    if model_type in ("qwen3_omni_moe", "qwen3_omni_moe_thinker"):
         from transformers.models.qwen3_omni_moe.modeling_qwen3_omni_moe import Qwen3OmniMoeThinkerTextSparseMoeBlock
         _set_z3_leaf_modules(model, [Qwen3OmniMoeThinkerTextSparseMoeBlock])
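For context, a minimal sketch of how the patched condition could sit inside add_z3_leaf_module. Only the import and class names visible in the hunk are taken from the diff; the _set_z3_leaf_modules helper (forwarding to DeepSpeed's set_z3_leaf_modules) and the model.config.model_type lookup are assumptions, not the repository's exact code.

from transformers import PreTrainedModel


def _set_z3_leaf_modules(model: "PreTrainedModel", leaf_modules: list) -> None:
    # Assumed helper: forwards to DeepSpeed so the listed module classes are
    # treated as ZeRO-3 leaf modules (their parameters are gathered as a unit
    # instead of being partitioned inside the MoE block).
    from deepspeed.utils import set_z3_leaf_modules

    set_z3_leaf_modules(model, leaf_modules)


def add_z3_leaf_module(model: "PreTrainedModel") -> None:
    model_type = getattr(model.config, "model_type", None)  # assumed lookup

    # The fix: a thinker-only Qwen3-Omni checkpoint presumably reports
    # "qwen3_omni_moe_thinker" as its model_type, so both spellings must
    # register the same sparse-MoE block as a ZeRO-3 leaf module.
    if model_type in ("qwen3_omni_moe", "qwen3_omni_moe_thinker"):
        from transformers.models.qwen3_omni_moe.modeling_qwen3_omni_moe import (
            Qwen3OmniMoeThinkerTextSparseMoeBlock,
        )

        _set_z3_leaf_modules(model, [Qwen3OmniMoeThinkerTextSparseMoeBlock])

With the one-line change, loading only the thinker sub-model of Qwen3-Omni-MoE takes the same code path as the full omni model, instead of skipping leaf-module registration under DeepSpeed ZeRO-3.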