From 5204cd2bca93b937ad888b68ec3ea76208f5e647 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=B5=AE=E6=A2=A6?= <46097299+frozenleaves@users.noreply.github.com>
Date: Fri, 19 Dec 2025 14:57:37 +0800
Subject: [PATCH] [misc] add version check for moe (#9633)

---
 src/llamafactory/model/patcher.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/llamafactory/model/patcher.py b/src/llamafactory/model/patcher.py
index ca93fad59..1bf2c1320 100644
--- a/src/llamafactory/model/patcher.py
+++ b/src/llamafactory/model/patcher.py
@@ -51,9 +51,13 @@ logger = logging.get_logger(__name__)
 
 
 def patch_qwen3_omni_moe_thinker_text_sparse_moe_block():
-    if is_transformers_version_greater_than("4.57.0"):
+    if is_transformers_version_greater_than("4.57.0") and not is_transformers_version_greater_than("4.58.0"):
         from .model_utils.moe import Qwen3OmniMoeThinkerTextSparseMoeBlock
 
+        logger.warning_rank0(
+            "You are using a transformers 4.x release in which Qwen3OmniMoeThinkerTextSparseMoeBlock has issues with DeepSpeed ZeRO-2 and FSDP2 training, so LLaMA-Factory patches this block to work around them. Transformers v5.0.0rc0 fixes the issue; you can also upgrade transformers to use qwen3_omni. See https://github.com/hiyouga/LLaMA-Factory/issues/9628 for more information."
+        )
+
         modeling_qwen3_omni_moe.Qwen3OmniMoeThinkerTextSparseMoeBlock = Qwen3OmniMoeThinkerTextSparseMoeBlock
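
For reference, below is a minimal, self-contained sketch of the version-gated monkey patch this commit applies. It is illustrative only: is_transformers_version_greater_than is re-implemented here with the packaging library rather than LLaMA-Factory's own helper, the import path transformers.models.qwen3_omni_moe.modeling_qwen3_omni_moe is assumed from the usual transformers module layout, and PatchedSparseMoeBlock is a hypothetical placeholder for the real replacement block shipped in src/llamafactory/model/model_utils/moe.py.

    # Illustrative sketch only -- not LLaMA-Factory's actual helper code.
    import importlib.metadata

    from packaging import version


    def is_transformers_version_greater_than(target: str) -> bool:
        # Assumed semantics: installed transformers version >= target.
        installed = version.parse(importlib.metadata.version("transformers"))
        return installed >= version.parse(target)


    def patch_qwen3_omni_moe_thinker_text_sparse_moe_block() -> None:
        # Patch only the affected 4.57.x releases; 4.58+ / v5.0.0rc0 are treated as fixed upstream.
        if is_transformers_version_greater_than("4.57.0") and not is_transformers_version_greater_than("4.58.0"):
            # Import path assumed from the standard transformers module layout.
            from transformers.models.qwen3_omni_moe import modeling_qwen3_omni_moe

            class PatchedSparseMoeBlock(modeling_qwen3_omni_moe.Qwen3OmniMoeThinkerTextSparseMoeBlock):
                """Placeholder subclass; the real patch reimplements forward() for ZeRO-2/FSDP2."""

            # Rebind the module-level class so models constructed afterwards use the patched block.
            modeling_qwen3_omni_moe.Qwen3OmniMoeThinkerTextSparseMoeBlock = PatchedSparseMoeBlock

The rebinding happens at the module level, which is why the gate on the installed transformers version matters: once upstream fixes the block (v5.0.0rc0 per the warning above), the replacement should no longer be installed.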