From aab9b400bb9f0582d4b88bc60f41778c3bfc20b3 Mon Sep 17 00:00:00 2001
From: Shanay Mehta <101552567+Shanay-Mehta@users.noreply.github.com>
Date: Tue, 24 Feb 2026 17:24:37 +0530
Subject: [PATCH] [model] Add DeepSpeed Z3 leaf module for Qwen3-Next (#10194)

Co-authored-by: Claude Opus 4.6
---
 src/llamafactory/model/model_utils/moe.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/llamafactory/model/model_utils/moe.py b/src/llamafactory/model/model_utils/moe.py
index d7c19c44c..fdff829dc 100644
--- a/src/llamafactory/model/model_utils/moe.py
+++ b/src/llamafactory/model/model_utils/moe.py
@@ -142,6 +142,10 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:
 
         _set_z3_leaf_modules(model, [Qwen3OmniMoeThinkerTextSparseMoeBlock])
 
+    if model_type == "qwen3_next":
+        from transformers.models.qwen3_next.modeling_qwen3_next import Qwen3NextSparseMoeBlock
+
+        _set_z3_leaf_modules(model, [Qwen3NextSparseMoeBlock])
 
 def configure_moe(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
     if not is_trainable or not model_args.moe_aux_loss_coef:
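
Background for reviewers: under DeepSpeed ZeRO-3, parameters are partitioned
across ranks and gathered on demand per submodule. A sparse MoE block only
runs the experts its router selects, so different ranks can request different
parameter sets and training can hang. Registering the block as a Z3 "leaf
module" tells DeepSpeed to gather all of its parameters as one unit instead.
Below is a minimal sketch of the helper this patch calls, assuming DeepSpeed's
public deepspeed.utils.set_z3_leaf_modules API; the actual
_set_z3_leaf_modules in LLaMA-Factory may differ, e.g. by adding a version
check:

    import torch
    from transformers import PreTrainedModel


    def _set_z3_leaf_modules(model: PreTrainedModel, leaf_modules: list[type[torch.nn.Module]]) -> None:
        """Register module classes whose parameters ZeRO-3 should gather as a single unit."""
        # Available in recent DeepSpeed releases; raises ImportError otherwise.
        from deepspeed.utils import set_z3_leaf_modules

        set_z3_leaf_modules(model, leaf_modules)

With this registration, each Qwen3NextSparseMoeBlock is treated as an
indivisible unit during Z3 gather/prefetch, matching how the other Qwen MoE
variants are handled earlier in add_z3_leaf_module.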