fix qwen2 moe (#6684)

Former-commit-id: 7bf09abf1c4d971cda33daed933c75f391e79294
This commit is contained in:
hoshi-hiyouga 2025-01-17 13:46:09 +08:00 committed by GitHub
parent 555f17c1ee
commit 788accb601

View File

@@ -61,7 +61,7 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:
         _set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
-    if model_type == "qwen2moe":
+    if model_type == "qwen2_moe":
         from transformers.models.qwen2_moe.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock
         _set_z3_leaf_modules(model, [Qwen2MoeSparseMoeBlock])