From 8985c43033d8ab778f9c70ebb5f00d1d57e67c89 Mon Sep 17 00:00:00 2001
From: A-Cepheus <60658915+A-Cepheus@users.noreply.github.com>
Date: Mon, 22 Jan 2024 15:21:14 +0800
Subject: [PATCH] fix: ZeRO3 does not work with MoE models

Former-commit-id: e1d5c9851922522f45314c3058d4658198631875
---
 src/llmtuner/model/loader.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py
index b02a4560..656bfa6d 100644
--- a/src/llmtuner/model/loader.py
+++ b/src/llmtuner/model/loader.py
@@ -96,6 +96,11 @@ def load_model_and_tokenizer(
         **config_kwargs,
     )
 
+    if getattr(config, "model_type", None) == "mixtral" and is_deepspeed_zero3_enabled():
+        from deepspeed.utils import set_z3_leaf_modules
+        from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock
+        set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
+
     patch_model(model, tokenizer, model_args, is_trainable)
     register_autoclass(config, model, tokenizer)
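
Note (not part of the patch): the hunk above registers Mixtral's sparse MoE block as a ZeRO-3 "leaf module", so DeepSpeed gathers the block's parameters as one unit instead of hooking every expert parameter individually; since only a subset of experts runs per token, the per-parameter partition/gather hooks can otherwise stall under ZeRO-3. The sketch below shows the same pattern in a self-contained form. It assumes is_deepspeed_zero3_enabled is importable from transformers.integrations (the patch does not show loader.py's import block) and that the installed DeepSpeed version provides deepspeed.utils.set_z3_leaf_modules; the helper function name and call site are illustrative only.

    # Standalone sketch of the ZeRO-3 leaf-module workaround; mirrors the patch,
    # but this helper and its name are illustrative, not part of loader.py.
    from transformers.integrations import is_deepspeed_zero3_enabled  # assumed import location

    def _mark_moe_leaf_modules(model, config) -> None:
        # Mixtral routes each token to a few experts, so under ZeRO-3 some expert
        # parameters go unused in a given forward pass and the per-parameter
        # partition/gather bookkeeping can desynchronize across ranks. Marking the
        # whole MoE block as a ZeRO-3 leaf makes DeepSpeed treat it as one unit.
        if getattr(config, "model_type", None) == "mixtral" and is_deepspeed_zero3_enabled():
            from deepspeed.utils import set_z3_leaf_modules
            from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock

            set_z3_leaf_modules(model, [MixtralSparseMoeBlock])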