🐞 fix: typo, move MoE fix to patcher

Former-commit-id: 4ff28e99ff9b48df7150591c6bbd3723f22b7715
Author: A-Cepheus
Date: 2024-01-22 16:01:58 +08:00
parent 18ad259fb3
commit 712ab4ae7a
2 changed files with 5 additions and 5 deletions


@@ -96,11 +96,6 @@ def load_model_and_tokenizer(
             **config_kwargs,
         )
 
-    if getattr(config, "model_type", None) == "mistral" and is_deepspeed_zero3_enabled():
-        from deepspeed.utils import set_z3_leaf_modules
-        from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock
-
-        set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
 
     patch_model(model, tokenizer, model_args, is_trainable)
     register_autoclass(config, model, tokenizer)


@@ -284,6 +284,11 @@ def patch_model(
     if is_trainable:
         _prepare_model_for_training(model, model_args)
 
+    if getattr(config, "model_type", None) == "mixtral" and is_deepspeed_zero3_enabled():
+        from deepspeed.utils import set_z3_leaf_modules
+        from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock
+
+        set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
 
 def patch_valuehead_model(model: "AutoModelForCausalLMWithValueHead") -> None:
     def tie_weights(self: "AutoModelForCausalLMWithValueHead") -> None:
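
For reference, the relocated guard tells DeepSpeed ZeRO-3 to treat Mixtral's sparse MoE block as a "leaf" module, so all of its expert parameters are gathered before the block's forward pass even when the router activates only a few experts. A minimal, self-contained sketch of the same hook follows; the helper name apply_mixtral_zero3_fix and the transformers.integrations import path are illustrative assumptions, not taken from this diff.

# Sketch of the MoE ZeRO-3 fix moved into the patcher (helper name is illustrative).
from deepspeed.utils import set_z3_leaf_modules
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock


def apply_mixtral_zero3_fix(model, config) -> None:
    # Under DeepSpeed ZeRO-3, flag MixtralSparseMoeBlock as a "leaf" module so
    # every expert's parameters are gathered before its forward pass, even when
    # the router dispatches to only a subset of experts; without this, ZeRO-3
    # can stall on the sparse MoE layer.
    if getattr(config, "model_type", None) == "mixtral" and is_deepspeed_zero3_enabled():
        set_z3_leaf_modules(model, [MixtralSparseMoeBlock])

Calling this right after the model is instantiated (as patch_model does above) is sufficient; set_z3_leaf_modules only flags the matching modules and does not modify any weights.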