diff --git a/src/llamafactory/model/model_utils/attention.py b/src/llamafactory/model/model_utils/attention.py
index 0a12012e9..e2349f40e 100644
--- a/src/llamafactory/model/model_utils/attention.py
+++ b/src/llamafactory/model/model_utils/attention.py
@@ -29,6 +29,26 @@ logger = logging.get_logger(__name__)
 
 
 def configure_attn_implementation(config: "PretrainedConfig", model_args: "ModelArguments") -> None:
+    if getattr(config, "model_type", None) == "gpt_oss":
+        # gpt-oss attention (with sinks) is served by the FA3 hub kernel, but do not
+        # override an explicit user opt-out of flash attention.
+        if model_args.flash_attn == "disabled":
+            return
+
+        from transformers.integrations.hub_kernels import load_and_register_kernel
+
+        flash_attn3_kernel = "kernels-community/vllm-flash-attn3"
+        try:
+            # Requires the `kernels` package and (on first use) hub access; fall back
+            # to the default attention path instead of crashing model loading.
+            load_and_register_kernel(flash_attn3_kernel)
+        except Exception as e:
+            logger.warning_rank0(f"Cannot load {flash_attn3_kernel}: {e}. Falling back to default attention.")
+            return
+
+        setattr(config, "_attn_implementation", flash_attn3_kernel)
+        setattr(config, "_attn_implementation_internal", flash_attn3_kernel)
+        model_args.flash_attn = flash_attn3_kernel
+        return
+
     from transformers.utils import is_flash_attn_2_available
 
     if getattr(config, "model_type", None) == "gemma2":
diff --git a/src/llamafactory/model/model_utils/liger_kernel.py b/src/llamafactory/model/model_utils/liger_kernel.py
index 72c01f6f7..f7dcc85ec 100644
--- a/src/llamafactory/model/model_utils/liger_kernel.py
+++ b/src/llamafactory/model/model_utils/liger_kernel.py
@@ -77,6 +77,9 @@ def apply_liger_kernel(
         from liger_kernel.transformers import apply_liger_kernel_to_qwen3 as apply_liger_kernel
     elif model_type == "qwen3_moe":
         from liger_kernel.transformers import apply_liger_kernel_to_qwen3_moe as apply_liger_kernel
+    elif model_type == "gpt_oss":
+        # Install manually from https://github.com/Comet0322/Liger-Kernel
+        from liger_kernel.transformers import apply_liger_kernel_to_gpt_oss as apply_liger_kernel
     else:
         logger.warning_rank0("Current model does not support liger kernel.")
         return
diff --git a/src/llamafactory/model/model_utils/moe.py b/src/llamafactory/model/model_utils/moe.py
index 121c5488a..9e3662a27 100644
--- a/src/llamafactory/model/model_utils/moe.py
+++ b/src/llamafactory/model/model_utils/moe.py
@@ -129,9 +129,13 @@ def add_z3_leaf_module(model: "PreTrainedModel") -> None:
     if model_type in ("qwen3_omni_moe", "qwen3_omni_moe_thinker"):
         from transformers.models.qwen3_omni_moe.modeling_qwen3_omni_moe import Qwen3OmniMoeThinkerTextSparseMoeBlock
-
+
         _set_z3_leaf_modules(model, [Qwen3OmniMoeThinkerTextSparseMoeBlock])
-
+
+    if model_type == "gpt_oss":
+        from transformers.models.gpt_oss.modeling_gpt_oss import GptOssMLP
+
+        _set_z3_leaf_modules(model, [GptOssMLP])
 
 
 def configure_moe(config: "PretrainedConfig", model_args: "ModelArguments", is_trainable: bool) -> None:
     if not is_trainable or not model_args.moe_aux_loss_coef: