[model] update kt code (#9406)

Author: Yaowei Zheng
Date: 2025-11-05 15:27:22 +08:00
Committed by: GitHub
Parent: 56f45e826f
Commit: eaf963f67f
28 changed files with 108 additions and 68 deletions

@@ -59,6 +59,7 @@ def configure_attn_implementation(config: "PretrainedConfig", model_args: "ModelArguments") -> None:
         requested_attn_implementation = "sdpa"
     elif model_args.flash_attn == AttentionFunction.FA2:
-        if not is_flash_attn_2_available():
+        from transformers import is_torch_npu_available
+        if not (is_flash_attn_2_available() or is_torch_npu_available()):
             logger.warning_rank0("FlashAttention-2 is not installed.")
             return
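
For context on the hunk above: the check now passes when either the flash-attn package is importable or an Ascend NPU is present, presumably because the NPU provides its own fused attention kernels via torch_npu rather than the flash-attn pip package. Below is a minimal standalone sketch of that fallback logic, assuming the helper name `resolve_fa2_backend` and its `warn` callback, which are illustrative and not part of the repository's API:

```python
# Sketch of the updated FA2 availability check; resolve_fa2_backend and the
# warn callback are hypothetical names, not part of LLaMA-Factory itself.
from typing import Callable, Optional

from transformers import is_torch_npu_available
from transformers.utils import is_flash_attn_2_available


def resolve_fa2_backend(warn: Callable[[str], None]) -> Optional[str]:
    # FA2 is usable either via the flash-attn package (CUDA) or natively on
    # Ascend NPU, so the check accepts either backend before opting in.
    if not (is_flash_attn_2_available() or is_torch_npu_available()):
        warn("FlashAttention-2 is not installed.")
        return None  # caller keeps the model's default attention implementation
    return "flash_attention_2"


if __name__ == "__main__":
    print(resolve_fa2_backend(print))
```

Importing `is_torch_npu_available` lazily inside the branch, as the diff does, avoids paying the import cost on code paths that never request FA2.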