Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-17 20:30:36 +08:00
[model] update kt code (#9406)
@@ -59,6 +59,7 @@ def configure_attn_implementation(config: "PretrainedConfig", model_args: "Model
        requested_attn_implementation = "sdpa"
    elif model_args.flash_attn == AttentionFunction.FA2:
        from transformers import is_torch_npu_available

        if not (is_flash_attn_2_available() or is_torch_npu_available()):
            logger.warning_rank0("FlashAttention-2 is not installed.")
            return
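The hunk above only covers the availability guard for an FA2 request: after this change, an Ascend NPU environment counts as able to serve the FlashAttention-2 backend even when the CUDA flash-attn package is absent. Below is a minimal, self-contained sketch of that guard. The wrapper function resolve_fa2_request, its return convention, and the plain print stand-in for logger.warning_rank0 are illustrative assumptions; the two availability helpers are the real transformers utilities used in the diff.

    from typing import Optional

    from transformers import is_torch_npu_available
    from transformers.utils import is_flash_attn_2_available


    def resolve_fa2_request() -> Optional[str]:
        # Sketch only: mirrors the guard in the hunk above, not the project's API.
        # FA2 is treated as usable if either the CUDA flash-attn package is
        # installed or the process runs on an Ascend NPU build of torch.
        if not (is_flash_attn_2_available() or is_torch_npu_available()):
            print("FlashAttention-2 is not installed.")  # stand-in for logger.warning_rank0
            return None
        # Assumed downstream use: this string is the attention implementation
        # that would be requested for the model config.
        return "flash_attention_2"

Calling resolve_fa2_request() on a machine with neither flash-attn nor an NPU-enabled torch prints the warning and returns None; on an Ascend NPU build it returns "flash_attention_2" without requiring the flash-attn wheel, which is the behavior the added import and widened condition enable.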