From 5df765e376ab9aebc349f707bff3520a4544cc14 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Sun, 29 Sep 2024 10:47:41 +0800
Subject: [PATCH] Update attention.py

Former-commit-id: fe7057a8a3eb111cdaf8349b6ac077d898bf4935
---
 src/llamafactory/model/model_utils/attention.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/llamafactory/model/model_utils/attention.py b/src/llamafactory/model/model_utils/attention.py
index dfb42a9f..7667b069 100644
--- a/src/llamafactory/model/model_utils/attention.py
+++ b/src/llamafactory/model/model_utils/attention.py
@@ -37,13 +37,11 @@ def configure_attn_implementation(
             if is_flash_attn_2_available():
                 require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
                 require_version("flash_attn>=2.6.3", "To fix: pip install flash_attn>=2.6.3")
-
                 if model_args.flash_attn != "fa2":
                     logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
-
-                model_args.flash_attn = "fa2"
+                    model_args.flash_attn = "fa2"
             else:
-                logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")
+                logger.warning("FlashAttention-2 is not installed, use eager attention.")
                 model_args.flash_attn = "disabled"
         elif model_args.flash_attn == "sdpa":
             logger.warning("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")
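
For readability, the hunk above resolves to the following post-patch excerpt. This is a sketch reconstructed from the diff context, not a standalone script: model_args, logger, is_flash_attn_2_available, and require_version are provided by the surrounding module (src/llamafactory/model/model_utils/attention.py), and the enclosing Gemma-2 / "auto"-or-"fa2" checks of configure_attn_implementation() are assumed to have already matched.

    # Post-patch state of the Gemma-2 branch, shown without diff markers.
    if is_flash_attn_2_available():
        require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
        require_version("flash_attn>=2.6.3", "To fix: pip install flash_attn>=2.6.3")
        if model_args.flash_attn != "fa2":
            logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
            model_args.flash_attn = "fa2"  # after this patch, only overridden when it was not already "fa2"
    else:
        logger.warning("FlashAttention-2 is not installed, use eager attention.")
        model_args.flash_attn = "disabled"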