From 12e0e5d0d7d78fa5b8ea6d669d1d900fb75b15e0 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Sun, 14 Jul 2024 10:56:45 +0800
Subject: [PATCH] tiny fix

Former-commit-id: d3c01552e0f978f150902175f096f6e3bfb64363
---
 src/llamafactory/model/model_utils/attention.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/llamafactory/model/model_utils/attention.py b/src/llamafactory/model/model_utils/attention.py
index 7dee827c..da53baa2 100644
--- a/src/llamafactory/model/model_utils/attention.py
+++ b/src/llamafactory/model/model_utils/attention.py
@@ -36,13 +36,14 @@ def configure_attn_implementation(
         if model_args.flash_attn == "auto" or model_args.flash_attn == "fa2":
             if is_flash_attn_2_available():
                 require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
+                require_version("flash_attn>=2.6.0", "To fix: pip install flash_attn>=2.6.0")
                 logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
                 model_args.flash_attn = "fa2"
             else:
                 logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")
                 model_args.flash_attn = "disabled"
         elif model_args.flash_attn == "sdpa":
-            raise ValueError("Gemma-2 should use soft-capping attention, while the SDPA attention is not compatible.")
+            logger.warning("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")

     if model_args.flash_attn == "auto":
         return
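
Note: below is a minimal standalone sketch of the patched branch, reconstructed from this hunk only. The wrapper name configure_gemma2_attn and the bare model_args namespace are hypothetical simplifications; the real configure_attn_implementation in LLaMA-Factory also receives the model config and handles other attention backends.

import logging
from types import SimpleNamespace

from transformers.utils import is_flash_attn_2_available
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)


def configure_gemma2_attn(model_args) -> None:
    # Mirrors the patched Gemma-2 branch: prefer FlashAttention-2 when available,
    # otherwise fall back to eager attention.
    if model_args.flash_attn in ("auto", "fa2"):
        if is_flash_attn_2_available():
            # Soft-capping attention needs recent transformers and flash_attn releases.
            require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
            require_version("flash_attn>=2.6.0", "To fix: pip install flash_attn>=2.6.0")
            logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
            model_args.flash_attn = "fa2"
        else:
            logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")
            model_args.flash_attn = "disabled"
    elif model_args.flash_attn == "sdpa":
        # The patch downgrades this case from a hard ValueError to a warning:
        # SDPA lacks soft-capping support, but the run can still proceed.
        logger.warning("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")


# Example usage with a hypothetical argument object:
args = SimpleNamespace(flash_attn="sdpa")
configure_gemma2_attn(args)  # now logs a warning instead of raising ValueError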