Update attention.py

Former-commit-id: fe7057a8a3eb111cdaf8349b6ac077d898bf4935
hoshi-hiyouga 2024-09-29 10:47:41 +08:00 committed by GitHub
parent 6ae0e27c8b
commit 5df765e376

@@ -37,13 +37,11 @@ def configure_attn_implementation(
             if is_flash_attn_2_available():
                 require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
                 require_version("flash_attn>=2.6.3", "To fix: pip install flash_attn>=2.6.3")
-                if model_args.flash_attn != "fa2":
-                    logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
-                    model_args.flash_attn = "fa2"
+                model_args.flash_attn = "fa2"
             else:
-                logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")
+                logger.warning("FlashAttention-2 is not installed, use eager attention.")
                 model_args.flash_attn = "disabled"
         elif model_args.flash_attn == "sdpa":
             logger.warning("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")