Made a small change to the warning about fa2 for Gemma-2 models.

Former-commit-id: e0695a026d822c896cb4f5b33e0c4f88441d75e9
Amirreza A 2024-09-28 19:03:36 +03:30
parent 6509114259
commit ca736bcab7


@@ -37,7 +37,10 @@ def configure_attn_implementation(
         if is_flash_attn_2_available():
             require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
             require_version("flash_attn>=2.6.3", "To fix: pip install flash_attn>=2.6.3")
-            logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
-            model_args.flash_attn = "fa2"
+            if model_args.flash_attn != "fa2":
+                logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
+                model_args.flash_attn = "fa2"
         else:
             logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")