Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-10-14 23:58:11 +08:00)
tiny fix

Former-commit-id: 220d7c1ce15e8013a900e59fe0c7937e38b5c3b5
parent: 5ab997d484
commit: 71e4404c0d
@@ -36,13 +36,14 @@ def configure_attn_implementation(
     if model_args.flash_attn == "auto" or model_args.flash_attn == "fa2":
         if is_flash_attn_2_available():
             require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
             require_version("flash_attn>=2.6.0", "To fix: pip install flash_attn>=2.6.0")
             logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
             model_args.flash_attn = "fa2"
         else:
             logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")
             model_args.flash_attn = "disabled"
     elif model_args.flash_attn == "sdpa":
-        raise ValueError("Gemma-2 should use soft-capping attention, while the SDPA attention is not compatible.")
+        logger.warning("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")

     if model_args.flash_attn == "auto":
         return
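For context, here is a minimal, hypothetical sketch of how the Gemma-2 attention branch behaves after this change. The helper name resolve_gemma2_flash_attn and the standalone logger are illustration-only inventions; the version pins, warning messages, and control flow are taken from the diff above. The substance of the fix is that an explicit `sdpa` setting now only logs a warning instead of raising a ValueError.

# Hypothetical sketch, not the project's actual function: it condenses the
# Gemma-2 branch of configure_attn_implementation() as it reads after this commit.
import logging

from transformers.utils import is_flash_attn_2_available
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)


def resolve_gemma2_flash_attn(flash_attn: str) -> str:
    """Return the attention implementation a Gemma-2 run would settle on."""
    if flash_attn in ("auto", "fa2"):
        if is_flash_attn_2_available():
            require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
            require_version("flash_attn>=2.6.0", "To fix: pip install flash_attn>=2.6.0")
            logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
            return "fa2"  # soft-capping attention via FlashAttention-2
        logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")
        return "disabled"  # FlashAttention-2 not installed, fall back to eager
    if flash_attn == "sdpa":
        # Before this commit the sdpa case raised a ValueError; now it only warns.
        logger.warning("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")
    return flash_attn

Downgrading the sdpa case from a hard error to a warning appears to bring it in line with the other branches of this function, which adjust the setting or warn rather than abort the run.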