Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-10-15 16:18:10 +08:00)
tiny fix
Former-commit-id: 19e43c3a9ed771e991cb273d394ab28fb923f868
This commit is contained in:
parent 884b49e662
commit 4357e42391
@@ -35,7 +35,7 @@ def configure_attn_implementation(
         if model_args.flash_attn == "auto":
             logger.warning("Gemma-2 models should use eager attention in training, change `flash_attn` to disabled.")
             model_args.flash_attn = "disabled"
-        else:
+        elif model_args.flash_attn != "disabled":
             logger.warning(
                 "Gemma-2 models should use eager attention in training, but you set `flash_attn: {}`. "
                 "Will proceed at your own risk.".format(model_args.flash_attn)
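For context, here is a minimal, self-contained sketch of how the patched branch behaves. The `ModelArguments` dataclass, the logger setup, and the `configure_gemma2_attention` wrapper are illustrative stand-ins, not LLaMA-Factory's actual definitions (the real function also receives the model config and a trainable flag); only the if/elif logic mirrors the diff. The practical effect of swapping `else:` for `elif model_args.flash_attn != "disabled":` is that a user who already set `flash_attn: disabled` no longer gets a spurious risk warning.

# Minimal sketch of the post-fix logic. The dataclass and logger below are
# assumed stand-ins for LLaMA-Factory's ModelArguments and logging helper.
import logging
from dataclasses import dataclass

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:  # hypothetical stand-in for the real ModelArguments
    flash_attn: str = "auto"  # e.g. "auto", "disabled", "sdpa", "fa2"


def configure_gemma2_attention(model_args: ModelArguments) -> None:
    """Force eager attention for Gemma-2 training, mirroring the patched branch."""
    if model_args.flash_attn == "auto":
        # Left unset by the user: warn once and fall back to eager attention.
        logger.warning("Gemma-2 models should use eager attention in training, change `flash_attn` to disabled.")
        model_args.flash_attn = "disabled"
    elif model_args.flash_attn != "disabled":
        # Explicitly set to something else (e.g. "fa2"): warn but respect the choice.
        # Before this fix, the bare `else:` also warned when the user had already
        # chosen "disabled", which is exactly the recommended setting.
        logger.warning(
            "Gemma-2 models should use eager attention in training, but you set `flash_attn: {}`. "
            "Will proceed at your own risk.".format(model_args.flash_attn)
        )


if __name__ == "__main__":
    args = ModelArguments(flash_attn="disabled")
    configure_gemma2_attention(args)  # no warning after the fix; the old `else:` warned here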