From de4de5b5ab4816cd08ad91eb20edc21febdcc555 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Mon, 1 Jul 2024 03:55:20 +0800
Subject: [PATCH] tiny fix

Former-commit-id: 8c41a0aa6db8bf31200c83b14819d474927268a1
---
 src/llamafactory/model/model_utils/attention.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llamafactory/model/model_utils/attention.py b/src/llamafactory/model/model_utils/attention.py
index 80d9d4b8..4bed7e21 100644
--- a/src/llamafactory/model/model_utils/attention.py
+++ b/src/llamafactory/model/model_utils/attention.py
@@ -35,7 +35,7 @@ def configure_attn_implementation(
         if model_args.flash_attn == "auto":
             logger.warning("Gemma-2 models should use eager attention in training, change `flash_attn` to disabled.")
             model_args.flash_attn = "disabled"
-        else:
+        elif model_args.flash_attn != "disabled":
             logger.warning(
                 "Gemma-2 models should use eager attention in training, but you set `flash_attn: {}`. "
                 "Will proceed at your own risk.".format(model_args.flash_attn)
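
Note on the change: the previous bare `else:` branch fired whenever
`flash_attn` was anything other than "auto", so users who had already set
`flash_attn: disabled` (the value the warning itself recommends for
training Gemma-2) still received the "proceed at your own risk" warning.
The new `elif` restricts that warning to genuinely risky settings.

Below is a minimal, self-contained sketch of the patched control flow.
The helper name `_resolve_gemma2_flash_attn` and the return-value style
are hypothetical, introduced here only for illustration; the upstream
function mutates `model_args.flash_attn` in place. The example values
"fa2" and "sdpa" are assumptions about other accepted settings.

    import logging

    logger = logging.getLogger(__name__)

    def _resolve_gemma2_flash_attn(flash_attn: str) -> str:
        """Hypothetical helper mirroring the patched Gemma-2 branch."""
        if flash_attn == "auto":
            # Fall back to eager attention automatically; Gemma-2's
            # soft-capped attention needs eager attention in training.
            logger.warning(
                "Gemma-2 models should use eager attention in training, "
                "change `flash_attn` to disabled."
            )
            return "disabled"
        elif flash_attn != "disabled":
            # Warn only for explicit risky choices (e.g. "fa2", "sdpa");
            # the old bare `else:` also warned when the user had already
            # set "disabled".
            logger.warning(
                "Gemma-2 models should use eager attention in training, "
                "but you set `flash_attn: {}`. "
                "Will proceed at your own risk.".format(flash_attn)
            )
        return flash_attn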