From 755e3e49b4f4cfa250605b09ebc819dd0381a40d Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 28 Sep 2023 00:53:29 +0800
Subject: [PATCH] fix #1064

Former-commit-id: c90223639790152fadd100cedb5f63d375d9c195
---
 src/llmtuner/extras/patches/llama_patch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llmtuner/extras/patches/llama_patch.py b/src/llmtuner/extras/patches/llama_patch.py
index 830c7ce3..930d3a25 100644
--- a/src/llmtuner/extras/patches/llama_patch.py
+++ b/src/llmtuner/extras/patches/llama_patch.py
@@ -13,7 +13,7 @@ try:
     from flash_attn import flash_attn_func, flash_attn_varlen_func # type: ignore
     from flash_attn.bert_padding import pad_input, unpad_input # type: ignore
 except ImportError:
-    raise ImportError("Please install FlashAttention from https://github.com/Dao-AILab/flash-attention")
+    print("FlashAttention-2 is not installed, ignore this if you are not using FlashAttention.")
 
 
 logger = logging.get_logger(__name__)
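
The change turns FlashAttention from a hard requirement into an optional dependency: a missing `flash_attn` package no longer aborts the import of `llama_patch.py`, it only emits a notice. Below is a minimal sketch of the optional-import pattern this implies, assuming a hypothetical `is_flash_attn_available` flag and a `logger.warning` call instead of `print`; the actual patch only replaces the `raise` with a `print`.

```python
# Sketch of an optional-dependency import guard (illustrative, not the patch itself).
from transformers.utils import logging

logger = logging.get_logger(__name__)

try:
    # Import the FlashAttention-2 kernels if the package is installed.
    from flash_attn import flash_attn_func, flash_attn_varlen_func  # type: ignore
    from flash_attn.bert_padding import pad_input, unpad_input  # type: ignore
    is_flash_attn_available = True  # hypothetical flag, not in the original file
except ImportError:
    # Degrade gracefully: record that the kernels are unavailable and warn once,
    # rather than raising and breaking users who never enable FlashAttention.
    is_flash_attn_available = False
    logger.warning("FlashAttention-2 is not installed, ignore this if you are not using FlashAttention.")
```

Downstream code could then check the flag before applying the FlashAttention forward patch, so the module stays importable either way.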