From 6ae0e27c8b009f2cdcfb2008f3a6c8ba9dd5cb9f Mon Sep 17 00:00:00 2001
From: Amirreza A
Date: Sat, 28 Sep 2024 19:03:36 +0330
Subject: [PATCH 1/3] made a small change to a warning about fa2 for gemma2 models.

Former-commit-id: 94ee105526d817e59bfd91f7bd4161d7cb2fd216
---
 src/llamafactory/model/model_utils/attention.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/llamafactory/model/model_utils/attention.py b/src/llamafactory/model/model_utils/attention.py
index 96e2c8a9..dfb42a9f 100644
--- a/src/llamafactory/model/model_utils/attention.py
+++ b/src/llamafactory/model/model_utils/attention.py
@@ -37,7 +37,10 @@ def configure_attn_implementation(
             if is_flash_attn_2_available():
                 require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
                 require_version("flash_attn>=2.6.3", "To fix: pip install flash_attn>=2.6.3")
-                logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
+
+                if model_args.flash_attn != "fa2":
+                    logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
+                    model_args.flash_attn = "fa2"
             else:
                 logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")

From 5df765e376ab9aebc349f707bff3520a4544cc14 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Sun, 29 Sep 2024 10:47:41 +0800
Subject: [PATCH 2/3] Update attention.py

Former-commit-id: fe7057a8a3eb111cdaf8349b6ac077d898bf4935
---
 src/llamafactory/model/model_utils/attention.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/llamafactory/model/model_utils/attention.py b/src/llamafactory/model/model_utils/attention.py
index dfb42a9f..7667b069 100644
--- a/src/llamafactory/model/model_utils/attention.py
+++ b/src/llamafactory/model/model_utils/attention.py
@@ -37,13 +37,11 @@ def configure_attn_implementation(
             if is_flash_attn_2_available():
                 require_version("transformers>=4.42.4", "To fix: pip install transformers>=4.42.4")
                 require_version("flash_attn>=2.6.3", "To fix: pip install flash_attn>=2.6.3")
-
                 if model_args.flash_attn != "fa2":
                     logger.warning("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
-
-                    model_args.flash_attn = "fa2"
+                model_args.flash_attn = "fa2"
             else:
-                logger.warning("Gemma-2 should use eager attention, change `flash_attn` to disabled.")
+                logger.warning("FlashAttention-2 is not installed, use eager attention.")
                 model_args.flash_attn = "disabled"
         elif model_args.flash_attn == "sdpa":
             logger.warning("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")

From e265082db86c929b21db03d5047e7cd94e20c4e5 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Sun, 29 Sep 2024 05:02:44 +0000
Subject: [PATCH 3/3] update readme

Former-commit-id: a2bd6944cd85fdca83407c1cb354f61e57e2ac78
---
 README.md    | 2 +-
 README_zh.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 92bbcc88..144908c1 100644
--- a/README.md
+++ b/README.md
@@ -175,7 +175,7 @@ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/
 | [InternLM2/InternLM2.5](https://huggingface.co/internlm) | 7B/20B | intern2 |
 | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
 | [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
-| [Llama 3/Llama 3.1/Llama3.2](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
+| [Llama 3-3.2](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
 | [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
 | [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 |
 | [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
diff --git a/README_zh.md b/README_zh.md
index 0b02f35f..60ac8ee2 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -176,7 +176,7 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
 | [InternLM2/InternLM2.5](https://huggingface.co/internlm) | 7B/20B | intern2 |
 | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
 | [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
-| [Llama 3/Llama 3.1/Llama3.2](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
+| [Llama 3-3.2](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |
 | [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | llava |
 | [MiniCPM](https://huggingface.co/openbmb) | 1B/2B/4B | cpm/cpm3 |
 | [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
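
For reference, a minimal standalone sketch of the Gemma-2 attention resolution that the two attention.py patches converge on. The helper `resolve_gemma2_attn`, its plain string/bool parameters, and the `print` calls are illustrative stand-ins for LLaMA-Factory's `ModelArguments`, `is_flash_attn_2_available()`, and `logger.warning`; the enclosing Gemma-2/trainability and `flash_attn in ("auto", "fa2")` checks are assumed from context and are not visible in the hunks above.

```python
# Hypothetical, simplified sketch of the logic patched above; not LLaMA-Factory's actual API.
def resolve_gemma2_attn(flash_attn: str, fa2_available: bool) -> str:
    """Return the attention implementation to use when training a Gemma-2 model."""
    if flash_attn in ("auto", "fa2"):  # assumed enclosing condition
        if fa2_available:
            if flash_attn != "fa2":
                # warn only when the user asked for something other than FA2
                print("Gemma-2 should use flash attention 2, change `flash_attn` to fa2.")
            return "fa2"  # after PATCH 2/3: always force FA2 when it is installed
        print("FlashAttention-2 is not installed, use eager attention.")
        return "disabled"  # fall back to eager attention
    if flash_attn == "sdpa":
        print("Gemma-2 should use soft-capping attention, while the SDPA attention does not support it.")
    return flash_attn  # leave other choices untouched


if __name__ == "__main__":
    assert resolve_gemma2_attn("auto", fa2_available=True) == "fa2"
    assert resolve_gemma2_attn("auto", fa2_available=False) == "disabled"
    assert resolve_gemma2_attn("sdpa", fa2_available=True) == "sdpa"
```

The net behavioral change of the two patches: FA2 is forced whenever it is installed (with the warning emitted only if the user had configured a different value), and the fallback message now states that FlashAttention-2 is not installed rather than telling the user to change the setting.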