From 22624e566ec1cfe94b3999905d86abaa81f1f42a Mon Sep 17 00:00:00 2001
From: cx2333 <99069487+cx2333-gt@users.noreply.github.com>
Date: Thu, 7 Mar 2024 10:13:55 +0800
Subject: [PATCH] fix flash_attn in train_web

Former-commit-id: a8889498fa4e9b6c7a82422ed5b1da3662b48d42
---
 src/llmtuner/webui/components/top.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llmtuner/webui/components/top.py b/src/llmtuner/webui/components/top.py
index 5c0d4f39..1e5cb1c4 100644
--- a/src/llmtuner/webui/components/top.py
+++ b/src/llmtuner/webui/components/top.py
@@ -30,7 +30,7 @@ def create_top() -> Dict[str, "Component"]:
             quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none")
             template = gr.Dropdown(choices=list(templates.keys()), value="default")
             rope_scaling = gr.Radio(choices=["none", "linear", "dynamic"], value="none")
-            booster = gr.Radio(choices=["none", "flashattn", "unsloth"], value="none")
+            booster = gr.Radio(choices=["none", "flash_attn", "unsloth"], value="none")
 
     model_name.change(list_adapters, [model_name, finetuning_type], [adapter_path], queue=False).then(
         get_model_path, [model_name], [model_path], queue=False
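
Note on why the one-character-class rename is a functional fix rather than a cosmetic one:
the web UI presumably derives the backend training flags by string-comparing the booster
radio value, so a choice label of "flashattn" would never match the "flash_attn" name the
runner checks for, and FlashAttention silently stayed disabled when trained from train_web.
The sketch below illustrates that comparison pattern only; _build_train_args and the dict
keys are hypothetical stand-ins, not the actual runner code from this repository.

    from typing import Any, Dict

    # Hypothetical helper mirroring how a Gradio-based runner might turn
    # collected component values into backend flags (a sketch, not the
    # real llmtuner runner).
    def _build_train_args(ui_values: Dict[str, Any]) -> Dict[str, Any]:
        booster = ui_values.get("top.booster", "none")
        return {
            # The flag is set only when the radio value matches this exact
            # string. With the pre-patch choice label "flashattn", the
            # comparison was always False.
            "flash_attn": booster == "flash_attn",
            "use_unsloth": booster == "unsloth",  # assumed flag name
        }

    # Pre-patch label: FlashAttention silently stays off.
    assert _build_train_args({"top.booster": "flashattn"})["flash_attn"] is False
    # Post-patch label: the strings match and the flag is enabled.
    assert _build_train_args({"top.booster": "flash_attn"})["flash_attn"] is True

Under that assumption, aligning the UI choice string with the flag name is the minimal
fix; the alternative of changing the comparison site instead would leave the two strings
free to drift apart again.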