fix flash_attn in train_web

Former-commit-id: 5f340e362b0e91fec76c19c77c5705bba1db481a
cx2333 2024-03-07 10:13:55 +08:00
parent e93fb3cc6c
commit 5007566588


@@ -30,7 +30,7 @@ def create_top() -> Dict[str, "Component"]:
 quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none")
 template = gr.Dropdown(choices=list(templates.keys()), value="default")
 rope_scaling = gr.Radio(choices=["none", "linear", "dynamic"], value="none")
-booster = gr.Radio(choices=["none", "flashattn", "unsloth"], value="none")
+booster = gr.Radio(choices=["none", "flash_attn", "unsloth"], value="none")
 model_name.change(list_adapters, [model_name, finetuning_type], [adapter_path], queue=False).then(
     get_model_path, [model_name], [model_path], queue=False
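
The rename only matters because the value string is matched elsewhere: the web UI's runner presumably compares the booster selection against the literal "flash_attn" when assembling training arguments, so the dropdown value and that comparison must agree. A minimal sketch of that pattern, with hypothetical function and argument names (not taken from this commit):

```python
# Hypothetical sketch of how a runner might consume the booster choice.
# The point of the fix: the string emitted by the UI must match the
# string compared against here.
def build_booster_args(booster: str) -> dict:
    args = {}
    if booster == "flash_attn":   # "flashattn" from the old UI would never match
        args["flash_attn"] = True
    elif booster == "unsloth":
        args["use_unsloth"] = True
    return args


print(build_booster_args("flash_attn"))  # {'flash_attn': True}
```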