fix flash_attn in train_web

Former-commit-id: a8889498fa4e9b6c7a82422ed5b1da3662b48d42
Author: cx2333
Date: 2024-03-07 10:13:55 +08:00
parent 31c618f1f7
commit 22624e566e


@@ -30,7 +30,7 @@ def create_top() -> Dict[str, "Component"]:
         quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none")
         template = gr.Dropdown(choices=list(templates.keys()), value="default")
         rope_scaling = gr.Radio(choices=["none", "linear", "dynamic"], value="none")
-        booster = gr.Radio(choices=["none", "flashattn", "unsloth"], value="none")
+        booster = gr.Radio(choices=["none", "flash_attn", "unsloth"], value="none")
     model_name.change(list_adapters, [model_name, finetuning_type], [adapter_path], queue=False).then(
         get_model_path, [model_name], [model_path], queue=False
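
The rename is presumably load-bearing: the webui reads the radio value and compares it against the name of the training flag it enables, so the old value "flashattn" would never match "flash_attn" and FlashAttention stayed off. A minimal sketch of that mapping, where get_booster_args is a hypothetical helper (not LLaMA-Factory's actual API) used only to illustrate the value-to-flag comparison:

```python
from typing import Any, Dict

def get_booster_args(booster: str) -> Dict[str, Any]:
    """Translate the webui booster choice into trainer kwargs (illustrative only)."""
    args: Dict[str, Any] = {}
    if booster == "flash_attn":
        args["flash_attn"] = True   # matches the renamed radio choice
    elif booster == "unsloth":
        args["use_unsloth"] = True
    return args

assert get_booster_args("flash_attn") == {"flash_attn": True}
assert get_booster_args("flashattn") == {}  # the old value matched nothing
```

Under this reading, keeping the UI choice string identical to the argument key avoids a separate translation table, which is why fixing the string in create_top is the whole fix.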