Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-29 10:10:35 +08:00)
[webui] support other hub (#8567)
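This commit adds a model-hub selector to the top bar of the LLaMA Board WebUI. Alongside the existing language, model name, and model path widgets, a new `hub_name` dropdown lets users choose between Hugging Face, ModelScope, and openMind; its default follows the environment toggles read by `use_modelscope()`/`use_openmind()`, switching hubs re-resolves the model info, checkpoint list, and chat template, and the chosen hub is persisted through `save_config`.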
src/llamafactory/webui/components/top.py

```diff
@@ -16,9 +16,10 @@ from typing import TYPE_CHECKING
 
 from ...data import TEMPLATES
 from ...extras.constants import METHODS, SUPPORTED_MODELS
+from ...extras.misc import use_modelscope, use_openmind
 from ...extras.packages import is_gradio_available
 from ..common import save_config
-from ..control import can_quantize, can_quantize_to, check_template, get_model_info, list_checkpoints
+from ..control import can_quantize, can_quantize_to, check_template, get_model_info, list_checkpoints, switch_hub
 
 
 if is_gradio_available():
```
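The newly imported `use_modelscope`/`use_openmind` helpers decide which hub is preselected. Their bodies are not part of this diff; a minimal sketch, assuming they are simple checks of the `USE_MODELSCOPE_HUB`/`USE_OPENMIND_HUB` environment flags documented for the project:

```python
import os


def _env_enabled(name: str) -> bool:
    # Treat "1"/"true" (case-insensitive) as enabled, anything else as disabled.
    return os.getenv(name, "0").lower() in ("true", "1")


def use_modelscope() -> bool:
    # Assumed flag name: USE_MODELSCOPE_HUB=1 routes downloads through ModelScope.
    return _env_enabled("USE_MODELSCOPE_HUB")


def use_openmind() -> bool:
    # Assumed flag name: USE_OPENMIND_HUB=1 routes downloads through openMind.
    return _env_enabled("USE_OPENMIND_HUB")
```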
```diff
@@ -33,8 +34,10 @@ def create_top() -> dict[str, "Component"]:
     with gr.Row():
         lang = gr.Dropdown(choices=["en", "ru", "zh", "ko", "ja"], value=None, scale=1)
         available_models = list(SUPPORTED_MODELS.keys()) + ["Custom"]
-        model_name = gr.Dropdown(choices=available_models, value=None, scale=3)
-        model_path = gr.Textbox(scale=3)
+        model_name = gr.Dropdown(choices=available_models, value=None, scale=2)
+        model_path = gr.Textbox(scale=2)
+        default_hub = "modelscope" if use_modelscope() else "openmind" if use_openmind() else "huggingface"
+        hub_name = gr.Dropdown(choices=["huggingface", "modelscope", "openmind"], value=default_hub, scale=2)
 
     with gr.Row():
         finetuning_type = gr.Dropdown(choices=METHODS, value="lora", scale=1)
```
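Note the chained conditional in `default_hub`: ModelScope takes precedence when both flags are enabled, and Hugging Face is the fallback when neither is. A standalone restatement with a hypothetical `pick_default_hub` helper makes that precedence explicit:

```python
def pick_default_hub(modelscope: bool, openmind: bool) -> str:
    # Same chained conditional as the diff: ModelScope wins, then openMind,
    # with Hugging Face as the default when neither flag is set.
    return "modelscope" if modelscope else "openmind" if openmind else "huggingface"


assert pick_default_hub(True, True) == "modelscope"  # ModelScope takes precedence
assert pick_default_hub(False, True) == "openmind"
assert pick_default_hub(False, False) == "huggingface"
```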
```diff
@@ -50,18 +53,25 @@ def create_top() -> dict[str, "Component"]:
     model_name.change(get_model_info, [model_name], [model_path, template], queue=False).then(
         list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False
     ).then(check_template, [lang, template])
-    model_name.input(save_config, inputs=[lang, model_name], queue=False)
-    model_path.input(save_config, inputs=[lang, model_name, model_path], queue=False)
+    model_name.input(save_config, inputs=[lang, hub_name, model_name], queue=False)
+    model_path.input(save_config, inputs=[lang, hub_name, model_name, model_path], queue=False)
     finetuning_type.change(can_quantize, [finetuning_type], [quantization_bit], queue=False).then(
         list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False
     )
     checkpoint_path.focus(list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False)
     quantization_method.change(can_quantize_to, [quantization_method], [quantization_bit], queue=False)
+    hub_name.change(switch_hub, inputs=[hub_name], queue=False).then(
+        get_model_info, [model_name], [model_path, template], queue=False
+    ).then(list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False).then(
+        check_template, [lang, template]
+    )
+    hub_name.input(save_config, inputs=[lang, hub_name], queue=False)
 
     return dict(
         lang=lang,
         model_name=model_name,
         model_path=model_path,
+        hub_name=hub_name,
         finetuning_type=finetuning_type,
         checkpoint_path=checkpoint_path,
         quantization_bit=quantization_bit,
```
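The wiring above means changing the dropdown first calls `switch_hub` and then re-runs `get_model_info`, `list_checkpoints`, and `check_template` against the newly selected hub, while `hub_name.input` persists the choice via `save_config`. The diff does not include `switch_hub` itself (it lives in `..control`); a plausible sketch, assuming it simply toggles the same environment flags that `use_modelscope()`/`use_openmind()` read:

```python
import os


def switch_hub(hub_name: str) -> None:
    # Assumption: flipping the env flags is sufficient, because the hub helpers
    # re-read them on every call, so the next get_model_info / list_checkpoints
    # invocation resolves model IDs against the selected hub.
    os.environ["USE_MODELSCOPE_HUB"] = "1" if hub_name == "modelscope" else "0"
    os.environ["USE_OPENMIND_HUB"] = "1" if hub_name == "openmind" else "0"
```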