Mirror of https://github.com/hiyouga/LLaMA-Factory.git

commit 471ef69218
parent 91d178f14d

update web demo

Former-commit-id: 53d6dc396dbfb1d25236bbc3075404cd5be6b376
--- a/src/api_demo.py
+++ b/src/api_demo.py
@@ -1,8 +1,3 @@
-# coding=utf-8
-# Implements API for fine-tuned models in OpenAI's format. (https://platform.openai.com/docs/api-reference/chat)
-# Usage: python api_demo.py --model_name_or_path path_to_model --checkpoint_dir path_to_checkpoint
-# Visit http://localhost:8000/docs for document.
-
 import uvicorn
 
 from llmtuner import ChatModel, create_app
@@ -12,6 +7,7 @@ def main():
     chat_model = ChatModel()
     app = create_app(chat_model)
     uvicorn.run(app, host="0.0.0.0", port=8000, workers=1)
+    # Visit http://localhost:8000/docs for document.
 
 
 if __name__ == "__main__":
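Only the header block moves here: the pointer to the interactive docs now lives inside main() instead of the file header. For reference, a minimal client sketch against the OpenAI-style server this script starts (the /v1/chat/completions route and the "model" field are assumptions based on the OpenAI chat format the removed comment cites, not part of this diff):

import requests

# Hypothetical client; assumes the server from api_demo.py is running on
# localhost:8000 and follows OpenAI's chat-completions request schema.
payload = {
    "model": "default",  # placeholder; the fine-tuned model is fixed server-side
    "messages": [{"role": "user", "content": "Hello!"}],
}
resp = requests.post("http://localhost:8000/v1/chat/completions", json=payload)
print(resp.json())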
--- a/src/cli_demo.py
+++ b/src/cli_demo.py
@@ -1,7 +1,3 @@
-# coding=utf-8
-# Implements stream chat in command line for fine-tuned models.
-# Usage: python cli_demo.py --model_name_or_path path_to_model --checkpoint_dir path_to_checkpoint
-
 from llmtuner import ChatModel
 
 
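The CLI demo keeps its behavior; only the header comments are dropped. As a reminder of what the script wraps, here is a sketch of one turn of its loop, assuming the stream_chat(query, history) generator that ChatModel exposes in llmtuner 0.1.x:

from llmtuner import ChatModel

# ChatModel() parses --model_name_or_path / --checkpoint_dir from argv.
chat_model = ChatModel()
history = []
query = "Hello!"
response = ""
for new_text in chat_model.stream_chat(query, history):  # assumed streaming API
    print(new_text, end="", flush=True)
    response += new_text
history.append((query, response))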
--- a/src/export_model.py
+++ b/src/export_model.py
@@ -1,7 +1,3 @@
-# coding=utf-8
-# Exports the fine-tuned model.
-# Usage: python export_model.py --checkpoint_dir path_to_checkpoint --output_dir path_to_save_model
-
 from llmtuner import export_model
 
 
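Here too only the usage comment disappears; the invocation it documented (python src/export_model.py --checkpoint_dir path_to_checkpoint --output_dir path_to_save_model) still applies. The script body is roughly this thin wrapper:

from llmtuner import export_model

def main():
    # Reads --checkpoint_dir and --output_dir from the command line and
    # writes the merged model to the output directory.
    export_model()

if __name__ == "__main__":
    main()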
--- a/src/llmtuner/__init__.py
+++ b/src/llmtuner/__init__.py
@@ -3,7 +3,7 @@
 from llmtuner.api import create_app
 from llmtuner.chat import ChatModel
 from llmtuner.tuner import export_model, run_exp
-from llmtuner.webui import Manager, WebChatModel, create_ui, create_chat_box
+from llmtuner.webui import create_ui, create_web_demo
 
 
 __version__ = "0.1.5"
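Note the surface change at the package root: Manager, WebChatModel, and create_chat_box are no longer re-exported, so imports like the old web_demo.py's (see the last diff below) break. Downstream code is expected to go through the new factory instead:

# Before this commit (now fails at import time):
# from llmtuner import Manager, WebChatModel, create_chat_box

# After:
from llmtuner import create_web_demo

demo = create_web_demo()  # wraps the WebChatModel + chat box wiring internally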
--- a/src/llmtuner/webui/__init__.py
+++ b/src/llmtuner/webui/__init__.py
@@ -1,4 +1 @@
-from llmtuner.webui.chat import WebChatModel
-from llmtuner.webui.interface import create_ui
-from llmtuner.webui.manager import Manager
-from llmtuner.webui.components import create_chat_box
+from llmtuner.webui.interface import create_ui, create_web_demo
--- a/src/llmtuner/webui/chat.py
+++ b/src/llmtuner/webui/chat.py
@@ -10,11 +10,12 @@ from llmtuner.webui.locales import ALERTS
 
 class WebChatModel(ChatModel):
 
-    def __init__(self, args: Optional[Dict[str, Any]] = None) -> None:
-        self.model = None
-        self.tokenizer = None
-        self.generating_args = GeneratingArguments()
-        if args is not None:
+    def __init__(self, args: Optional[Dict[str, Any]] = None, lazy_init: Optional[bool] = True) -> None:
+        if lazy_init:
+            self.model = None
+            self.tokenizer = None
+            self.generating_args = GeneratingArguments()
+        else:
             super().__init__(args)
 
     def load_model(
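The new lazy_init flag selects between two construction paths: the default keeps the old placeholder behavior for the training UI (nothing loaded until load_model() is called), while lazy_init=False defers to ChatModel.__init__, which loads the model immediately. An illustration, inferred from the diff above:

lazy = WebChatModel()                  # web UI case: model/tokenizer stay None
eager = WebChatModel(lazy_init=False)  # demo case: super().__init__(args) loads now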
--- a/src/llmtuner/webui/interface.py
+++ b/src/llmtuner/webui/interface.py
@@ -6,8 +6,10 @@ from llmtuner.webui.components import (
     create_sft_tab,
     create_eval_tab,
     create_infer_tab,
-    create_export_tab
+    create_export_tab,
+    create_chat_box
 )
+from llmtuner.webui.chat import WebChatModel
 from llmtuner.webui.css import CSS
 from llmtuner.webui.manager import Manager
 from llmtuner.webui.runner import Runner
@@ -53,6 +55,23 @@ def create_ui() -> gr.Blocks:
     return demo
 
 
+def create_web_demo() -> gr.Blocks:
+    chat_model = WebChatModel(lazy_init=False)
+
+    with gr.Blocks(title="Web Demo", css=CSS) as demo:
+        lang = gr.Dropdown(choices=["en", "zh"], value="en")
+
+        _, _, _, chat_elems = create_chat_box(chat_model, visible=True)
+
+        manager = Manager([{"lang": lang}, chat_elems])
+
+        demo.load(manager.gen_label, [lang], [lang] + list(chat_elems.values()))
+
+        lang.change(manager.gen_label, [lang], [lang] + list(chat_elems.values()))
+
+    return demo
+
+
 if __name__ == "__main__":
     demo = create_ui()
     demo.queue()
--- a/src/web_demo.py
+++ b/src/web_demo.py
@@ -1,30 +1,8 @@
-# coding=utf-8
-# Implements user interface in browser for fine-tuned models.
-# Usage: python web_demo.py --model_name_or_path path_to_model --checkpoint_dir path_to_checkpoint
-
-import gradio as gr
-from transformers.utils.versions import require_version
-
-from llmtuner import Manager, WebChatModel, create_chat_box
-
-
-require_version("gradio>=3.36.0", "To fix: pip install gradio>=3.36.0")
+from llmtuner import create_web_demo
 
 
 def main():
-    chat_model = WebChatModel()
-
-    with gr.Blocks(title="Web Demo") as demo:
-        lang = gr.Dropdown(choices=["en", "zh"], value="en")
-
-        _, _, _, chat_elems = create_chat_box(chat_model, visible=True)
-
-        manager = Manager([{"lang": lang}, chat_elems])
-
-        demo.load(manager.gen_label, [lang], [lang] + list(chat_elems.values()))
-
-        lang.change(manager.gen_label, [lang], [lang] + list(chat_elems.values()))
-
+    demo = create_web_demo()
     demo.queue()
     demo.launch(server_name="0.0.0.0", server_port=7860, share=False, inbrowser=True)
 
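The launcher shrinks to a factory call, and the CLI flags from the removed usage comment still work since ChatModel parses them from argv: python src/web_demo.py --model_name_or_path path_to_model --checkpoint_dir path_to_checkpoint. The same app can also be served with other options, e.g. (a sketch using standard Gradio launch() parameters):

from llmtuner import create_web_demo

demo = create_web_demo()
demo.queue()
# Bind to localhost only, pick another port, and create a temporary share link.
demo.launch(server_name="127.0.0.1", server_port=7861, share=True)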