From 042e41b16b93b6991e5f2230600031c90ba94605 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Wed, 8 May 2024 10:36:36 +0800 Subject: [PATCH 01/24] modify export model Former-commit-id: c7051edae4ce23f85daf204a2aaac134b1f29c3d --- src/llmtuner/train/tuner.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index e1a997c1..6973a4e5 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -15,11 +15,9 @@ from .pt import run_pt from .rm import run_rm from .sft import run_sft - if TYPE_CHECKING: from transformers import TrainerCallback - logger = get_logger(__name__) @@ -52,7 +50,9 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: if model_args.adapter_name_or_path is not None and model_args.export_quantization_bit is not None: raise ValueError("Please merge adapters before quantizing the model.") - tokenizer = load_tokenizer(model_args)["tokenizer"] + tokenizer_module = load_tokenizer(model_args)["tokenizer"] + tokenizer = tokenizer_module['tokenizer'] + processor = tokenizer_module['processor'] get_template_and_fix_tokenizer(tokenizer, data_args.template) model = load_model(tokenizer, model_args, finetuning_args) # must after fixing tokenizer to resize vocab @@ -88,3 +88,6 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: tokenizer.push_to_hub(model_args.export_hub_model_id, token=model_args.hf_hub_token) except Exception: logger.warning("Cannot save tokenizer, please copy the files manually.") + + if model_args.visual_inputs: + processor.image_processor.save_pretrained(model_args.export_dir) From c5cfe458e854313b68d54efd083813d381b946c1 Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Wed, 8 May 2024 22:50:42 +0800 Subject: [PATCH 02/24] add mllm export Former-commit-id: ce4770d33f6761d3b1d60661efcb0be34a036154 --- src/llmtuner/train/tuner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index 6973a4e5..00349e09 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -50,7 +50,7 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: if model_args.adapter_name_or_path is not None and model_args.export_quantization_bit is not None: raise ValueError("Please merge adapters before quantizing the model.") - tokenizer_module = load_tokenizer(model_args)["tokenizer"] + tokenizer_module = load_tokenizer(model_args) tokenizer = tokenizer_module['tokenizer'] processor = tokenizer_module['processor'] get_template_and_fix_tokenizer(tokenizer, data_args.template) From 4fc4b26bb0e7967e9f34cae1f91de6a0891d9229 Mon Sep 17 00:00:00 2001 From: cocktailpeanut Date: Thu, 9 May 2024 01:26:15 -0400 Subject: [PATCH 03/24] remove unnecessary environment variable usage Former-commit-id: 4be1d832cb269a07987f5cab5d5f949e269087da --- src/webui.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/webui.py b/src/webui.py index b9385259..000098d1 100644 --- a/src/webui.py +++ b/src/webui.py @@ -5,9 +5,7 @@ from llmtuner.webui.interface import create_ui def main(): server_name = os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0") - server_port = int(os.environ.get("GRADIO_SERVER_PORT", "7860")) - gradio_share = bool(int(os.environ.get("GRADIO_SHARE", "0"))) - create_ui().queue().launch(share=gradio_share, server_name=server_name, server_port=server_port) + create_ui().queue().launch(server_name=server_name) if 
__name__ == "__main__": From e7275a75f74dabc764e184e374ad4224c890953c Mon Sep 17 00:00:00 2001 From: cocktailpeanut Date: Thu, 9 May 2024 01:32:00 -0400 Subject: [PATCH 04/24] more removal of unnecessary environment variables Former-commit-id: 59ef1a6e0d81585a6c010143d05fcfae26d40c00 --- src/llmtuner/webui/interface.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/llmtuner/webui/interface.py b/src/llmtuner/webui/interface.py index 969ce6bd..6cfce8aa 100644 --- a/src/llmtuner/webui/interface.py +++ b/src/llmtuner/webui/interface.py @@ -79,6 +79,4 @@ def run_web_ui() -> None: def run_web_demo() -> None: server_name = os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0") - server_port = int(os.environ.get("GRADIO_SERVER_PORT", "7860")) - gradio_share = bool(int(os.environ.get("GRADIO_SHARE", "0"))) - create_web_demo().queue().launch(share=gradio_share, server_name=server_name, server_port=server_port) + create_web_demo().queue().launch(server_name=server_name) From 6937ea90794cf4e9a7fa5807632149b0c9f3a501 Mon Sep 17 00:00:00 2001 From: cocktailpeanut Date: Thu, 9 May 2024 01:33:20 -0400 Subject: [PATCH 05/24] yet another removal of unnecessary environment variables Former-commit-id: a07726028f0287de28e4751672b27efe0efc6477 --- src/llmtuner/webui/interface.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/llmtuner/webui/interface.py b/src/llmtuner/webui/interface.py index 6cfce8aa..bbd91bb7 100644 --- a/src/llmtuner/webui/interface.py +++ b/src/llmtuner/webui/interface.py @@ -72,9 +72,7 @@ def create_web_demo() -> gr.Blocks: def run_web_ui() -> None: server_name = os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0") - server_port = int(os.environ.get("GRADIO_SERVER_PORT", "7860")) - gradio_share = bool(int(os.environ.get("GRADIO_SHARE", "0"))) - create_ui().queue().launch(share=gradio_share, server_name=server_name, server_port=server_port) + create_ui().queue().launch(server_name=server_name) def run_web_demo() -> None: From 0140b8282472ad3e2d5ed539e4692a88a90a19ce Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 9 May 2024 13:53:39 +0800 Subject: [PATCH 06/24] add mllm processor save and Chinese-LLaVA-Med show Former-commit-id: 110c49fbf79fe0625f091e63746bfabde00add99 --- README.md | 1 + README_zh.md | 2 ++ 2 files changed, 3 insertions(+) diff --git a/README.md b/README.md index 798b7bd4..e71ee552 100644 --- a/README.md +++ b/README.md @@ -467,6 +467,7 @@ If you have a project that should be incorporated, please contact via email or c 1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B. 1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods. 1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generate metadata for stable diffusion. [[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt) +1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A multimodal large language model specialized in Chinese medical domain, based on LLaVA-1.5-7B. diff --git a/README_zh.md b/README_zh.md index 2c5b1aa1..7c0497c2 100644 --- a/README_zh.md +++ b/README_zh.md @@ -467,6 +467,8 @@ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1` 1. 
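A note on the rationale for patches 03-05: Gradio's `launch()` already falls back to the `GRADIO_SERVER_NAME` and `GRADIO_SERVER_PORT` environment variables when the corresponding arguments are left unset, so parsing them by hand only duplicated Gradio's own behavior. A minimal sketch of the equivalence (the placeholder UI stands in for `create_ui()`):

```python
import gradio as gr

with gr.Blocks() as demo:  # placeholder for llmtuner.webui.interface.create_ui()
    gr.Markdown("LLaMA Board")

# Running `GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 python webui.py`
# behaves like launch(server_name="0.0.0.0", server_port=7860), because
# launch() consults these variables itself when the arguments are None.
demo.queue().launch()
```

The `share` flag has no such documented fallback, which is why patch 22 below reintroduces explicit `GRADIO_SHARE` parsing.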
**[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。 1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。 1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**:一个用于生成 Stable Diffusion 提示词的大型语言模型。[[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt) +1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: 中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得 + From 1a01dc288b57a9c9a0036040ffc8465d6b65cdbc Mon Sep 17 00:00:00 2001 From: BUAADreamer <1428195643@qq.com> Date: Thu, 9 May 2024 14:05:19 +0800 Subject: [PATCH 07/24] add push processor to hub Former-commit-id: 7a05a965311edfdfafa57af8342875860d341f27 --- src/llmtuner/train/tuner.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index 00349e09..11509c20 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -91,3 +91,5 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: if model_args.visual_inputs: processor.image_processor.save_pretrained(model_args.export_dir) + if model_args.export_hub_model_id is not None: + processor.image_processor.push_to_hub(model_args.export_hub_model_id, token=model_args.hf_hub_token) \ No newline at end of file From 82e830f8e7210fd8e6b19de9b82c786b33c01bb2 Mon Sep 17 00:00:00 2001 From: codingma Date: Thu, 9 May 2024 16:33:45 +0800 Subject: [PATCH 08/24] fix sha1 of glaive_toolcall dataset Former-commit-id: 25649cd14899f41fe12c99af12619ddcd5a8ba88 --- data/dataset_info.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/dataset_info.json b/data/dataset_info.json index 72446550..d5b7208f 100644 --- a/data/dataset_info.json +++ b/data/dataset_info.json @@ -41,7 +41,7 @@ }, "glaive_toolcall": { "file_name": "glaive_toolcall_10k.json", - "file_sha1": "a6917b85d209df98d31fdecb253c79ebc440f6f3", + "file_sha1": "36aea64548fbf6aa300bef411b9221092ed84902", "formatting": "sharegpt", "columns": { "messages": "conversations", From 0aa218d39acf91a9b3c2e8d1e7de318baf28953b Mon Sep 17 00:00:00 2001 From: hiyouga Date: Thu, 9 May 2024 16:52:27 +0800 Subject: [PATCH 09/24] resolve python 3.8 package Former-commit-id: 5eee4ec7016846356715a4fa1ad58e3cbb1cac6e --- README.md | 10 ++++++++-- README_zh.md | 10 ++++++++-- requirements.txt | 1 - setup.py | 1 + src/llmtuner/api/app.py | 4 +++- 5 files changed, 20 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 798b7bd4..474e536d 100644 --- a/README.md +++ b/README.md @@ -310,13 +310,19 @@ huggingface-cli login ### Installation +> [!IMPORTANT] +> Installation is mandatory. + ```bash git clone https://github.com/hiyouga/LLaMA-Factory.git cd LLaMA-Factory -pip install -e .[metrics] +pip install -e .[torch,metrics] ``` -Extra dependencies available: metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality +Extra dependencies available: torch, metrics, deepspeed, bitsandbytes, vllm, galore, badam, gptq, awq, aqlm, qwen, modelscope, quality + +> [!TIP] +> Use `pip install --no-deps -e .` to resolve package conflicts.
<details><summary>For Windows users</summary>
<details><summary>Windows 用户指南</summary>
|| trainable%: {:.4f}".format( diff --git a/src/llmtuner/model/patcher.py b/src/llmtuner/model/patcher.py index 31cba492..6ca6f2e5 100644 --- a/src/llmtuner/model/patcher.py +++ b/src/llmtuner/model/patcher.py @@ -101,7 +101,7 @@ def patch_model( if model_args.resize_vocab: resize_embedding_layer(model, tokenizer) - if model_args.visual_inputs: + if model_args.visual_inputs and model_args.autocast_projector: autocast_projector_dtype(model, model_args) if is_trainable: From 15cccbfdcc3f683dcc9ed00e6e758a8522d9199e Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Sat, 11 May 2024 22:43:04 +0800 Subject: [PATCH 12/24] Update README.md Former-commit-id: d24c83bb30e2829ba78db90c4c4975788f2eed25 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5880dbea..90c66caf 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main) [![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/) [![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/) -[![Citation](https://img.shields.io/badge/citation-43-green)](#projects-using-llama-factory) +[![Citation](https://img.shields.io/badge/citation-44-green)](#projects-using-llama-factory) [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls) [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK) [![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai) From 2175fedd6a5fe3e6c87b5e3608a9cce2f9cfa549 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Sat, 11 May 2024 22:44:51 +0800 Subject: [PATCH 13/24] Update README_zh.md Former-commit-id: 1a205478403b5852fac0aa8418cdb8995fbe40e3 --- README_zh.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README_zh.md b/README_zh.md index 330a012e..0aba9043 100644 --- a/README_zh.md +++ b/README_zh.md @@ -5,7 +5,7 @@ [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main) [![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/) [![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/) -[![Citation](https://img.shields.io/badge/citation-43-green)](#使用了-llama-factory-的项目) +[![Citation](https://img.shields.io/badge/citation-44-green)](#使用了-llama-factory-的项目) [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls) [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK) [![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai) @@ -473,7 +473,7 @@ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1` 1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。 1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。 1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**:一个用于生成 Stable Diffusion 提示词的大型语言模型。[[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt) -1. 
**[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: 中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得
+1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**:中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得。
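One detail worth spelling out about the Phi3 fix in patch 10: a Python dict literal silently keeps only the last of two duplicate keys, so writing `DownloadSource.DEFAULT` twice discarded the Hugging Face repository id and left the default source pointing at the ModelScope one, breaking downloads. A minimal repro (the enum values here are illustrative, not the ones in `constants.py`):

```python
from enum import Enum


class DownloadSource(str, Enum):
    DEFAULT = "hf"  # Hugging Face Hub
    MODELSCOPE = "ms"  # ModelScope


models = {
    DownloadSource.DEFAULT: "microsoft/Phi-3-mini-4k-instruct",
    DownloadSource.DEFAULT: "LLM-Research/Phi-3-mini-4k-instruct",  # overwrites the entry above
}
print(models)  # {<DownloadSource.DEFAULT: 'hf'>: 'LLM-Research/Phi-3-mini-4k-instruct'}
```

Keying the second entry as `DownloadSource.MODELSCOPE`, as the patch does, restores both download paths.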
From a2a23451e9fffa1439af73db815eaa388e99d512 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Sat, 11 May 2024 23:54:53 +0800 Subject: [PATCH 14/24] Update tuner.py Former-commit-id: 22afcbdb25160583e5ece28fad0585c7bc70f41a --- src/llmtuner/train/tuner.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index 11509c20..cf44aa8c 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -15,9 +15,11 @@ from .pt import run_pt from .rm import run_rm from .sft import run_sft + if TYPE_CHECKING: from transformers import TrainerCallback + logger = get_logger(__name__) @@ -51,8 +53,8 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: raise ValueError("Please merge adapters before quantizing the model.") tokenizer_module = load_tokenizer(model_args) - tokenizer = tokenizer_module['tokenizer'] - processor = tokenizer_module['processor'] + tokenizer = tokenizer_module["tokenizer"] + processor = tokenizer_module["processor"] get_template_and_fix_tokenizer(tokenizer, data_args.template) model = load_model(tokenizer, model_args, finetuning_args) # must after fixing tokenizer to resize vocab @@ -63,7 +65,7 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: raise ValueError("The model is not a `PreTrainedModel`, export aborted.") if getattr(model, "quantization_method", None) is None: # cannot convert dtype of a quantized model - output_dtype = getattr(model.config, "torch_dtype", torch.float16) + output_dtype = torch.float16 setattr(model.config, "torch_dtype", output_dtype) model = model.to(output_dtype) @@ -86,10 +88,12 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: tokenizer.save_pretrained(model_args.export_dir) if model_args.export_hub_model_id is not None: tokenizer.push_to_hub(model_args.export_hub_model_id, token=model_args.hf_hub_token) + + if model_args.visual_inputs and processor is not None: + getattr(processor, "image_processor").save_pretrained(model_args.export_dir) + if model_args.export_hub_model_id is not None: + getattr(processor, "image_processor").push_to_hub( + model_args.export_hub_model_id, token=model_args.hf_hub_token + ) except Exception: logger.warning("Cannot save tokenizer, please copy the files manually.") - - if model_args.visual_inputs: - processor.image_processor.save_pretrained(model_args.export_dir) - if model_args.export_hub_model_id is not None: - processor.image_processor.push_to_hub(model_args.export_hub_model_id, token=model_args.hf_hub_token) \ No newline at end of file From 8e0b2a5e6f960d10470f6f8c584f33d9467725c8 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Sat, 11 May 2024 23:55:59 +0800 Subject: [PATCH 15/24] Update tuner.py Former-commit-id: ccd1eb2c0992f75440c0e1c5cd3f02d03aacb085 --- src/llmtuner/train/tuner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index cf44aa8c..ffdc3e60 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -65,7 +65,7 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: raise ValueError("The model is not a `PreTrainedModel`, export aborted.") if getattr(model, "quantization_method", None) is None: # cannot convert dtype of a quantized model - output_dtype = torch.float16 + output_dtype = getattr(model.config, "torch_dtype", torch.float16) setattr(model.config, "torch_dtype", output_dtype) model = model.to(output_dtype) From 
63a1564215a101737baba011e70b0f57a16e2969 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Sat, 11 May 2024 23:56:40 +0800 Subject: [PATCH 16/24] Update patcher.py Former-commit-id: 2c88d394d29c6e98ac3a6860848855722614ca52 --- src/llmtuner/model/patcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llmtuner/model/patcher.py b/src/llmtuner/model/patcher.py index 6ca6f2e5..31cba492 100644 --- a/src/llmtuner/model/patcher.py +++ b/src/llmtuner/model/patcher.py @@ -101,7 +101,7 @@ def patch_model( if model_args.resize_vocab: resize_embedding_layer(model, tokenizer) - if model_args.visual_inputs and model_args.autocast_projector: + if model_args.visual_inputs: autocast_projector_dtype(model, model_args) if is_trainable: From 8e36c99503f7ac5bf78546a1758e260c2558e165 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Sat, 11 May 2024 23:57:05 +0800 Subject: [PATCH 17/24] Update model_args.py Former-commit-id: c4114add4c42c1d7723f7270451a6c9fc656ecd1 --- src/llmtuner/hparams/model_args.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/llmtuner/hparams/model_args.py b/src/llmtuner/hparams/model_args.py index 996eabae..ac70bb3c 100644 --- a/src/llmtuner/hparams/model_args.py +++ b/src/llmtuner/hparams/model_args.py @@ -85,10 +85,6 @@ class ModelArguments: default=False, metadata={"help": "Whethor or not to use multimodal LLM that accepts visual inputs."}, ) - autocast_projector: bool = field( - default=True, - metadata={"help": "Whethor or not to autocast projector."}, - ) moe_aux_loss_coef: Optional[float] = field( default=None, metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."}, From 95e3a295009dc4d47d3b880c43ac7cf62f2f762d Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Sat, 11 May 2024 23:58:47 +0800 Subject: [PATCH 18/24] Update loader.py Former-commit-id: 2fc12790414677bb82736208fb9547640780af2e --- src/llmtuner/model/loader.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index 1dca84a1..ead6178f 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -155,8 +155,7 @@ def load_model( model.eval() else: model.train() - if model_args.visual_inputs: - model.vision_tower.requires_grad_(False) + trainable_params, all_param = count_parameters(model) if is_trainable: param_stats = "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( From fd034e91289e82721a0ab119918afb4aed2155bc Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sun, 12 May 2024 00:02:49 +0800 Subject: [PATCH 19/24] fix llava config Former-commit-id: b13d032325e45d401a9dbc64d4c73e308eff3288 --- src/llmtuner/model/adapter.py | 3 +++ src/llmtuner/model/loader.py | 2 +- src/llmtuner/model/patcher.py | 9 +++------ src/llmtuner/model/utils/valuehead.py | 7 +------ src/llmtuner/model/utils/visual.py | 9 +++++++-- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/llmtuner/model/adapter.py b/src/llmtuner/model/adapter.py index d43e00f0..0ffb91c1 100644 --- a/src/llmtuner/model/adapter.py +++ b/src/llmtuner/model/adapter.py @@ -46,6 +46,9 @@ def init_adapter( if (not finetuning_args.pure_bf16) and (not finetuning_args.use_badam): model = model.float() + if model_args.visual_inputs and hasattr(model, "vision_tower"): # freeze vision model + model.vision_tower.requires_grad_(False) + if finetuning_args.finetuning_type == "freeze" and is_trainable: logger.info("Fine-tuning method: Freeze") num_layers = ( diff --git 
a/src/llmtuner/model/loader.py b/src/llmtuner/model/loader.py index ead6178f..ea55de27 100644 --- a/src/llmtuner/model/loader.py +++ b/src/llmtuner/model/loader.py @@ -106,7 +106,7 @@ def load_model( """ init_kwargs = _get_init_kwargs(model_args) config = load_config(model_args) - patch_config(config, tokenizer, model_args, init_kwargs, is_trainable, add_valuehead) + patch_config(config, tokenizer, model_args, init_kwargs, is_trainable) model = None lazy_load = False diff --git a/src/llmtuner/model/patcher.py b/src/llmtuner/model/patcher.py index 31cba492..fd99bd3b 100644 --- a/src/llmtuner/model/patcher.py +++ b/src/llmtuner/model/patcher.py @@ -15,8 +15,8 @@ from .utils.longlora import configure_longlora from .utils.moe import add_z3_leaf_module, configure_moe from .utils.quantization import configure_quantization from .utils.rope import configure_rope -from .utils.valuehead import configure_valuehead, prepare_valuehead_model -from .utils.visual import autocast_projector_dtype +from .utils.valuehead import prepare_valuehead_model +from .utils.visual import autocast_projector_dtype, configure_hidden_size if TYPE_CHECKING: @@ -40,7 +40,6 @@ def patch_config( model_args: "ModelArguments", init_kwargs: Dict[str, Any], is_trainable: bool, - add_valuehead: bool, ) -> None: if model_args.compute_dtype is None: # priority: bf16 > fp16 > fp32 model_args.compute_dtype = infer_optim_dtype(model_dtype=getattr(config, "torch_dtype", None)) @@ -50,9 +49,7 @@ def patch_config( configure_longlora(config, model_args, is_trainable) configure_quantization(config, tokenizer, model_args, init_kwargs) configure_moe(config, model_args, is_trainable) - - if add_valuehead: - configure_valuehead(config) + configure_hidden_size(config) if model_args.use_cache and not is_trainable: setattr(config, "use_cache", True) diff --git a/src/llmtuner/model/utils/valuehead.py b/src/llmtuner/model/utils/valuehead.py index a6180753..d813729e 100644 --- a/src/llmtuner/model/utils/valuehead.py +++ b/src/llmtuner/model/utils/valuehead.py @@ -8,7 +8,7 @@ from ...extras.logging import get_logger if TYPE_CHECKING: - from transformers import PretrainedConfig, PreTrainedModel + from transformers import PreTrainedModel from ...hparams import ModelArguments @@ -16,11 +16,6 @@ if TYPE_CHECKING: logger = get_logger(__name__) -def configure_valuehead(config: "PretrainedConfig") -> None: - if getattr(config, "model_type", None) == "llava": - setattr(config, "hidden_size", getattr(config.vision_config, "intermediate_size", None)) - - def load_valuehead_params(path_or_repo_id: str, model_args: "ModelArguments") -> Dict[str, torch.Tensor]: r""" Loads value head parameters from Hugging Face Hub or local disk. 
diff --git a/src/llmtuner/model/utils/visual.py b/src/llmtuner/model/utils/visual.py index cb51301b..b29a9ba5 100644 --- a/src/llmtuner/model/utils/visual.py +++ b/src/llmtuner/model/utils/visual.py @@ -6,7 +6,7 @@ from ...extras.logging import get_logger if TYPE_CHECKING: - from transformers import PreTrainedModel + from transformers import PretrainedConfig, PreTrainedModel from ...hparams import ModelArguments @@ -14,6 +14,11 @@ if TYPE_CHECKING: logger = get_logger(__name__) +def configure_hidden_size(config: "PretrainedConfig") -> None: + if getattr(config, "model_type", None) == "llava": + setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None)) + + def autocast_projector_dtype( model: "PreTrainedModel", model_args: "ModelArguments", mm_projector_name: str = "multi_modal_projector" ) -> None: @@ -22,7 +27,7 @@ def autocast_projector_dtype( ) -> "torch.Tensor": return output.to(model_args.compute_dtype) - if hasattr(model, mm_projector_name): + if hasattr(model, mm_projector_name) and getattr(model.config, "quantization_method", None): logger.info("Casting multimodal projector outputs in {}.".format(model_args.compute_dtype)) mm_projector: "torch.nn.Module" = getattr(model, mm_projector_name) mm_projector.register_forward_hook(_mm_projector_forward_post_hook) From f90514a26d8c448c2a1843ef9fe3ef230f29e767 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sun, 12 May 2024 00:03:59 +0800 Subject: [PATCH 20/24] fix #3674 Former-commit-id: 6bad2eafef75ec697477e1f2ce739006042fb4c7 --- src/llmtuner/train/tuner.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index ffdc3e60..8f103ca1 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -68,6 +68,8 @@ def export_model(args: Optional[Dict[str, Any]] = None) -> None: output_dtype = getattr(model.config, "torch_dtype", torch.float16) setattr(model.config, "torch_dtype", output_dtype) model = model.to(output_dtype) + else: + setattr(model.config, "torch_dtype", torch.float16) model.save_pretrained( save_directory=model_args.export_dir, From 395f18909c5209b159a27fc32f9d0bed69981d77 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sun, 12 May 2024 00:33:49 +0800 Subject: [PATCH 21/24] update readme Former-commit-id: d57ca8a865b46588f65b2cc15073c5fcc4e4cebc --- README.md | 12 +++++++++--- README_zh.md | 13 +++++++++---- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 90c66caf..80154cae 100644 --- a/README.md +++ b/README.md @@ -366,17 +366,23 @@ See [examples/README.md](examples/README.md) for advanced usage (including distr #### Use local environment ```bash -CUDA_VISIBLE_DEVICES=0 GRADIO_SHARE=1 llamafactory-cli webui +CUDA_VISIBLE_DEVICES=0 GRADIO_SERVER_PORT=7860 GRADIO_SHARE=1 llamafactory-cli webui ``` -
<details><summary>For Alibaba Cloud users</summary>
+<details><summary>For Alibaba Cloud PAI or AutoDL users</summary>
#### Use Docker diff --git a/README_zh.md b/README_zh.md index 0aba9043..5656fb4a 100644 --- a/README_zh.md +++ b/README_zh.md @@ -366,17 +366,23 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli export examples/merge_lora/llama3_lora_s #### 使用本地环境 ```bash -CUDA_VISIBLE_DEVICES=0 GRADIO_SHARE=1 llamafactory-cli webui +CUDA_VISIBLE_DEVICES=0 GRADIO_SERVER_PORT=7860 GRADIO_SHARE=1 llamafactory-cli webui ``` -
<details><summary>阿里云用户指南</summary>
+<details><summary>阿里云 PAI 和 AutoDL 用户指南</summary>
#### 使用 Docker @@ -475,7 +481,6 @@ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1` 1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**:一个用于生成 Stable Diffusion 提示词的大型语言模型。[[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt) 1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**:中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得。 -
## 协议 From 3e5a099187c6622d15946414970dc5fc24301841 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sun, 12 May 2024 01:10:30 +0800 Subject: [PATCH 22/24] remove checksum and fix ui args Former-commit-id: 0cfdeb1d30efb63211434bc4656bceb59e666289 --- README.md | 4 ++-- README_zh.md | 4 ++-- data/dataset_info.json | 25 +++++-------------------- src/llmtuner/data/loader.py | 4 +--- src/llmtuner/data/parser.py | 2 -- src/llmtuner/data/utils.py | 15 --------------- src/llmtuner/webui/interface.py | 6 ++++-- src/webui.py | 3 ++- 8 files changed, 16 insertions(+), 47 deletions(-) diff --git a/README.md b/README.md index 80154cae..57a34dab 100644 --- a/README.md +++ b/README.md @@ -366,7 +366,7 @@ See [examples/README.md](examples/README.md) for advanced usage (including distr #### Use local environment ```bash -CUDA_VISIBLE_DEVICES=0 GRADIO_SERVER_PORT=7860 GRADIO_SHARE=1 llamafactory-cli webui +CUDA_VISIBLE_DEVICES=0 GRADIO_SHARE=1 llamafactory-cli webui ```
<details><summary>For Alibaba Cloud PAI or AutoDL users</summary>
<details><summary>阿里云 PAI 和 AutoDL 用户指南</summary>
ValueError("File extension must be txt, csv, json or jsonl.") - - checksum(data_files, dataset_attr.file_sha1) else: raise NotImplementedError diff --git a/src/llmtuner/data/parser.py b/src/llmtuner/data/parser.py index 01a417a9..3170fd8a 100644 --- a/src/llmtuner/data/parser.py +++ b/src/llmtuner/data/parser.py @@ -21,7 +21,6 @@ class DatasetAttr: load_from: Literal["hf_hub", "ms_hub", "script", "file"] dataset_name: str """ extra configs """ - file_sha1: Optional[str] = None subset: Optional[str] = None folder: Optional[str] = None ranking: bool = False @@ -99,7 +98,6 @@ def get_dataset_list(data_args: "DataArguments") -> List["DatasetAttr"]: else: dataset_attr = DatasetAttr("file", dataset_name=dataset_info[name]["file_name"]) - dataset_attr.set_attr("file_sha1", dataset_info[name]) dataset_attr.set_attr("subset", dataset_info[name]) dataset_attr.set_attr("folder", dataset_info[name]) dataset_attr.set_attr("ranking", dataset_info[name], default=False) diff --git a/src/llmtuner/data/utils.py b/src/llmtuner/data/utils.py index dc189609..29fd4ad4 100644 --- a/src/llmtuner/data/utils.py +++ b/src/llmtuner/data/utils.py @@ -26,21 +26,6 @@ class Role(str, Enum): OBSERVATION = "observation" -def checksum(data_files: List[str], file_sha1: Optional[str] = None) -> None: - if file_sha1 is None: - logger.warning("Checksum failed: missing SHA-1 hash value in dataset_info.json.") - return - - if len(data_files) != 1: - logger.warning("Checksum failed: too many files.") - return - - with open(data_files[0], "rb") as f: - sha1 = hashlib.sha1(f.read()).hexdigest() - if sha1 != file_sha1: - logger.warning("Checksum failed: mismatched SHA-1 hash value at {}.".format(data_files[0])) - - def infer_max_len(source_len: int, target_len: int, max_len: int, reserved_label_len: int) -> Tuple[int, int]: max_target_len = int(max_len * (target_len / (source_len + target_len))) max_target_len = max(max_target_len, reserved_label_len) diff --git a/src/llmtuner/webui/interface.py b/src/llmtuner/webui/interface.py index bbd91bb7..91709d40 100644 --- a/src/llmtuner/webui/interface.py +++ b/src/llmtuner/webui/interface.py @@ -71,10 +71,12 @@ def create_web_demo() -> gr.Blocks: def run_web_ui() -> None: + gradio_share = bool(int(os.environ.get("GRADIO_SHARE", "0"))) server_name = os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0") - create_ui().queue().launch(server_name=server_name) + create_ui().queue().launch(share=gradio_share, server_name=server_name) def run_web_demo() -> None: + gradio_share = bool(int(os.environ.get("GRADIO_SHARE", "0"))) server_name = os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0") - create_web_demo().queue().launch(server_name=server_name) + create_web_demo().queue().launch(share=gradio_share, server_name=server_name) diff --git a/src/webui.py b/src/webui.py index 000098d1..3f8690d0 100644 --- a/src/webui.py +++ b/src/webui.py @@ -4,8 +4,9 @@ from llmtuner.webui.interface import create_ui def main(): + gradio_share = bool(int(os.environ.get("GRADIO_SHARE", "0"))) server_name = os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0") - create_ui().queue().launch(server_name=server_name) + create_ui().queue().launch(share=gradio_share, server_name=server_name) if __name__ == "__main__": From c558e446929e7bef0b70d803855490864cef5056 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sun, 12 May 2024 01:25:16 +0800 Subject: [PATCH 23/24] fix #3658 Former-commit-id: 37799a62d4431d1d8c02fee6c23d607a65723c1a --- src/llmtuner/extras/callbacks.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/src/llmtuner/extras/callbacks.py b/src/llmtuner/extras/callbacks.py index a142928a..6d24b244 100644 --- a/src/llmtuner/extras/callbacks.py +++ b/src/llmtuner/extras/callbacks.py @@ -139,13 +139,15 @@ class LogCallback(TrainerCallback): r""" Event called after an evaluation phase. """ - self._close_thread_pool() + if not self.do_train: + self._close_thread_pool() def on_predict(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" Event called after a successful prediction. """ - self._close_thread_pool() + if not self.do_train: + self._close_thread_pool() def on_log(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" From 32ee4ef229b92430de6944926c924782228bef6c Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sun, 12 May 2024 01:28:51 +0800 Subject: [PATCH 24/24] lint Former-commit-id: cb72eb6ab24615ce492ca2945f29daa34c0c52d4 --- src/llmtuner/data/utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/llmtuner/data/utils.py b/src/llmtuner/data/utils.py index 29fd4ad4..aaa5bdc0 100644 --- a/src/llmtuner/data/utils.py +++ b/src/llmtuner/data/utils.py @@ -1,6 +1,5 @@ -import hashlib from enum import Enum, unique -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, List, Tuple, Union from datasets import concatenate_datasets, interleave_datasets