From 7641a214d81bc03844208bf48fa9374ef873a834 Mon Sep 17 00:00:00 2001 From: codingma Date: Sun, 28 Apr 2024 11:31:34 +0800 Subject: [PATCH 01/32] support BAdam in WebUI Former-commit-id: 1247154dd7d5eba5d11c4bb8504bf551ab49eb72 --- src/llmtuner/webui/components/train.py | 26 ++++++ src/llmtuner/webui/locales.py | 109 +++++++++++++++++++++++++ src/llmtuner/webui/runner.py | 9 ++ 3 files changed, 144 insertions(+) diff --git a/src/llmtuner/webui/components/train.py b/src/llmtuner/webui/components/train.py index 7dc324af..9d93a9b6 100644 --- a/src/llmtuner/webui/components/train.py +++ b/src/llmtuner/webui/components/train.py @@ -210,6 +210,32 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: ) ) + with gr.Accordion(open=False) as badam_tab: + with gr.Row(): + use_badam = gr.Checkbox() + badam_mode = gr.Dropdown(choices=["layer", "ratio"], value="layer") + badam_mask_mode = gr.Dropdown(choices=["adjacent", "scatter"], value="adjacent") + badam_switch_mode = gr.Dropdown(choices=["ascending", "descending", "random", "fixed"], value="ascending") + badam_update_ratio = gr.Slider(value=0, minimum=0, maximum=1, step=0.01) + badam_switch_block_every = gr.Slider(value=50, minimum=-1, maximum=200, step=1) + + badam_verbose = gr.Dropdown(choices=[0, 1, 2], value=0) + + input_elems.update({use_badam, badam_mode, badam_switch_block_every, badam_switch_mode, badam_update_ratio, + badam_mask_mode, badam_verbose}) + elem_dict.update( + dict( + badam_tab=badam_tab, + use_badam=use_badam, + badam_mode=badam_mode, + badam_switch_block_every=badam_switch_block_every, + badam_switch_mode=badam_switch_mode, + badam_update_ratio=badam_update_ratio, + badam_mask_mode=badam_mask_mode, + badam_verbose=badam_verbose, + ) + ) + with gr.Row(): cmd_preview_btn = gr.Button() arg_save_btn = gr.Button() diff --git a/src/llmtuner/webui/locales.py b/src/llmtuner/webui/locales.py index d341c7b6..d3dd4dc2 100644 --- a/src/llmtuner/webui/locales.py +++ b/src/llmtuner/webui/locales.py @@ -891,6 +891,115 @@ LOCALES = { "info": "应用 GaLore 的模块名称。使用英文逗号分隔多个名称。", }, }, + "badam_tab": { + "en": { + "label": "BAdam configurations", + }, + "ru": { + "label": "Конфигурации BAdam", + }, + "zh": { + "label": "BAdam 参数设置", + }, + }, + "use_badam": { + "en": { + "label": "Use BAdam", + "info": "Enable the block coordinate optimization with Adam.", + }, + "ru": { + "label": "Использовать BAdam", + "info": "Включите блочную оптимизацию координат с Adam.", + }, + "zh": { + "label": "使用 BAdam", + "info": "使用多Block协同的Adam优化器。", + }, + }, + "badam_mode": { + "en": { + "label": "BAdam mode", + "info": "Whether to use layer-wise or ratio-wise BAdam optimizer.", + }, + "ru": { + "label": "Режим BAdam", + "info": "Использовать оптимизатор BAdam с обработкой слоев или с обработкой коэффициентов.", + }, + "zh": { + "label": "BAdam 模式", + "info": "使用layer或者ratio比例模式。", + }, + }, + "badam_switch_block_every": { + "en": { + "label": "Switch block frequency", + "info": "How often to switch model's block update. Set to -1 to disable the block update.", + }, + "ru": { + "label": "Частота переключения", + "info": "Как часто переключать обновление блока модели. 
Установите -1, чтобы отключить обновление блока.", + }, + "zh": { + "label": "切换block的频率", + "info": "控制切换block切换的频率,如果是-1,则不切换。", + }, + }, + "badam_switch_mode": { + "en": { + "label": "Switch mode", + "info": "The strategy of picking block to update for layer-wise BAdam.", + }, + "ru": { + "label": "Переключить режим", + "info": "Стратегия выбора блока для обновления в методе BAdam по слоям.", + }, + "zh": { + "label": "Block切换策略", + "info": "如果是layer类型的训练模式,如何切换block。", + }, + }, + "badam_update_ratio": { + "en": { + "label": "Update ratio", + "info": "The ratio of the update for ratio-wise BAdam.", + }, + "ru": { + "label": "Коэффициент обновления", + "info": "Коэффициент обновления для метода BAdam, основанного на коэффициентах.", + }, + "zh": { + "label": "Block更新比例", + "info": "如果是比例类型的训练模式,block每次更新的范围比例。", + }, + }, + "badam_mask_mode": { + "en": { + "label": "Mask mode", + "info": "The mode of the mask for BAdam optimizer.", + }, + "ru": { + "label": "Режим маски", + "info": "Режим маски для оптимизатора BAdam.", + }, + "zh": { + "label": "Mask模式", + "info": "BAdam优化器内训练参数的mask关系。", + }, + }, + "badam_verbose": { + "en": { + "label": "Verbosity level", + "info": "0 for no print, 1 for print the block prefix, 2 for print trainable parameters.", + }, + "ru": { + "label": "Уровень многословности", + "info": "0 для отсутствия печати, 1 для печати префикса блока, 2 для печати обучаемых параметров.", + }, + "zh": { + "label": "输出日志级别", + "info": "0:不输出,1:输出block前缀, 1:输出可训练的参数。", + }, + }, "cmd_preview_btn": { "en": { "value": "Preview command", diff --git a/src/llmtuner/webui/runner.py b/src/llmtuner/webui/runner.py index 8054484f..52584f31 100644 --- a/src/llmtuner/webui/runner.py +++ b/src/llmtuner/webui/runner.py @@ -151,6 +151,7 @@ class Runner: fp16=(get("train.compute_type") == "fp16"), bf16=(get("train.compute_type") == "bf16"), pure_bf16=(get("train.compute_type") == "pure_bf16"), + use_badam=get("train.use_badam"), ) args["disable_tqdm"] = True @@ -198,6 +199,14 @@ class Runner: args["galore_scale"] = get("train.galore_scale") args["galore_target"] = get("train.galore_target") + if args["use_badam"]: + args["badam_mode"] = get("train.badam_mode") + args["badam_switch_block_every"] = get("train.badam_switch_block_every") + args["badam_switch_mode"] = get("train.badam_switch_mode") + args["badam_update_ratio"] = get("train.badam_update_ratio") + args["badam_mask_mode"] = get("train.badam_mask_mode") + args["badam_verbose"] = get("train.badam_verbose") + return args def _parse_eval_args(self, data: Dict["Component", Any]) -> Dict[str, Any]: From dfd153cc813fc4ed815018c26f852ef15883888c Mon Sep 17 00:00:00 2001 From: khazic Date: Sun, 28 Apr 2024 14:27:45 +0800 Subject: [PATCH 02/32] added the second sharegpt format Former-commit-id: 6d140ac98a78ecc0a713842bb917dc8eb14450cb --- data/README.md | 32 ++++++++++++++++++++++++++++---- data/README_zh.md | 26 +++++++++++++++++++++++++- 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/data/README.md b/data/README.md index 6de0430f..9158233f 100644 --- a/data/README.md +++ b/data/README.md @@ -94,20 +94,44 @@ Remember to set `"ranking": true` for the preference datasets. 
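As an aside on the preference-data layout described above (a two-element `output` list with the preferred answer first, plus `"ranking": true` in `dataset_info.json`): this layout is easy to get subtly wrong when converting data by hand. The sketch below is not part of the patch and the `data.json` path is a placeholder; it only shows one way to sanity-check such a file before registering it:

```python
import json

DATA_FILE = "data.json"  # placeholder: path to your preference dataset

with open(DATA_FILE, encoding="utf-8") as f:
    records = json.load(f)

for i, record in enumerate(records):
    output = record.get("output")
    # Preference pairs must be a two-element list: [chosen answer, rejected answer].
    if not (isinstance(output, list) and len(output) == 2):
        raise ValueError(f"record {i} does not contain a two-element output list")

print(f'{len(records)} records look like valid preference pairs; remember "ranking": true in dataset_info.json.')
```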
The dataset in sharegpt format should follow the below format: ```json +# The first sharegpt format [ { "conversations": [ { "from": "human", - "value": "user instruction" + "value": "用户指令" }, { "from": "gpt", - "value": "model response" + "value": "模型回答" } ], - "system": "system prompt (optional)", - "tools": "tool description (optional)" + "system": "系统提示词(选填)", + "tools": "工具描述(选填)" + } +] + +# The second sharegpt format + +[ + { + "type": "chatml", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Tell me something about large language models." + }, + { + "role": "assistant", + "content": "Large language models are a type of language model ..." + } + ], + "source": "unknown" } ] ``` diff --git a/data/README_zh.md b/data/README_zh.md index fb6cb1d9..9abef5b6 100644 --- a/data/README_zh.md +++ b/data/README_zh.md @@ -37,7 +37,7 @@ ---- -该项目目前支持两种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织: +该项目目前支持三种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织: ```json [ @@ -94,6 +94,7 @@ 而 sharegpt 格式的数据集按照以下方式组织: ```json +# 第一种sharegpt格式 [ { "conversations": [ @@ -110,6 +111,29 @@ "tools": "工具描述(选填)" } ] + +# 第二种sharegpt格式 + +[ + { + "type": "chatml", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Tell me something about large language models." + }, + { + "role": "assistant", + "content": "Large language models are a type of language model ..." + } + ], + "source": "unknown" + } +] ``` 对于上述格式的数据,`dataset_info.json` 中的 `columns` 应为: From 3d88589c0f37990ea371b5fb18886dcbf48c92d1 Mon Sep 17 00:00:00 2001 From: khazic Date: Sun, 28 Apr 2024 14:30:05 +0800 Subject: [PATCH 03/32] Upgrade the second sharegpt format Former-commit-id: 057f992a666b029d207a3dc7dfc353f9abcf8316 --- data/README_zh.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/data/README_zh.md b/data/README_zh.md index 9abef5b6..5a9db167 100644 --- a/data/README_zh.md +++ b/data/README_zh.md @@ -120,15 +120,15 @@ "messages": [ { "role": "system", - "content": "You are a helpful assistant." + "content": "你是一个很有用的AI助手" }, { "role": "user", - "content": "Tell me something about large language models." + "content": "告诉我一些关于大模型的一些信息" }, { "role": "assistant", - "content": "Large language models are a type of language model ..." 
+ "content": "大模型是一种语言模型" } ], "source": "unknown" From 57fcdca336d547a53b7a81ab592575243bddf0cf Mon Sep 17 00:00:00 2001 From: Lao Date: Sun, 28 Apr 2024 23:31:37 +0800 Subject: [PATCH 04/32] Update README_zh.md Former-commit-id: bacc8588dc7b0b43c240189ecf4336bedc299357 --- data/README_zh.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/README_zh.md b/data/README_zh.md index 5a9db167..1fe98a9e 100644 --- a/data/README_zh.md +++ b/data/README_zh.md @@ -37,7 +37,7 @@ ---- -该项目目前支持三种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织: +该项目目前支持二种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织: ```json [ From ea58cf111e4bdb2773b666835a977e72de235ff1 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Thu, 2 May 2024 02:13:46 +0800 Subject: [PATCH 05/32] Update README.md Former-commit-id: 4fb43b0c9aa48242126252ad755a2a1683b38d6a --- data/README.md | 145 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 99 insertions(+), 46 deletions(-) diff --git a/data/README.md b/data/README.md index 9158233f..012de4e7 100644 --- a/data/README.md +++ b/data/README.md @@ -1,4 +1,4 @@ -If you are using a custom dataset, please provide your dataset definition in the following format in `dataset_info.json`. +If you are using a custom dataset, please add your **dataset description** to `dataset_info.json` according to the following format. We also provide several examples in the next section. ```json "dataset_name": { @@ -33,7 +33,7 @@ If you are using a custom dataset, please provide your dataset definition in the } ``` -Given above, you can use the custom dataset via specifying `--dataset dataset_name`. +After that, you can load the custom dataset by specifying `--dataset dataset_name`. ---- @@ -54,10 +54,11 @@ Currently we support dataset in **alpaca** or **sharegpt** format, the dataset i ] ``` -Regarding the above dataset, the `columns` in `dataset_info.json` should be: +Regarding the above dataset, the description in `dataset_info.json` should be: ```json "dataset_name": { + "file_name": "data.json", "columns": { "prompt": "instruction", "query": "input", @@ -70,76 +71,86 @@ Regarding the above dataset, the `columns` in `dataset_info.json` should be: The `query` column will be concatenated with the `prompt` column and used as the user prompt, then the user prompt would be `prompt\nquery`. The `response` column represents the model response. -The `system` column will be used as the system prompt. The `history` column is a list consisting string tuples representing prompt-response pairs in the history. Note that the responses in the history **will also be used for training**. +The `system` column will be used as the system prompt. The `history` column is a list consisting string tuples representing prompt-response pairs in the history. Note that the responses in the history **will also be used for training** in supervised fine-tuning. -For the pre-training datasets, only the `prompt` column will be used for training. 
- -For the preference datasets, the `response` column should be a string list whose length is 2, with the preferred answers appearing first, for example: +For the **pre-training datasets**, only the `prompt` column will be used for training, for example: ```json -{ - "instruction": "user instruction", - "input": "user input", - "output": [ - "chosen answer", - "rejected answer" - ] +[ + {"text": "document"}, + {"text": "document"} +] +``` + +Regarding the above dataset, the description in `dataset_info.json` should be: + +```json +"dataset_name": { + "file_name": "data.json", + "columns": { + "prompt": "text" + } } ``` -Remember to set `"ranking": true` for the preference datasets. +For the **preference datasets**, the `response` column should be a string list whose length is 2, with the preferred answers appearing first, for example: + +```json +[ + { + "instruction": "user instruction", + "input": "user input", + "output": [ + "chosen answer", + "rejected answer" + ] + } +] +``` + +Regarding the above dataset, the description in `dataset_info.json` should be: + +```json +"dataset_name": { + "file_name": "data.json", + "ranking": true, + "columns": { + "prompt": "instruction", + "query": "input", + "response": "output", + } +} +``` ---- -The dataset in sharegpt format should follow the below format: +The dataset in **sharegpt** format should follow the below format: ```json -# The first sharegpt format [ { "conversations": [ { "from": "human", - "value": "用户指令" + "value": "user instruction" }, { "from": "gpt", - "value": "模型回答" + "value": "model response" } ], - "system": "系统提示词(选填)", - "tools": "工具描述(选填)" - } -] - -# The second sharegpt format - -[ - { - "type": "chatml", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Tell me something about large language models." - }, - { - "role": "assistant", - "content": "Large language models are a type of language model ..." - } - ], - "source": "unknown" + "system": "system prompt (optional)", + "tools": "tool description (optional)" } ] ``` -Regarding the above dataset, the `columns` in `dataset_info.json` should be: +Regarding the above dataset, the description in `dataset_info.json` should be: ```json "dataset_name": { + "file_name": "data.json", + "formatting": "sharegpt", "columns": { "messages": "conversations", "system": "system", @@ -156,4 +167,46 @@ Regarding the above dataset, the `columns` in `dataset_info.json` should be: where the `messages` column should be a list following the `u/a/u/a/u/a` order. -Pre-training datasets and preference datasets are incompatible with the sharegpt format yet. +We also supports the dataset in the **openai** format: + +```json +[ + { + "messages": [ + { + "role": "system", + "content": "system prompt (optional)" + }, + { + "role": "user", + "content": "user instruction" + }, + { + "role": "assistant", + "content": "model response" + } + ] + } +] +``` + +Regarding the above dataset, the description in `dataset_info.json` should be: + +```json +"dataset_name": { + "file_name": "data.json", + "formatting": "sharegpt", + "columns": { + "messages": "messages" + }, + "tags": { + "role_tag": "role", + "content_tag": "content", + "user_tag": "user", + "assistant_tag": "assistant", + "system_tag": "system" + } +} +``` + +Pre-training datasets and preference datasets are **incompatible** with the sharegpt format yet. 
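Before the next commit, a brief aside on the formats documented above: the `u/a/u/a` ordering of sharegpt conversations is worth verifying before training. The snippet below is only an illustrative check, not part of this patch; the file name is a placeholder and the tag values are the sharegpt defaults described in the README, so adjust them if your dataset overrides them.

```python
import json

# Default sharegpt tags as described above; change these if your dataset uses custom tags.
ROLE_TAG, USER_TAG, ASSISTANT_TAG = "from", "human", "gpt"

def follows_ua_order(conversations):
    """Return True if the turns alternate user/assistant, starting with the user."""
    expected = (USER_TAG, ASSISTANT_TAG)
    return all(turn[ROLE_TAG] == expected[i % 2] for i, turn in enumerate(conversations))

with open("data.json", encoding="utf-8") as f:  # placeholder file name
    dataset = json.load(f)

bad = [i for i, example in enumerate(dataset) if not follows_ua_order(example["conversations"])]
print("all examples follow the u/a/u/a order" if not bad else f"examples out of order: {bad}")
```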
From eb99999ca8be43bcd12eaf699f27b762952cb85f Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Thu, 2 May 2024 02:14:55 +0800 Subject: [PATCH 06/32] Update README_zh.md Former-commit-id: 1c673d89faca3160627009fcd0a4aa39138570c0 --- data/README_zh.md | 139 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 96 insertions(+), 43 deletions(-) diff --git a/data/README_zh.md b/data/README_zh.md index 1fe98a9e..6449c5d5 100644 --- a/data/README_zh.md +++ b/data/README_zh.md @@ -1,4 +1,4 @@ -如果您使用自定义数据集,请务必在 `dataset_info.json` 文件中按照以下格式提供数据集定义。 +如果您使用自定义数据集,请务必按照以下格式在 `dataset_info.json` 文件中添加**数据集描述**。我们在下面也提供了一些例子。 ```json "数据集名称": { @@ -33,11 +33,11 @@ } ``` -添加后可通过指定 `--dataset 数据集名称` 参数使用自定义数据集。 +然后,可通过使用 `--dataset 数据集名称` 参数加载自定义数据集。 ---- -该项目目前支持二种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织: +该项目目前支持两种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织: ```json [ @@ -54,10 +54,11 @@ ] ``` -对于上述格式的数据,`dataset_info.json` 中的 `columns` 应为: +对于上述格式的数据,`dataset_info.json` 中的描述应为: ```json "数据集名称": { + "file_name": "data.json", "columns": { "prompt": "instruction", "query": "input", @@ -70,31 +71,62 @@ 其中 `query` 列对应的内容会与 `prompt` 列对应的内容拼接后作为用户指令,即用户指令为 `prompt\nquery`。`response` 列对应的内容为模型回答。 -`system` 列对应的内容将被作为系统提示词。`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮的指令和回答。注意历史消息中的回答**也会被用于训练**。 +`system` 列对应的内容将被作为系统提示词。`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮的指令和回答。注意在指令监督学习时,历史消息中的回答**也会被用于训练**。 -对于预训练数据集,仅 `prompt` 列中的内容会用于模型训练。 - -对于偏好数据集,`response` 列应当是一个长度为 2 的字符串列表,排在前面的代表更优的回答,例如: +对于**预训练数据集**,仅 `prompt` 列中的内容会用于模型训练,例如: ```json -{ - "instruction": "用户指令", - "input": "用户输入", - "output": [ - "优质回答", - "劣质回答" - ] +[ + {"text": "document"}, + {"text": "document"} +] +``` + +对于上述格式的数据,`dataset_info.json` 中的描述应为: + +```json +"数据集名称": { + "file_name": "data.json", + "columns": { + "prompt": "text" + } } ``` -添加偏好数据集需要额外指定 `"ranking": true`。 +对于**偏好数据集**,`response` 列应当是一个长度为 2 的字符串列表,排在前面的代表更优的回答,例如: + +```json +[ + { + "instruction": "用户指令", + "input": "用户输入", + "output": [ + "优质回答", + "劣质回答" + ] + } +] +``` + +对于上述格式的数据,`dataset_info.json` 中的描述应为: + +```json +"数据集名称": { + "file_name": "data.json", + "ranking": true, + "columns": { + "prompt": "instruction", + "query": "input", + "response": "output", + } +} +``` ---- -而 sharegpt 格式的数据集按照以下方式组织: +而 **sharegpt** 格式的数据集按照以下方式组织: ```json -# 第一种sharegpt格式 [ { "conversations": [ @@ -111,35 +143,14 @@ "tools": "工具描述(选填)" } ] - -# 第二种sharegpt格式 - -[ - { - "type": "chatml", - "messages": [ - { - "role": "system", - "content": "你是一个很有用的AI助手" - }, - { - "role": "user", - "content": "告诉我一些关于大模型的一些信息" - }, - { - "role": "assistant", - "content": "大模型是一种语言模型" - } - ], - "source": "unknown" - } -] ``` -对于上述格式的数据,`dataset_info.json` 中的 `columns` 应为: +对于上述格式的数据,`dataset_info.json` 中的描述应为: ```json "数据集名称": { + "file_name": "data.json", + "formatting": "sharegpt", "columns": { "messages": "conversations", "system": "system", @@ -156,4 +167,46 @@ 其中 `messages` 列应当是一个列表,且符合 `用户/模型/用户/模型/用户/模型` 的顺序。 -预训练数据集和偏好数据集尚不支持 sharegpt 格式。 +我们同样支持 **openai** 格式的数据集: + +```json +[ + { + "messages": [ + { + "role": "system", + "content": "系统提示词(选填)" + }, + { + "role": "user", + "content": "用户指令" + }, + { + "role": "assistant", + "content": "模型回答" + } + ] + } +] +``` + +对于上述格式的数据,`dataset_info.json` 中的描述应为: + +```json +"数据集名称": { + "file_name": "data.json", + "formatting": "sharegpt", + "columns": { + "messages": "messages" + }, + "tags": { + "role_tag": "role", + "content_tag": "content", + "user_tag": "user", + "assistant_tag": "assistant", + 
"system_tag": "system" + } +} +``` + +预训练数据集和偏好数据集**尚不支持** sharegpt 格式。 From a11a04a24faeb63df01467dc4e6abe2192201b88 Mon Sep 17 00:00:00 2001 From: hoshi-hiyouga Date: Thu, 2 May 2024 02:21:27 +0800 Subject: [PATCH 07/32] Update train.py Former-commit-id: 16f0d0056967872e02969fdd842a381f9484af8a --- src/llmtuner/webui/components/train.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/llmtuner/webui/components/train.py b/src/llmtuner/webui/components/train.py index 9d93a9b6..be070869 100644 --- a/src/llmtuner/webui/components/train.py +++ b/src/llmtuner/webui/components/train.py @@ -214,25 +214,19 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: with gr.Row(): use_badam = gr.Checkbox() badam_mode = gr.Dropdown(choices=["layer", "ratio"], value="layer") - badam_mask_mode = gr.Dropdown(choices=["adjacent", "scatter"], value="adjacent") badam_switch_mode = gr.Dropdown(choices=["ascending", "descending", "random", "fixed"], value="ascending") - badam_update_ratio = gr.Slider(value=0, minimum=0, maximum=1, step=0.01) badam_switch_block_every = gr.Slider(value=50, minimum=-1, maximum=200, step=1) + badam_update_ratio = gr.Slider(value=0, minimum=0, maximum=1, step=0.01) - badam_verbose = gr.Dropdown(choices=[0, 1, 2], value=0) - - input_elems.update({use_badam, badam_mode, badam_switch_block_every, badam_switch_mode, badam_update_ratio, - badam_mask_mode, badam_verbose}) + input_elems.update({use_badam, badam_mode, badam_switch_mode, badam_switch_block_every, badam_update_ratio}) elem_dict.update( dict( badam_tab=badam_tab, use_badam=use_badam, badam_mode=badam_mode, - badam_switch_block_every=badam_switch_block_every, badam_switch_mode=badam_switch_mode, + badam_switch_block_every=badam_switch_block_every, badam_update_ratio=badam_update_ratio, - badam_mask_mode=badam_mask_mode, - badam_verbose=badam_verbose, ) ) From dd0b85580e2682b8afadf30787a7cfc473478c10 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Thu, 2 May 2024 02:47:04 +0800 Subject: [PATCH 08/32] fix badam configs Former-commit-id: 8a4e6a4c65a9a42e6501b0d3ce81d6220c287454 --- src/llmtuner/hparams/finetuning_args.py | 15 +++-- src/llmtuner/train/utils.py | 4 +- src/llmtuner/webui/components/train.py | 8 +-- src/llmtuner/webui/locales.py | 80 ++++++++----------------- src/llmtuner/webui/runner.py | 6 +- 5 files changed, 44 insertions(+), 69 deletions(-) diff --git a/src/llmtuner/hparams/finetuning_args.py b/src/llmtuner/hparams/finetuning_args.py index f4f71bc5..03bf52af 100644 --- a/src/llmtuner/hparams/finetuning_args.py +++ b/src/llmtuner/hparams/finetuning_args.py @@ -221,16 +221,18 @@ class BAdamArgument: default=None, metadata={"help": "The starting block index for layer-wise BAdam."}, ) - badam_switch_block_every: Optional[int] = field( - default=50, - metadata={"help": "How often to switch model's block update. Set to -1 to disable the block update."}, - ) badam_switch_mode: Optional[Literal["ascending", "descending", "random", "fixed"]] = field( default="ascending", metadata={"help": "the strategy of picking block to update for layer-wise BAdam."}, ) + badam_switch_interval: Optional[int] = field( + default=50, + metadata={ + "help": "Number of steps to update the block for layer-wise BAdam. Use -1 to disable the block update." 
+ }, + ) badam_update_ratio: float = field( - default=0.0, + default=0.05, metadata={"help": "The ratio of the update for ratio-wise BAdam."}, ) badam_mask_mode: Literal["adjacent", "scatter"] = field( @@ -308,6 +310,9 @@ class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments, GaloreA if self.use_galore and self.finetuning_type == "lora": raise ValueError("Cannot use LoRA with GaLore together.") + if self.use_galore and self.use_badam: + raise ValueError("Cannot use GaLore with BAdam together.") + if self.loraplus_lr_ratio is not None and self.finetuning_type != "lora": raise ValueError("`loraplus_lr_ratio` is only valid for the LoRA training.") diff --git a/src/llmtuner/train/utils.py b/src/llmtuner/train/utils.py index d9fc363d..21dac461 100644 --- a/src/llmtuner/train/utils.py +++ b/src/llmtuner/train/utils.py @@ -317,14 +317,14 @@ def _create_badam_optimizer( base_optimizer=base_optimizer, named_parameters_list=list(model.named_parameters()), block_prefix_list=None, - switch_block_every=finetuning_args.badam_switch_block_every, + switch_block_every=finetuning_args.badam_switch_interval, start_block=finetuning_args.badam_start_block, switch_mode=finetuning_args.badam_switch_mode, verbose=finetuning_args.badam_verbose, ) logger.info( f"Using BAdam optimizer with layer-wise update, switch mode is {finetuning_args.badam_switch_mode}, " - f"switch block every {finetuning_args.badam_switch_block_every} steps, " + f"switch block every {finetuning_args.badam_switch_interval} steps, " f"default start block is {finetuning_args.badam_start_block}" ) diff --git a/src/llmtuner/webui/components/train.py b/src/llmtuner/webui/components/train.py index be070869..c9671289 100644 --- a/src/llmtuner/webui/components/train.py +++ b/src/llmtuner/webui/components/train.py @@ -215,17 +215,17 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: use_badam = gr.Checkbox() badam_mode = gr.Dropdown(choices=["layer", "ratio"], value="layer") badam_switch_mode = gr.Dropdown(choices=["ascending", "descending", "random", "fixed"], value="ascending") - badam_switch_block_every = gr.Slider(value=50, minimum=-1, maximum=200, step=1) - badam_update_ratio = gr.Slider(value=0, minimum=0, maximum=1, step=0.01) + badam_switch_interval = gr.Slider(value=50, minimum=1, maximum=1024, step=1) + badam_update_ratio = gr.Slider(value=0.05, minimum=0, maximum=1, step=0.01) - input_elems.update({use_badam, badam_mode, badam_switch_mode, badam_switch_block_every, badam_update_ratio}) + input_elems.update({use_badam, badam_mode, badam_switch_mode, badam_switch_interval, badam_update_ratio}) elem_dict.update( dict( badam_tab=badam_tab, use_badam=use_badam, badam_mode=badam_mode, badam_switch_mode=badam_switch_mode, - badam_switch_block_every=badam_switch_block_every, + badam_switch_interval=badam_switch_interval, badam_update_ratio=badam_update_ratio, ) ) diff --git a/src/llmtuner/webui/locales.py b/src/llmtuner/webui/locales.py index d3dd4dc2..1c474f34 100644 --- a/src/llmtuner/webui/locales.py +++ b/src/llmtuner/webui/locales.py @@ -905,15 +905,15 @@ LOCALES = { "use_badam": { "en": { "label": "Use BAdam", - "info": "Enable the block coordinate optimization with Adam.", + "info": "Enable the BAdam optimizer.", }, "ru": { "label": "Использовать BAdam", - "info": "Включите блочную оптимизацию координат с Adam.", + "info": "Включите оптимизатор BAdam.", }, "zh": { "label": "使用 BAdam", - "info": "使用多Block协同的Adam优化器。", + "info": "使用 BAdam 优化器。", }, }, "badam_mode": { @@ -923,25 +923,11 @@ LOCALES = { }, 
"ru": { "label": "Режим BAdam", - "info": "Использовать оптимизатор BAdam с обработкой слоев или с обработкой коэффициентов.", + "info": "Использовать ли оптимизатор BAdam с послоевой или пропорциональной настройкой.", }, "zh": { "label": "BAdam 模式", - "info": "使用layer或者ratio比例模式。", - }, - }, - "badam_switch_block_every": { - "en": { - "label": "Switch block frequency", - "info": "How often to switch model's block update. Set to -1 to disable the block update.", - }, - "ru": { - "label": "Частота переключения", - "info": "Как часто переключать обновление блока модели. Установите -1, чтобы отключить обновление блока.", - }, - "zh": { - "label": "切换block的频率", - "info": "控制切换block切换的频率,如果是-1,则不切换。", + "info": "使用 layer-wise 或 ratio-wise BAdam 优化器。", }, }, "badam_switch_mode": { @@ -950,12 +936,26 @@ LOCALES = { "info": "The strategy of picking block to update for layer-wise BAdam.", }, "ru": { - "label": "Переключить режим", - "info": "Стратегия выбора блока для обновления в методе BAdam по слоям.", + "label": "Режим переключения", + "info": "Стратегия выбора блока для обновления для послойного BAdam.", }, "zh": { - "label": "Block切换策略", - "info": "如果是layer类型的训练模式,如何切换block。", + "label": "切换策略", + "info": "Layer-wise BAdam 优化器的块切换策略。", + }, + }, + "badam_switch_interval": { + "en": { + "label": "Switch interval", + "info": "Number of steps to update the block for layer-wise BAdam.", + }, + "ru": { + "label": "Интервал переключения", + "info": "количество шагов для обновления блока для пошагового BAdam.", + }, + "zh": { + "label": "切换频率", + "info": "Layer-wise BAdam 优化器的块切换频率。", }, }, "badam_update_ratio": { @@ -965,39 +965,11 @@ LOCALES = { }, "ru": { "label": "Коэффициент обновления", - "info": "Коэффициент обновления для метода BAdam, основанного на коэффициентах.", + "info": "Коэффициент обновления для BAdam с учётом соотношений.", }, "zh": { - "label": "Block更新比例", - "info": "如果是比例类型的训练模式,block每次更新的范围比例。", - }, - }, - "badam_mask_mode": { - "en": { - "label": "Mask mode", - "info": "The mode of the mask for BAdam optimizer.", - }, - "ru": { - "label": "Режим маски", - "info": "Режим маски для оптимизатора BAdam.", - }, - "zh": { - "label": "Mask模式", - "info": "BAdam优化器内训练参数的mask关系。", - }, - }, - "badam_verbose": { - "en": { - "label": "Verbosity level", - "info": "0 for no print, 1 for print the block prefix, 2 for print trainable parameters.", - }, - "ru": { - "label": "Уровень многословности", - "info": "0 для отсутствия печати, 1 для печати префикса блока, 2 для печати обучаемых параметров.", - }, - "zh": { - "label": "输出日志级别", - "info": "0:不输出,1:输出block前缀, 1:输出可训练的参数。", + "label": "Block 更新比例", + "info": "Ratio-wise BAdam 优化器的更新比例。", }, }, "cmd_preview_btn": { diff --git a/src/llmtuner/webui/runner.py b/src/llmtuner/webui/runner.py index 52584f31..d53a4dfe 100644 --- a/src/llmtuner/webui/runner.py +++ b/src/llmtuner/webui/runner.py @@ -147,11 +147,11 @@ class Runner: shift_attn=get("train.shift_attn"), report_to="all" if get("train.report_to") else "none", use_galore=get("train.use_galore"), + use_badam=get("train.use_badam"), output_dir=get_save_dir(get("top.model_name"), get("top.finetuning_type"), get("train.output_dir")), fp16=(get("train.compute_type") == "fp16"), bf16=(get("train.compute_type") == "bf16"), pure_bf16=(get("train.compute_type") == "pure_bf16"), - use_badam=get("train.use_badam"), ) args["disable_tqdm"] = True @@ -201,11 +201,9 @@ class Runner: if args["use_badam"]: args["badam_mode"] = get("train.badam_mode") - args["badam_switch_block_every"] = 
get("train.badam_switch_block_every") args["badam_switch_mode"] = get("train.badam_switch_mode") + args["badam_switch_interval"] = get("train.badam_switch_interval") args["badam_update_ratio"] = get("train.badam_update_ratio") - args["badam_mask_mode"] = get("train.badam_mask_mode") - args["badam_verbose"] = get("train.badam_verbose") return args From 2cedb59beea1ed815bfb30d2aad217116f893cdd Mon Sep 17 00:00:00 2001 From: hiyouga Date: Thu, 2 May 2024 17:16:02 +0800 Subject: [PATCH 09/32] Update prepare.sh Former-commit-id: 5928b869251a984a085289ca6861a9731dc5b910 --- examples/lora_single_gpu/prepare.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/lora_single_gpu/prepare.sh b/examples/lora_single_gpu/prepare.sh index 3652cea4..e86de636 100644 --- a/examples/lora_single_gpu/prepare.sh +++ b/examples/lora_single_gpu/prepare.sh @@ -1,4 +1,5 @@ #!/bin/bash +# use `--tokenized_path` in training script to load data CUDA_VISIBLE_DEVICES= python ../../src/train_bash.py \ --stage sft \ From ce8200ad98b1742e966f4b71c4b4bf22dd8028c6 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 3 May 2024 02:58:23 +0800 Subject: [PATCH 10/32] update webui and add CLIs Former-commit-id: 1368dda22ab875914c9dd86ee5146a4f6a4736ad --- Dockerfile | 2 +- README.md | 6 +- README_zh.md | 6 +- examples/extras/badam/sft.sh | 2 +- examples/extras/fsdp_qlora/sft.sh | 2 +- examples/extras/galore/sft.sh | 2 +- examples/extras/llama_pro/sft.sh | 2 +- examples/extras/loraplus/sft.sh | 2 +- examples/extras/mod/sft.sh | 2 +- examples/full_multi_gpu/multi_node.sh | 2 +- examples/full_multi_gpu/predict.sh | 2 +- examples/full_multi_gpu/single_node.sh | 2 +- examples/inference/api_demo.sh | 2 +- examples/inference/cli_demo.sh | 2 +- examples/inference/evaluate.sh | 2 +- examples/inference/web_demo.sh | 2 +- examples/lora_multi_gpu/ds_zero3.sh | 3 +- examples/lora_multi_gpu/multi_node.sh | 2 +- examples/lora_multi_gpu/single_node.sh | 2 +- examples/lora_single_gpu/dpo.sh | 2 +- examples/lora_single_gpu/orpo.sh | 2 +- examples/lora_single_gpu/ppo.sh | 2 +- examples/lora_single_gpu/predict.sh | 2 +- examples/lora_single_gpu/prepare.sh | 2 +- examples/lora_single_gpu/pretrain.sh | 2 +- examples/lora_single_gpu/reward.sh | 2 +- examples/lora_single_gpu/sft.sh | 2 +- examples/lora_single_gpu/sft_mllm.sh | 2 +- examples/merge_lora/merge.sh | 2 +- examples/merge_lora/quantize.sh | 2 +- examples/qlora_single_gpu/aqlm.sh | 2 +- examples/qlora_single_gpu/awq.sh | 2 +- examples/qlora_single_gpu/bitsandbytes.sh | 2 +- examples/qlora_single_gpu/gptq.sh | 2 +- requirements.txt | 1 + setup.py | 1 + src/api_demo.py | 16 --- src/cli_demo.py | 49 -------- src/evaluate.py | 9 -- src/export_model.py | 9 -- src/llmtuner/__init__.py | 10 +- src/llmtuner/api/__init__.py | 4 - src/llmtuner/api/app.py | 3 +- src/llmtuner/chat/chat_model.py | 43 +++++++ src/llmtuner/cli.py | 39 ++++++ src/llmtuner/eval/__init__.py | 4 - src/llmtuner/eval/evaluator.py | 2 +- src/llmtuner/extras/callbacks.py | 145 ++++++++++------------ src/llmtuner/extras/constants.py | 8 +- src/llmtuner/extras/logging.py | 34 +++-- src/llmtuner/extras/ploting.py | 25 +++- src/llmtuner/hparams/parser.py | 4 +- src/llmtuner/train/__init__.py | 4 - src/llmtuner/train/tuner.py | 8 +- src/llmtuner/webui/__init__.py | 4 - src/llmtuner/webui/common.py | 11 +- src/llmtuner/webui/components/export.py | 2 +- src/llmtuner/webui/components/train.py | 6 +- src/llmtuner/webui/engine.py | 4 +- src/llmtuner/webui/interface.py | 6 +- src/llmtuner/webui/runner.py | 77 +++++------- 
src/llmtuner/webui/utils.py | 108 ++++++++-------- src/{train_bash.py => train.py} | 4 +- src/train_web.py | 9 -- src/web_demo.py | 9 -- 65 files changed, 363 insertions(+), 372 deletions(-) delete mode 100644 src/api_demo.py delete mode 100644 src/cli_demo.py delete mode 100644 src/evaluate.py delete mode 100644 src/export_model.py create mode 100644 src/llmtuner/cli.py rename src/{train_bash.py => train.py} (67%) delete mode 100644 src/train_web.py delete mode 100644 src/web_demo.py diff --git a/Dockerfile b/Dockerfile index c3d231b5..4b8bb084 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,4 +11,4 @@ RUN pip install -e .[deepspeed,metrics,bitsandbytes,qwen] VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ] EXPOSE 7860 -CMD [ "python", "src/train_web.py" ] +CMD [ "llamafactory-cli webui" ] diff --git a/README.md b/README.md index 04e5aa5b..8caac93f 100644 --- a/README.md +++ b/README.md @@ -346,7 +346,7 @@ To enable FlashAttention-2 on the Windows platform, you need to install the prec ```bash export CUDA_VISIBLE_DEVICES=0 # `set CUDA_VISIBLE_DEVICES=0` for Windows export GRADIO_SERVER_PORT=7860 # `set GRADIO_SERVER_PORT=7860` for Windows -python src/train_web.py # or python -m llmtuner.webui.interface +llamafactory-cli webui ```
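For readers who prefer launching the board from Python rather than the console script, a rough equivalent is sketched below. It relies on the `run_web_ui` entry point that this patch wires into `llamafactory-cli`; treat it as an illustration rather than a documented API.

```python
# Sketch only: programmatic counterpart of `llamafactory-cli webui`,
# using the run_web_ui function that this patch adds to the CLI.
import os

from llmtuner.webui.interface import run_web_ui

os.environ.setdefault("GRADIO_SERVER_PORT", "7860")  # same port as the shell example above
run_web_ui()
```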
For Alibaba Cloud users @@ -392,12 +392,12 @@ docker compose -f ./docker-compose.yml up -d See [examples/README.md](examples/README.md) for usage. -Use `python src/train_bash.py -h` to display arguments description. +Use `llamafactory-cli train -h` to display arguments description. ### Deploy with OpenAI-style API and vLLM ```bash -CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 python src/api_demo.py \ +CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 llamafactory-cli api \ --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \ --template llama3 \ --infer_backend vllm \ diff --git a/README_zh.md b/README_zh.md index 2240c688..27522232 100644 --- a/README_zh.md +++ b/README_zh.md @@ -346,7 +346,7 @@ pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/downl ```bash export CUDA_VISIBLE_DEVICES=0 # Windows 使用 `set CUDA_VISIBLE_DEVICES=0` export GRADIO_SERVER_PORT=7860 # Windows 使用 `set GRADIO_SERVER_PORT=7860` -python src/train_web.py # 或 python -m llmtuner.webui.interface +llamafactory-cli webui ```
阿里云用户指南 @@ -392,12 +392,12 @@ docker compose -f ./docker-compose.yml up -d 使用方法请参考 [examples/README_zh.md](examples/README_zh.md)。 -您可以执行 `python src/train_bash.py -h` 来查看参数文档。 +您可以执行 `llamafactory-cli train -h` 来查看参数文档。 ### 利用 vLLM 部署 OpenAI API ```bash -CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 python src/api_demo.py \ +CUDA_VISIBLE_DEVICES=0,1 API_PORT=8000 llamafactory-cli api \ --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \ --template llama3 \ --infer_backend vllm \ diff --git a/examples/extras/badam/sft.sh b/examples/extras/badam/sft.sh index c2319caa..61167dad 100644 --- a/examples/extras/badam/sft.sh +++ b/examples/extras/badam/sft.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/extras/fsdp_qlora/sft.sh b/examples/extras/fsdp_qlora/sft.sh index e8b9ece7..9eb70a53 100644 --- a/examples/extras/fsdp_qlora/sft.sh +++ b/examples/extras/fsdp_qlora/sft.sh @@ -7,7 +7,7 @@ pip install "bitsandbytes>=0.43.0" CUDA_VISIBLE_DEVICES=0,1 accelerate launch \ --config_file ../../accelerate/fsdp_config.yaml \ - ../../../src/train_bash.py \ + ../../../src/train.py \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-70b-hf \ diff --git a/examples/extras/galore/sft.sh b/examples/extras/galore/sft.sh index da1779ed..283673e7 100644 --- a/examples/extras/galore/sft.sh +++ b/examples/extras/galore/sft.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/extras/llama_pro/sft.sh b/examples/extras/llama_pro/sft.sh index 573078ff..3e26e0a6 100644 --- a/examples/extras/llama_pro/sft.sh +++ b/examples/extras/llama_pro/sft.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path ../../../models/llama2-7b-pro \ diff --git a/examples/extras/loraplus/sft.sh b/examples/extras/loraplus/sft.sh index cb334e7d..8d152d9e 100644 --- a/examples/extras/loraplus/sft.sh +++ b/examples/extras/loraplus/sft.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/extras/mod/sft.sh b/examples/extras/mod/sft.sh index 2c8f04a3..5219751f 100644 --- a/examples/extras/mod/sft.sh +++ b/examples/extras/mod/sft.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/full_multi_gpu/multi_node.sh b/examples/full_multi_gpu/multi_node.sh index d1382bc2..a1ffc0ee 100644 --- a/examples/full_multi_gpu/multi_node.sh +++ b/examples/full_multi_gpu/multi_node.sh @@ -6,7 +6,7 @@ python -m torch.distributed.run \ --node_rank $RANK \ --master_addr $MASTER_ADDR \ --master_port $MASTER_PORT \ - ../../src/train_bash.py \ + ../../src/train.py \ --deepspeed ../deepspeed/ds_z3_config.json \ --stage sft \ --do_train \ diff --git a/examples/full_multi_gpu/predict.sh b/examples/full_multi_gpu/predict.sh index 801df85a..7c2e458f 100644 --- a/examples/full_multi_gpu/predict.sh +++ 
b/examples/full_multi_gpu/predict.sh @@ -2,7 +2,7 @@ CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \ --config_file ../accelerate/single_config.yaml \ - ../../src/train_bash.py \ + ../../src/train.py \ --stage sft \ --do_predict \ --model_name_or_path ../../saves/LLaMA2-7B/full/sft \ diff --git a/examples/full_multi_gpu/single_node.sh b/examples/full_multi_gpu/single_node.sh index ea4acf90..73c7662d 100644 --- a/examples/full_multi_gpu/single_node.sh +++ b/examples/full_multi_gpu/single_node.sh @@ -1,6 +1,6 @@ #!/bin/bash -deepspeed --num_gpus 4 ../../src/train_bash.py \ +deepspeed --num_gpus 4 ../../src/train.py \ --deepspeed ../deepspeed/ds_z3_config.json \ --stage sft \ --do_train \ diff --git a/examples/inference/api_demo.sh b/examples/inference/api_demo.sh index aee86595..6f0f1b2e 100644 --- a/examples/inference/api_demo.sh +++ b/examples/inference/api_demo.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 API_PORT=8000 python ../../src/api_demo.py \ +CUDA_VISIBLE_DEVICES=0 API_PORT=8000 llamafactory-cli api \ --model_name_or_path meta-llama/Llama-2-7b-hf \ --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \ --template default \ diff --git a/examples/inference/cli_demo.sh b/examples/inference/cli_demo.sh index 3e4a1e4e..bc762411 100644 --- a/examples/inference/cli_demo.sh +++ b/examples/inference/cli_demo.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/cli_demo.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli chat \ --model_name_or_path meta-llama/Llama-2-7b-hf \ --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \ --template default \ diff --git a/examples/inference/evaluate.sh b/examples/inference/evaluate.sh index 1fc6ccf8..5030329d 100644 --- a/examples/inference/evaluate.sh +++ b/examples/inference/evaluate.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/evaluate.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli eval \ --model_name_or_path meta-llama/Llama-2-7b-hf \ --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \ --template fewshot \ diff --git a/examples/inference/web_demo.sh b/examples/inference/web_demo.sh index 8d6ed09d..a58cd2a0 100644 --- a/examples/inference/web_demo.sh +++ b/examples/inference/web_demo.sh @@ -1,7 +1,7 @@ #!/bin/bash # add `--visual_inputs True` to load MLLM -CUDA_VISIBLE_DEVICES=0 python ../../src/web_demo.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli webchat \ --model_name_or_path meta-llama/Llama-2-7b-hf \ --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \ --template default \ diff --git a/examples/lora_multi_gpu/ds_zero3.sh b/examples/lora_multi_gpu/ds_zero3.sh index f429d15b..bc74a6de 100644 --- a/examples/lora_multi_gpu/ds_zero3.sh +++ b/examples/lora_multi_gpu/ds_zero3.sh @@ -1,6 +1,7 @@ #!/bin/bash +# ZeRO-3 enables weight sharding on multiple GPUs -deepspeed --num_gpus 4 ../../src/train_bash.py \ +deepspeed --num_gpus 4 ../../src/train.py \ --deepspeed ../deepspeed/ds_z3_config.json \ --stage sft \ --do_train \ diff --git a/examples/lora_multi_gpu/multi_node.sh b/examples/lora_multi_gpu/multi_node.sh index 85a3e026..a58cac20 100644 --- a/examples/lora_multi_gpu/multi_node.sh +++ b/examples/lora_multi_gpu/multi_node.sh @@ -3,7 +3,7 @@ CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \ --config_file ../accelerate/master_config.yaml \ - ../../src/train_bash.py \ + ../../src/train.py \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_multi_gpu/single_node.sh b/examples/lora_multi_gpu/single_node.sh index 04529cf0..c0719c04 100644 --- 
a/examples/lora_multi_gpu/single_node.sh +++ b/examples/lora_multi_gpu/single_node.sh @@ -2,7 +2,7 @@ CUDA_VISIBLE_DEVICES=0,1,2,3 accelerate launch \ --config_file ../accelerate/single_config.yaml \ - ../../src/train_bash.py \ + ../../src/train.py \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/dpo.sh b/examples/lora_single_gpu/dpo.sh index 56a2dfc3..2cb6cb01 100644 --- a/examples/lora_single_gpu/dpo.sh +++ b/examples/lora_single_gpu/dpo.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage dpo \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/orpo.sh b/examples/lora_single_gpu/orpo.sh index 407907b1..335707bf 100644 --- a/examples/lora_single_gpu/orpo.sh +++ b/examples/lora_single_gpu/orpo.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage orpo \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/ppo.sh b/examples/lora_single_gpu/ppo.sh index 6a5b770e..9eccb05e 100644 --- a/examples/lora_single_gpu/ppo.sh +++ b/examples/lora_single_gpu/ppo.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage ppo \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/predict.sh b/examples/lora_single_gpu/predict.sh index eb9a18c0..250efed1 100644 --- a/examples/lora_single_gpu/predict.sh +++ b/examples/lora_single_gpu/predict.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_predict \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/prepare.sh b/examples/lora_single_gpu/prepare.sh index e86de636..277f9b7a 100644 --- a/examples/lora_single_gpu/prepare.sh +++ b/examples/lora_single_gpu/prepare.sh @@ -1,7 +1,7 @@ #!/bin/bash # use `--tokenized_path` in training script to load data -CUDA_VISIBLE_DEVICES= python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES= llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/pretrain.sh b/examples/lora_single_gpu/pretrain.sh index 59bdfe62..0782f00c 100644 --- a/examples/lora_single_gpu/pretrain.sh +++ b/examples/lora_single_gpu/pretrain.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage pt \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/reward.sh b/examples/lora_single_gpu/reward.sh index 1212d082..678809fd 100644 --- a/examples/lora_single_gpu/reward.sh +++ b/examples/lora_single_gpu/reward.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage rm \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/sft.sh b/examples/lora_single_gpu/sft.sh index 3bfbc9b8..2047e21f 100644 --- a/examples/lora_single_gpu/sft.sh +++ b/examples/lora_single_gpu/sft.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft 
\ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/lora_single_gpu/sft_mllm.sh b/examples/lora_single_gpu/sft_mllm.sh index 7e900918..53e37262 100644 --- a/examples/lora_single_gpu/sft_mllm.sh +++ b/examples/lora_single_gpu/sft_mllm.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path llava-hf/llava-1.5-7b-hf \ diff --git a/examples/merge_lora/merge.sh b/examples/merge_lora/merge.sh index c50bd6ad..186e64a4 100644 --- a/examples/merge_lora/merge.sh +++ b/examples/merge_lora/merge.sh @@ -1,7 +1,7 @@ #!/bin/bash # DO NOT use quantized model or quantization_bit when merging lora weights -CUDA_VISIBLE_DEVICES=0 python ../../src/export_model.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli export \ --model_name_or_path meta-llama/Llama-2-7b-hf \ --adapter_name_or_path ../../saves/LLaMA2-7B/lora/sft \ --template default \ diff --git a/examples/merge_lora/quantize.sh b/examples/merge_lora/quantize.sh index aeedbe66..4a104645 100644 --- a/examples/merge_lora/quantize.sh +++ b/examples/merge_lora/quantize.sh @@ -1,7 +1,7 @@ #!/bin/bash # NEED TO run `merge.sh` before using this script -CUDA_VISIBLE_DEVICES=0 python ../../src/export_model.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli export \ --model_name_or_path ../../models/llama2-7b-sft \ --template default \ --export_dir ../../models/llama2-7b-sft-int4 \ diff --git a/examples/qlora_single_gpu/aqlm.sh b/examples/qlora_single_gpu/aqlm.sh index 68eb4482..1e0a71ca 100644 --- a/examples/qlora_single_gpu/aqlm.sh +++ b/examples/qlora_single_gpu/aqlm.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path BlackSamorez/Llama-2-7b-AQLM-2Bit-1x16-hf \ diff --git a/examples/qlora_single_gpu/awq.sh b/examples/qlora_single_gpu/awq.sh index b0f1f46b..c13c8134 100644 --- a/examples/qlora_single_gpu/awq.sh +++ b/examples/qlora_single_gpu/awq.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path TheBloke/Llama-2-7B-AWQ \ diff --git a/examples/qlora_single_gpu/bitsandbytes.sh b/examples/qlora_single_gpu/bitsandbytes.sh index 84bbb426..27f48d41 100644 --- a/examples/qlora_single_gpu/bitsandbytes.sh +++ b/examples/qlora_single_gpu/bitsandbytes.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path meta-llama/Llama-2-7b-hf \ diff --git a/examples/qlora_single_gpu/gptq.sh b/examples/qlora_single_gpu/gptq.sh index a971b09f..5b1b80e1 100644 --- a/examples/qlora_single_gpu/gptq.sh +++ b/examples/qlora_single_gpu/gptq.sh @@ -1,6 +1,6 @@ #!/bin/bash -CUDA_VISIBLE_DEVICES=0 python ../../src/train_bash.py \ +CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \ --stage sft \ --do_train \ --model_name_or_path TheBloke/Llama-2-7B-GPTQ \ diff --git a/requirements.txt b/requirements.txt index ecba3ce1..f4818ed2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,3 +16,4 @@ sse-starlette matplotlib fire packaging +pyyaml diff --git a/setup.py b/setup.py index 6a03138d..f7589eb8 100644 --- a/setup.py +++ b/setup.py @@ -52,6 +52,7 @@ def main(): python_requires=">=3.8.0", install_requires=get_requires(), extras_require=extra_require, + 
entry_points={"console_scripts": ["llamafactory-cli = llmtuner.cli:main"]}, classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", diff --git a/src/api_demo.py b/src/api_demo.py deleted file mode 100644 index a7140675..00000000 --- a/src/api_demo.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -import uvicorn - -from llmtuner import ChatModel, create_app - - -def main(): - chat_model = ChatModel() - app = create_app(chat_model) - print("Visit http://localhost:{}/docs for API document.".format(os.environ.get("API_PORT", 8000))) - uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("API_PORT", 8000)), workers=1) - - -if __name__ == "__main__": - main() diff --git a/src/cli_demo.py b/src/cli_demo.py deleted file mode 100644 index ba828f51..00000000 --- a/src/cli_demo.py +++ /dev/null @@ -1,49 +0,0 @@ -from llmtuner import ChatModel -from llmtuner.extras.misc import torch_gc - - -try: - import platform - - if platform.system() != "Windows": - import readline # noqa: F401 -except ImportError: - print("Install `readline` for a better experience.") - - -def main(): - chat_model = ChatModel() - messages = [] - print("Welcome to the CLI application, use `clear` to remove the history, use `exit` to exit the application.") - - while True: - try: - query = input("\nUser: ") - except UnicodeDecodeError: - print("Detected decoding error at the inputs, please set the terminal encoding to utf-8.") - continue - except Exception: - raise - - if query.strip() == "exit": - break - - if query.strip() == "clear": - messages = [] - torch_gc() - print("History has been removed.") - continue - - messages.append({"role": "user", "content": query}) - print("Assistant: ", end="", flush=True) - - response = "" - for new_text in chat_model.stream_chat(messages): - print(new_text, end="", flush=True) - response += new_text - print() - messages.append({"role": "assistant", "content": response}) - - -if __name__ == "__main__": - main() diff --git a/src/evaluate.py b/src/evaluate.py deleted file mode 100644 index 705a6e42..00000000 --- a/src/evaluate.py +++ /dev/null @@ -1,9 +0,0 @@ -from llmtuner import Evaluator - - -def main(): - Evaluator().eval() - - -if __name__ == "__main__": - main() diff --git a/src/export_model.py b/src/export_model.py deleted file mode 100644 index 4baeb2c3..00000000 --- a/src/export_model.py +++ /dev/null @@ -1,9 +0,0 @@ -from llmtuner import export_model - - -def main(): - export_model() - - -if __name__ == "__main__": - main() diff --git a/src/llmtuner/__init__.py b/src/llmtuner/__init__.py index b3a980a5..a3a97450 100644 --- a/src/llmtuner/__init__.py +++ b/src/llmtuner/__init__.py @@ -1,11 +1,3 @@ # Level: api, webui > chat, eval, train > data, model > extras, hparams -from .api import create_app -from .chat import ChatModel -from .eval import Evaluator -from .train import export_model, run_exp -from .webui import create_ui, create_web_demo - - -__version__ = "0.7.0" -__all__ = ["create_app", "ChatModel", "Evaluator", "export_model", "run_exp", "create_ui", "create_web_demo"] +__version__ = "0.7.1.dev0" diff --git a/src/llmtuner/api/__init__.py b/src/llmtuner/api/__init__.py index d7059fbd..e69de29b 100644 --- a/src/llmtuner/api/__init__.py +++ b/src/llmtuner/api/__init__.py @@ -1,4 +0,0 @@ -from .app import create_app - - -__all__ = ["create_app"] diff --git a/src/llmtuner/api/app.py b/src/llmtuner/api/app.py index 3f06fef1..36918d1b 100644 --- a/src/llmtuner/api/app.py +++ b/src/llmtuner/api/app.py @@ -224,7 +224,8 @@ def create_app(chat_model: "ChatModel") 
-> "FastAPI": return app -if __name__ == "__main__": +def run_api(): chat_model = ChatModel() app = create_app(chat_model) + print("Visit http://localhost:{}/docs for API document.".format(os.environ.get("API_PORT", 8000))) uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("API_PORT", 8000)), workers=1) diff --git a/src/llmtuner/chat/chat_model.py b/src/llmtuner/chat/chat_model.py index ba58dd2e..97ae87d7 100644 --- a/src/llmtuner/chat/chat_model.py +++ b/src/llmtuner/chat/chat_model.py @@ -2,6 +2,7 @@ import asyncio from threading import Thread from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, Generator, List, Optional, Sequence +from ..extras.misc import torch_gc from ..hparams import get_infer_args from .hf_engine import HuggingfaceEngine from .vllm_engine import VllmEngine @@ -95,3 +96,45 @@ class ChatModel: **input_kwargs, ) -> List[float]: return await self.engine.get_scores(batch_input, **input_kwargs) + + +def run_chat(): + try: + import platform + + if platform.system() != "Windows": + import readline # noqa: F401 + except ImportError: + print("Install `readline` for a better experience.") + + chat_model = ChatModel() + messages = [] + print("Welcome to the CLI application, use `clear` to remove the history, use `exit` to exit the application.") + + while True: + try: + query = input("\nUser: ") + except UnicodeDecodeError: + print("Detected decoding error at the inputs, please set the terminal encoding to utf-8.") + continue + except Exception: + raise + + if query.strip() == "exit": + break + + if query.strip() == "clear": + messages = [] + torch_gc() + print("History has been removed.") + continue + + messages.append({"role": "user", "content": query}) + print("Assistant: ", end="", flush=True) + + response = "" + for new_text in chat_model.stream_chat(messages): + print(new_text, end="", flush=True) + response += new_text + print() + messages.append({"role": "assistant", "content": response}) diff --git a/src/llmtuner/cli.py b/src/llmtuner/cli.py new file mode 100644 index 00000000..1b5bd658 --- /dev/null +++ b/src/llmtuner/cli.py @@ -0,0 +1,39 @@ +import sys +from enum import Enum, unique + +from .api.app import run_api +from .chat.chat_model import run_chat +from .eval.evaluator import run_eval +from .train.tuner import export_model, run_exp +from .webui.interface import run_web_demo, run_web_ui + + +@unique +class Command(str, Enum): + API = "api" + CHAT = "chat" + EVAL = "eval" + EXPORT = "export" + TRAIN = "train" + WEBDEMO = "webchat" + WEBUI = "webui" + + +def main(): + command = sys.argv.pop(1) + if command == Command.API: + run_api() + elif command == Command.CHAT: + run_chat() + elif command == Command.EVAL: + run_eval() + elif command == Command.EXPORT: + export_model() + elif command == Command.TRAIN: + run_exp() + elif command == Command.WEBDEMO: + run_web_demo() + elif command == Command.WEBUI: + run_web_ui() + else: + raise NotImplementedError("Unknown command: {}".format(command)) diff --git a/src/llmtuner/eval/__init__.py b/src/llmtuner/eval/__init__.py index 95ce0377..e69de29b 100644 --- a/src/llmtuner/eval/__init__.py +++ b/src/llmtuner/eval/__init__.py @@ -1,4 +0,0 @@ -from .evaluator import Evaluator - - -__all__ = ["Evaluator"] diff --git a/src/llmtuner/eval/evaluator.py b/src/llmtuner/eval/evaluator.py index 7446c6f5..4ea134c6 100644 --- a/src/llmtuner/eval/evaluator.py +++ b/src/llmtuner/eval/evaluator.py @@ -118,6 +118,6 @@ class Evaluator: f.write(score_info) -if __name__ == "__main__": +def run_eval(): evaluator = Evaluator() 
evaluator.eval() diff --git a/src/llmtuner/extras/callbacks.py b/src/llmtuner/extras/callbacks.py index 6e347c3c..fbe6f373 100644 --- a/src/llmtuner/extras/callbacks.py +++ b/src/llmtuner/extras/callbacks.py @@ -1,14 +1,18 @@ import json +import logging import os +import signal import time +from concurrent.futures import ThreadPoolExecutor from datetime import timedelta -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Dict +import transformers from transformers import TrainerCallback from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, has_length -from .constants import LOG_FILE_NAME -from .logging import get_logger +from .constants import TRAINER_LOG +from .logging import LoggerHandler, get_logger from .misc import fix_valuehead_checkpoint @@ -33,20 +37,32 @@ class FixValueHeadModelCallback(TrainerCallback): class LogCallback(TrainerCallback): - def __init__(self, runner=None): - self.runner = runner - self.in_training = False + def __init__(self, output_dir: str) -> None: + self.aborted = False + self.do_train = False + self.webui_mode = bool(int(os.environ.get("LLAMABOARD_ENABLED", "0"))) + if self.webui_mode: + signal.signal(signal.SIGABRT, self._set_abort) + self.logger_handler = LoggerHandler(output_dir) + logging.root.addHandler(self.logger_handler) + transformers.logging.add_handler(self.logger_handler) + + def _set_abort(self, signum, frame) -> None: + self.aborted = True + + def _reset(self, max_steps: int = 0) -> None: self.start_time = time.time() self.cur_steps = 0 - self.max_steps = 0 + self.max_steps = max_steps self.elapsed_time = "" self.remaining_time = "" - def timing(self): + def _timing(self, cur_steps: int) -> None: cur_time = time.time() elapsed_time = cur_time - self.start_time - avg_time_per_step = elapsed_time / self.cur_steps if self.cur_steps != 0 else 0 - remaining_time = (self.max_steps - self.cur_steps) * avg_time_per_step + avg_time_per_step = elapsed_time / cur_steps if cur_steps != 0 else 0 + remaining_time = (self.max_steps - cur_steps) * avg_time_per_step + self.cur_steps = cur_steps self.elapsed_time = str(timedelta(seconds=int(elapsed_time))) self.remaining_time = str(timedelta(seconds=int(remaining_time))) @@ -54,36 +70,27 @@ class LogCallback(TrainerCallback): r""" Event called at the beginning of training. """ - if state.is_local_process_zero: - self.in_training = True - self.start_time = time.time() - self.max_steps = state.max_steps + if args.should_log: + self.do_train = True + self._reset(max_steps=state.max_steps) - if args.save_on_each_node: - if not state.is_local_process_zero: - return - else: - if not state.is_world_process_zero: - return + if args.should_save: + os.makedirs(args.output_dir, exist_ok=True) + self.thread_pool = ThreadPoolExecutor(max_workers=1) - if os.path.exists(os.path.join(args.output_dir, LOG_FILE_NAME)) and args.overwrite_output_dir: - logger.warning("Previous log file in this folder will be deleted.") - os.remove(os.path.join(args.output_dir, LOG_FILE_NAME)) - - def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): - r""" - Event called at the end of training. 
- """ - if state.is_local_process_zero: - self.in_training = False - self.cur_steps = 0 - self.max_steps = 0 + if ( + args.should_save + and os.path.exists(os.path.join(args.output_dir, TRAINER_LOG)) + and args.overwrite_output_dir + ): + logger.warning("Previous trainer log in this folder will be deleted.") + os.remove(os.path.join(args.output_dir, TRAINER_LOG)) def on_substep_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" Event called at the end of an substep during gradient accumulation. """ - if state.is_local_process_zero and self.runner is not None and self.runner.aborted: + if self.aborted: control.should_epoch_stop = True control.should_training_stop = True @@ -91,42 +98,41 @@ class LogCallback(TrainerCallback): r""" Event called at the end of a training step. """ - if state.is_local_process_zero: - self.cur_steps = state.global_step - self.timing() - if self.runner is not None and self.runner.aborted: - control.should_epoch_stop = True - control.should_training_stop = True + if args.should_log: + self._timing(cur_steps=state.global_step) - def on_evaluate(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + if self.aborted: + control.should_epoch_stop = True + control.should_training_stop = True + + def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" - Event called after an evaluation phase. + Event called at the end of training. """ - if state.is_local_process_zero and not self.in_training: - self.cur_steps = 0 - self.max_steps = 0 + self.thread_pool.shutdown(wait=True) + self.thread_pool = None - def on_predict( - self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", *other, **kwargs + def on_prediction_step( + self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs ): r""" - Event called after a successful prediction. + Event called after a prediction step. """ - if state.is_local_process_zero and not self.in_training: - self.cur_steps = 0 - self.max_steps = 0 + eval_dataloader = kwargs.pop("eval_dataloader", None) + if args.should_log and has_length(eval_dataloader) and not self.do_train: + if self.max_steps == 0: + self.max_steps = len(eval_dataloader) + + self._timing(cur_steps=self.cur_steps + 1) + + def _write_log(self, output_dir: str, logs: Dict[str, Any]): + with open(os.path.join(output_dir, TRAINER_LOG), "a", encoding="utf-8") as f: + f.write(json.dumps(logs) + "\n") def on_log(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs) -> None: r""" - Event called after logging the last logs. + Event called after logging the last logs, `args.should_log` has been applied. 
""" - if args.save_on_each_node: - if not state.is_local_process_zero: - return - else: - if not state.is_world_process_zero: - return - logs = dict( current_steps=self.cur_steps, total_steps=self.max_steps, @@ -141,26 +147,13 @@ class LogCallback(TrainerCallback): elapsed_time=self.elapsed_time, remaining_time=self.remaining_time, ) - if self.runner is not None: + logs = {k: v for k, v in logs.items() if v is not None} + if self.webui_mode and "loss" in logs and "learning_rate" in logs and "epoch" in logs: logger.info( "{{'loss': {:.4f}, 'learning_rate': {:2.4e}, 'epoch': {:.2f}}}".format( - logs["loss"] or 0, logs["learning_rate"] or 0, logs["epoch"] or 0 + logs["loss"], logs["learning_rate"], logs["epoch"] ) ) - os.makedirs(args.output_dir, exist_ok=True) - with open(os.path.join(args.output_dir, "trainer_log.jsonl"), "a", encoding="utf-8") as f: - f.write(json.dumps(logs) + "\n") - - def on_prediction_step( - self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs - ): - r""" - Event called after a prediction step. - """ - eval_dataloader = kwargs.pop("eval_dataloader", None) - if state.is_local_process_zero and has_length(eval_dataloader) and not self.in_training: - if self.max_steps == 0: - self.max_steps = len(eval_dataloader) - self.cur_steps += 1 - self.timing() + if args.should_save and self.thread_pool is not None: + self.thread_pool.submit(self._write_log, args.output_dir, logs) diff --git a/src/llmtuner/extras/constants.py b/src/llmtuner/extras/constants.py index 0329b374..bf542e69 100644 --- a/src/llmtuner/extras/constants.py +++ b/src/llmtuner/extras/constants.py @@ -24,8 +24,6 @@ IGNORE_INDEX = -100 LAYERNORM_NAMES = {"norm", "ln"} -LOG_FILE_NAME = "trainer_log.jsonl" - METHODS = ["full", "freeze", "lora"] MLLM_LIST = ["LLaVA1.5"] @@ -34,10 +32,16 @@ MOD_SUPPORTED_MODELS = ["bloom", "falcon", "gemma", "llama", "mistral", "mixtral PEFT_METHODS = ["lora"] +RUNNING_LOG = "running_log.txt" + SUBJECTS = ["Average", "STEM", "Social Sciences", "Humanities", "Other"] SUPPORTED_MODELS = OrderedDict() +TRAINER_CONFIG = "trainer_config.yaml" + +TRAINER_LOG = "trainer_log.jsonl" + TRAINING_STAGES = { "Supervised Fine-Tuning": "sft", "Reward Modeling": "rm", diff --git a/src/llmtuner/extras/logging.py b/src/llmtuner/extras/logging.py index bb270776..430b8a48 100644 --- a/src/llmtuner/extras/logging.py +++ b/src/llmtuner/extras/logging.py @@ -1,5 +1,9 @@ import logging +import os import sys +from concurrent.futures import ThreadPoolExecutor + +from .constants import RUNNING_LOG class LoggerHandler(logging.Handler): @@ -7,19 +11,35 @@ class LoggerHandler(logging.Handler): Logger handler used in Web UI. 
""" - def __init__(self): + def __init__(self, output_dir: str) -> None: super().__init__() - self.log = "" + formatter = logging.Formatter( + fmt="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S" + ) + self.setLevel(logging.INFO) + self.setFormatter(formatter) - def reset(self): - self.log = "" + os.makedirs(output_dir, exist_ok=True) + self.running_log = os.path.join(output_dir, RUNNING_LOG) + if os.path.exists(self.running_log): + os.remove(self.running_log) - def emit(self, record): + self.thread_pool = ThreadPoolExecutor(max_workers=1) + + def _write_log(self, log_entry: str) -> None: + with open(self.running_log, "a", encoding="utf-8") as f: + f.write(log_entry + "\n\n") + + def emit(self, record) -> None: if record.name == "httpx": return + log_entry = self.format(record) - self.log += log_entry - self.log += "\n\n" + self.thread_pool.submit(self._write_log, log_entry) + + def close(self) -> None: + self.thread_pool.shutdown(wait=True) + return super().close() def get_logger(name: str) -> logging.Logger: diff --git a/src/llmtuner/extras/ploting.py b/src/llmtuner/extras/ploting.py index fd3cb8a3..e53f1f89 100644 --- a/src/llmtuner/extras/ploting.py +++ b/src/llmtuner/extras/ploting.py @@ -1,7 +1,7 @@ import json import math import os -from typing import List +from typing import Any, Dict, List from transformers.trainer import TRAINER_STATE_NAME @@ -10,6 +10,7 @@ from .packages import is_matplotlib_available if is_matplotlib_available(): + import matplotlib.figure import matplotlib.pyplot as plt @@ -21,7 +22,7 @@ def smooth(scalars: List[float]) -> List[float]: EMA implementation according to TensorBoard. """ last = scalars[0] - smoothed = list() + smoothed = [] weight = 1.8 * (1 / (1 + math.exp(-0.05 * len(scalars))) - 0.5) # a sigmoid function for next_val in scalars: smoothed_val = last * weight + (1 - weight) * next_val @@ -30,7 +31,27 @@ def smooth(scalars: List[float]) -> List[float]: return smoothed +def gen_loss_plot(trainer_log: List[Dict[str, Any]]) -> "matplotlib.figure.Figure": + plt.close("all") + plt.switch_backend("agg") + fig = plt.figure() + ax = fig.add_subplot(111) + steps, losses = [], [] + for log in trainer_log: + if log.get("loss", None): + steps.append(log["current_steps"]) + losses.append(log["loss"]) + + ax.plot(steps, losses, color="#1f77b4", alpha=0.4, label="original") + ax.plot(steps, smooth(losses), color="#1f77b4", label="smoothed") + ax.legend() + ax.set_xlabel("step") + ax.set_ylabel("loss") + return fig + + def plot_loss(save_dictionary: os.PathLike, keys: List[str] = ["loss"]) -> None: + plt.switch_backend("agg") with open(os.path.join(save_dictionary, TRAINER_STATE_NAME), "r", encoding="utf-8") as f: data = json.load(f) diff --git a/src/llmtuner/hparams/parser.py b/src/llmtuner/hparams/parser.py index 977d7cf4..7fdd3234 100644 --- a/src/llmtuner/hparams/parser.py +++ b/src/llmtuner/hparams/parser.py @@ -10,6 +10,7 @@ from transformers.trainer_utils import get_last_checkpoint from transformers.utils import is_torch_bf16_gpu_available from transformers.utils.versions import require_version +from ..extras.constants import TRAINER_CONFIG from ..extras.logging import get_logger from ..extras.misc import check_dependencies, get_current_device from .data_args import DataArguments @@ -251,7 +252,8 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS: and can_resume_from_checkpoint ): last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and 
len(os.listdir(training_args.output_dir)) > 0: + files = os.listdir(training_args.output_dir) + if last_checkpoint is None and len(files) > 0 and (len(files) != 1 or files[0] != TRAINER_CONFIG): raise ValueError("Output directory already exists and is not empty. Please set `overwrite_output_dir`.") if last_checkpoint is not None: diff --git a/src/llmtuner/train/__init__.py b/src/llmtuner/train/__init__.py index 6c22bc15..e69de29b 100644 --- a/src/llmtuner/train/__init__.py +++ b/src/llmtuner/train/__init__.py @@ -1,4 +0,0 @@ -from .tuner import export_model, run_exp - - -__all__ = ["export_model", "run_exp"] diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index a2eb121f..6822ffb5 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -23,9 +23,9 @@ if TYPE_CHECKING: logger = get_logger(__name__) -def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["TrainerCallback"]] = None): +def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: List["TrainerCallback"] = []): model_args, data_args, training_args, finetuning_args, generating_args = get_train_args(args) - callbacks = [LogCallback()] if callbacks is None else callbacks + callbacks.append(LogCallback(training_args.output_dir)) if finetuning_args.stage == "pt": run_pt(model_args, data_args, training_args, finetuning_args, callbacks) @@ -88,7 +88,3 @@ def export_model(args: Optional[Dict[str, Any]] = None): tokenizer.push_to_hub(model_args.export_hub_model_id, token=model_args.hf_hub_token) except Exception: logger.warning("Cannot save tokenizer, please copy the files manually.") - - -if __name__ == "__main__": - run_exp() diff --git a/src/llmtuner/webui/__init__.py b/src/llmtuner/webui/__init__.py index 3e82dd69..e69de29b 100644 --- a/src/llmtuner/webui/__init__.py +++ b/src/llmtuner/webui/__init__.py @@ -1,4 +0,0 @@ -from .interface import create_ui, create_web_demo - - -__all__ = ["create_ui", "create_web_demo"] diff --git a/src/llmtuner/webui/common.py b/src/llmtuner/webui/common.py index 9af4c439..a33e3db7 100644 --- a/src/llmtuner/webui/common.py +++ b/src/llmtuner/webui/common.py @@ -4,6 +4,7 @@ from collections import defaultdict from typing import Any, Dict, Optional from peft.utils import SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME +from yaml import safe_dump, safe_load from ..extras.constants import ( DATA_CONFIG, @@ -29,7 +30,7 @@ DEFAULT_CACHE_DIR = "cache" DEFAULT_CONFIG_DIR = "config" DEFAULT_DATA_DIR = "data" DEFAULT_SAVE_DIR = "saves" -USER_CONFIG = "user.config" +USER_CONFIG = "user_config.yaml" def get_save_dir(*args) -> os.PathLike: @@ -47,7 +48,7 @@ def get_save_path(config_path: str) -> os.PathLike: def load_config() -> Dict[str, Any]: try: with open(get_config_path(), "r", encoding="utf-8") as f: - return json.load(f) + return safe_load(f) except Exception: return {"lang": None, "last_model": None, "path_dict": {}, "cache_dir": None} @@ -60,13 +61,13 @@ def save_config(lang: str, model_name: Optional[str] = None, model_path: Optiona user_config["last_model"] = model_name user_config["path_dict"][model_name] = model_path with open(get_config_path(), "w", encoding="utf-8") as f: - json.dump(user_config, f, indent=2, ensure_ascii=False) + safe_dump(user_config, f) def load_args(config_path: str) -> Optional[Dict[str, Any]]: try: with open(get_save_path(config_path), "r", encoding="utf-8") as f: - return json.load(f) + return safe_load(f) except Exception: return None @@ -74,7 +75,7 @@ def load_args(config_path: str) -> Optional[Dict[str, Any]]: def 
save_args(config_path: str, config_dict: Dict[str, Any]) -> str: os.makedirs(DEFAULT_CONFIG_DIR, exist_ok=True) with open(get_save_path(config_path), "w", encoding="utf-8") as f: - json.dump(config_dict, f, indent=2, ensure_ascii=False) + safe_dump(config_dict, f) return str(get_save_path(config_path)) diff --git a/src/llmtuner/webui/components/export.py b/src/llmtuner/webui/components/export.py index 4c224736..64273882 100644 --- a/src/llmtuner/webui/components/export.py +++ b/src/llmtuner/webui/components/export.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Dict, Generator, List from ...extras.misc import torch_gc from ...extras.packages import is_gradio_available -from ...train import export_model +from ...train.tuner import export_model from ..common import get_save_dir from ..locales import ALERTS diff --git a/src/llmtuner/webui/components/train.py b/src/llmtuner/webui/components/train.py index c9671289..c709b916 100644 --- a/src/llmtuner/webui/components/train.py +++ b/src/llmtuner/webui/components/train.py @@ -245,7 +245,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: with gr.Row(): resume_btn = gr.Checkbox(visible=False, interactive=False) - process_bar = gr.Slider(visible=False, interactive=False) + progress_bar = gr.Slider(visible=False, interactive=False) with gr.Row(): output_box = gr.Markdown() @@ -263,14 +263,14 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: output_dir=output_dir, config_path=config_path, resume_btn=resume_btn, - process_bar=process_bar, + progress_bar=progress_bar, output_box=output_box, loss_viewer=loss_viewer, ) ) input_elems.update({output_dir, config_path}) - output_elems = [output_box, process_bar, loss_viewer] + output_elems = [output_box, progress_bar, loss_viewer] cmd_preview_btn.click(engine.runner.preview_train, input_elems, output_elems, concurrency_limit=None) arg_save_btn.click(engine.runner.save_args, input_elems, output_elems, concurrency_limit=None) diff --git a/src/llmtuner/webui/engine.py b/src/llmtuner/webui/engine.py index cebac3b9..964d65a2 100644 --- a/src/llmtuner/webui/engine.py +++ b/src/llmtuner/webui/engine.py @@ -41,7 +41,7 @@ class Engine: init_dict["train.dataset"] = {"choices": list_dataset().choices} init_dict["eval.dataset"] = {"choices": list_dataset().choices} init_dict["train.output_dir"] = {"value": "train_{}".format(get_time())} - init_dict["train.config_path"] = {"value": "{}.json".format(get_time())} + init_dict["train.config_path"] = {"value": "{}.yaml".format(get_time())} init_dict["eval.output_dir"] = {"value": "eval_{}".format(get_time())} init_dict["infer.image_box"] = {"visible": False} @@ -51,7 +51,7 @@ class Engine: yield self._update_component(init_dict) - if self.runner.alive and not self.demo_mode and not self.pure_chat: + if self.runner.running and not self.demo_mode and not self.pure_chat: yield {elem: elem.__class__(value=value) for elem, value in self.runner.running_data.items()} if self.runner.do_train: yield self._update_component({"train.resume_btn": {"value": True}}) diff --git a/src/llmtuner/webui/interface.py b/src/llmtuner/webui/interface.py index abca16c5..feb2a20a 100644 --- a/src/llmtuner/webui/interface.py +++ b/src/llmtuner/webui/interface.py @@ -68,5 +68,9 @@ def create_web_demo() -> gr.Blocks: return demo -if __name__ == "__main__": +def run_web_ui(): create_ui().queue().launch(server_name="0.0.0.0", server_port=None, share=False, inbrowser=True) + + +def run_web_demo(): + create_web_demo().queue().launch(server_name="0.0.0.0", 
server_port=None, share=False, inbrowser=True) diff --git a/src/llmtuner/webui/runner.py b/src/llmtuner/webui/runner.py index d53a4dfe..b14271b7 100644 --- a/src/llmtuner/webui/runner.py +++ b/src/llmtuner/webui/runner.py @@ -1,22 +1,19 @@ -import logging import os -import time -from threading import Thread -from typing import TYPE_CHECKING, Any, Dict, Generator +import signal +from copy import deepcopy +from subprocess import Popen, TimeoutExpired +from typing import TYPE_CHECKING, Any, Dict, Generator, Optional -import transformers +import psutil from transformers.trainer import TRAINING_ARGS_NAME from transformers.utils import is_torch_cuda_available -from ..extras.callbacks import LogCallback from ..extras.constants import TRAINING_STAGES -from ..extras.logging import LoggerHandler from ..extras.misc import get_device_count, torch_gc from ..extras.packages import is_gradio_available -from ..train import run_exp from .common import get_module, get_save_dir, load_args, load_config, save_args from .locales import ALERTS -from .utils import gen_cmd, gen_plot, get_eval_results, update_process_bar +from .utils import gen_cmd, get_eval_results, get_trainer_info, save_cmd if is_gradio_available(): @@ -34,24 +31,18 @@ class Runner: self.manager = manager self.demo_mode = demo_mode """ Resume """ - self.thread: "Thread" = None + self.trainer: Optional["Popen"] = None self.do_train = True self.running_data: Dict["Component", Any] = None """ State """ self.aborted = False self.running = False - """ Handler """ - self.logger_handler = LoggerHandler() - self.logger_handler.setLevel(logging.INFO) - logging.root.addHandler(self.logger_handler) - transformers.logging.add_handler(self.logger_handler) - - @property - def alive(self) -> bool: - return self.thread is not None def set_abort(self) -> None: self.aborted = True + if self.trainer is not None: + for children in psutil.Process(self.trainer.pid).children(): # abort the child process + os.kill(children.pid, signal.SIGABRT) def _initialize(self, data: Dict["Component", Any], do_train: bool, from_preview: bool) -> str: get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)] @@ -85,13 +76,11 @@ class Runner: if not from_preview and not is_torch_cuda_available(): gr.Warning(ALERTS["warn_no_cuda"][lang]) - self.logger_handler.reset() - self.trainer_callback = LogCallback(self) return "" def _finalize(self, lang: str, finish_info: str) -> str: finish_info = ALERTS["info_aborted"][lang] if self.aborted else finish_info - self.thread = None + self.trainer = None self.aborted = False self.running = False self.running_data = None @@ -270,11 +259,12 @@ class Runner: gr.Warning(error) yield {output_box: error} else: - args = self._parse_train_args(data) if do_train else self._parse_eval_args(data) - run_kwargs = dict(args=args, callbacks=[self.trainer_callback]) self.do_train, self.running_data = do_train, data - self.thread = Thread(target=run_exp, kwargs=run_kwargs) - self.thread.start() + args = self._parse_train_args(data) if do_train else self._parse_eval_args(data) + env = deepcopy(os.environ) + env["CUDA_VISIBLE_DEVICES"] = os.environ.get("CUDA_VISIBLE_DEVICES", "0") + env["LLAMABOARD_ENABLED"] = "1" + self.trainer = Popen("llamafactory-cli train {}".format(save_cmd(args)), env=env, shell=True) yield from self.monitor() def preview_train(self, data): @@ -291,9 +281,6 @@ class Runner: def monitor(self): get = lambda elem_id: self.running_data[self.manager.get_elem_by_id(elem_id)] - self.aborted = False - self.running = True - lang = 
get("top.lang") model_name = get("top.model_name") finetuning_type = get("top.finetuning_type") @@ -301,28 +288,31 @@ class Runner: output_path = get_save_dir(model_name, finetuning_type, output_dir) output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if self.do_train else "eval")) - process_bar = self.manager.get_elem_by_id("{}.process_bar".format("train" if self.do_train else "eval")) + progress_bar = self.manager.get_elem_by_id("{}.progress_bar".format("train" if self.do_train else "eval")) loss_viewer = self.manager.get_elem_by_id("train.loss_viewer") if self.do_train else None - while self.thread is not None and self.thread.is_alive(): + while self.trainer is not None: if self.aborted: yield { output_box: ALERTS["info_aborting"][lang], - process_bar: gr.Slider(visible=False), + progress_bar: gr.Slider(visible=False), } else: + running_log, running_progress, running_loss = get_trainer_info(output_path) return_dict = { - output_box: self.logger_handler.log, - process_bar: update_process_bar(self.trainer_callback), + output_box: running_log, + progress_bar: running_progress, } - if self.do_train: - plot = gen_plot(output_path) - if plot is not None: - return_dict[loss_viewer] = plot + if self.do_train and running_loss is not None: + return_dict[loss_viewer] = running_loss yield return_dict - time.sleep(2) + try: + self.trainer.wait(2) + self.trainer = None + except TimeoutExpired: + continue if self.do_train: if os.path.exists(os.path.join(output_path, TRAINING_ARGS_NAME)): @@ -337,16 +327,11 @@ class Runner: return_dict = { output_box: self._finalize(lang, finish_info), - process_bar: gr.Slider(visible=False), + progress_bar: gr.Slider(visible=False), } - if self.do_train: - plot = gen_plot(output_path) - if plot is not None: - return_dict[loss_viewer] = plot - yield return_dict - def save_args(self, data): + def save_args(self, data: dict): output_box = self.manager.get_elem_by_id("train.output_box") error = self._initialize(data, do_train=True, from_preview=True) if error: diff --git a/src/llmtuner/webui/utils.py b/src/llmtuner/webui/utils.py index 74f74e6a..2ad1e62c 100644 --- a/src/llmtuner/webui/utils.py +++ b/src/llmtuner/webui/utils.py @@ -1,10 +1,13 @@ import json import os from datetime import datetime -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import Any, Dict, List, Optional, Tuple +from yaml import safe_dump + +from ..extras.constants import RUNNING_LOG, TRAINER_CONFIG, TRAINER_LOG from ..extras.packages import is_gradio_available, is_matplotlib_available -from ..extras.ploting import smooth +from ..extras.ploting import gen_loss_plot from .locales import ALERTS @@ -12,30 +15,6 @@ if is_gradio_available(): import gradio as gr -if is_matplotlib_available(): - import matplotlib.figure - import matplotlib.pyplot as plt - - -if TYPE_CHECKING: - from ..extras.callbacks import LogCallback - - -def update_process_bar(callback: "LogCallback") -> "gr.Slider": - if not callback.max_steps: - return gr.Slider(visible=False) - - percentage = round(100 * callback.cur_steps / callback.max_steps, 0) if callback.max_steps != 0 else 100.0 - label = "Running {:d}/{:d}: {} < {}".format( - callback.cur_steps, callback.max_steps, callback.elapsed_time, callback.remaining_time - ) - return gr.Slider(label=label, value=percentage, visible=True) - - -def get_time() -> str: - return datetime.now().strftime(r"%Y-%m-%d-%H-%M-%S") - - def can_quantize(finetuning_type: str) -> "gr.Dropdown": if finetuning_type != "lora": return gr.Dropdown(value="none", 
interactive=False) @@ -57,14 +36,19 @@ def check_json_schema(text: str, lang: str) -> None: gr.Warning(ALERTS["err_json_schema"][lang]) +def clean_cmd(args: Dict[str, Any]) -> Dict[str, Any]: + no_skip_keys = ["packing"] + return {k: v for k, v in args.items() if (k in no_skip_keys) or (v is not None and v is not False and v != "")} + + def gen_cmd(args: Dict[str, Any]) -> str: args.pop("disable_tqdm", None) args["plot_loss"] = args.get("do_train", None) current_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0") cmd_lines = ["CUDA_VISIBLE_DEVICES={} python src/train_bash.py ".format(current_devices)] - for k, v in args.items(): - if v is not None and v is not False and v != "": - cmd_lines.append(" --{} {} ".format(k, str(v))) + for k, v in clean_cmd(args).items(): + cmd_lines.append(" --{} {} ".format(k, str(v))) + cmd_text = "\\\n".join(cmd_lines) cmd_text = "```bash\n{}\n```".format(cmd_text) return cmd_text @@ -76,29 +60,49 @@ def get_eval_results(path: os.PathLike) -> str: return "```json\n{}\n```\n".format(result) -def gen_plot(output_path: str) -> Optional["matplotlib.figure.Figure"]: - log_file = os.path.join(output_path, "trainer_log.jsonl") - if not os.path.isfile(log_file) or not is_matplotlib_available(): - return +def get_time() -> str: + return datetime.now().strftime(r"%Y-%m-%d-%H-%M-%S") - plt.close("all") - plt.switch_backend("agg") - fig = plt.figure() - ax = fig.add_subplot(111) - steps, losses = [], [] - with open(log_file, "r", encoding="utf-8") as f: - for line in f: - log_info: Dict[str, Any] = json.loads(line) - if log_info.get("loss", None): - steps.append(log_info["current_steps"]) - losses.append(log_info["loss"]) - if len(losses) == 0: - return +def get_trainer_info(output_path: os.PathLike) -> Tuple[str, "gr.Slider", Optional["gr.Plot"]]: + running_log = "" + running_progress = gr.Slider(visible=False) + running_loss = None - ax.plot(steps, losses, color="#1f77b4", alpha=0.4, label="original") - ax.plot(steps, smooth(losses), color="#1f77b4", label="smoothed") - ax.legend() - ax.set_xlabel("step") - ax.set_ylabel("loss") - return fig + running_log_path = os.path.join(output_path, RUNNING_LOG) + if os.path.isfile(running_log_path): + with open(running_log_path, "r", encoding="utf-8") as f: + running_log = f.read() + + trainer_log_path = os.path.join(output_path, TRAINER_LOG) + if os.path.isfile(trainer_log_path): + trainer_log: List[Dict[str, Any]] = [] + with open(trainer_log_path, "r", encoding="utf-8") as f: + for line in f: + trainer_log.append(json.loads(line)) + + if len(trainer_log) != 0: + latest_log = trainer_log[-1] + percentage = latest_log["percentage"] + label = "Running {:d}/{:d}: {} < {}".format( + latest_log["current_steps"], + latest_log["total_steps"], + latest_log["elapsed_time"], + latest_log["remaining_time"], + ) + running_progress = gr.Slider(label=label, value=percentage, visible=True) + + if is_matplotlib_available(): + running_loss = gr.Plot(gen_loss_plot(trainer_log)) + + return running_log, running_progress, running_loss + + +def save_cmd(args: Dict[str, Any]) -> str: + output_dir = args["output_dir"] + os.makedirs(output_dir, exist_ok=True) + + with open(os.path.join(output_dir, TRAINER_CONFIG), "w", encoding="utf-8") as f: + safe_dump(clean_cmd(args), f) + + return os.path.join(output_dir, TRAINER_CONFIG) diff --git a/src/train_bash.py b/src/train.py similarity index 67% rename from src/train_bash.py rename to src/train.py index 9ddd0586..6a3212cb 100644 --- a/src/train_bash.py +++ b/src/train.py @@ -1,4 +1,4 @@ -from llmtuner 
import run_exp +from llmtuner.train.tuner import run_exp def main(): @@ -7,7 +7,7 @@ def main(): def _mp_fn(index): # For xla_spawn (TPUs) - main() + run_exp() if __name__ == "__main__": diff --git a/src/train_web.py b/src/train_web.py deleted file mode 100644 index 8327f4dd..00000000 --- a/src/train_web.py +++ /dev/null @@ -1,9 +0,0 @@ -from llmtuner import create_ui - - -def main(): - create_ui().queue().launch(server_name="0.0.0.0", server_port=None, share=False, inbrowser=True) - - -if __name__ == "__main__": - main() diff --git a/src/web_demo.py b/src/web_demo.py deleted file mode 100644 index 3b57ee73..00000000 --- a/src/web_demo.py +++ /dev/null @@ -1,9 +0,0 @@ -from llmtuner import create_web_demo - - -def main(): - create_web_demo().queue().launch(server_name="0.0.0.0", server_port=None, share=False, inbrowser=True) - - -if __name__ == "__main__": - main() From 33d440b57795e95203d58f31f95946ecbbee700e Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 3 May 2024 03:54:46 +0800 Subject: [PATCH 11/32] fix colab gradio Former-commit-id: 26179a29d3400d1fea155e325a79473a8bc12f04 --- src/llmtuner/webui/interface.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/llmtuner/webui/interface.py b/src/llmtuner/webui/interface.py index feb2a20a..5f17d76d 100644 --- a/src/llmtuner/webui/interface.py +++ b/src/llmtuner/webui/interface.py @@ -69,8 +69,8 @@ def create_web_demo() -> gr.Blocks: def run_web_ui(): - create_ui().queue().launch(server_name="0.0.0.0", server_port=None, share=False, inbrowser=True) + create_ui().queue().launch(server_name="0.0.0.0") def run_web_demo(): - create_web_demo().queue().launch(server_name="0.0.0.0", server_port=None, share=False, inbrowser=True) + create_web_demo().queue().launch(server_name="0.0.0.0") From 57c6eabf83de7bc092ca7d7739443c2aacd1afb9 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 3 May 2024 04:24:50 +0800 Subject: [PATCH 12/32] fix gen_args Former-commit-id: c3e2f4f07b7fb3b1d7d2b44451660f082a467aed --- src/llmtuner/webui/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llmtuner/webui/utils.py b/src/llmtuner/webui/utils.py index 2ad1e62c..74683cb9 100644 --- a/src/llmtuner/webui/utils.py +++ b/src/llmtuner/webui/utils.py @@ -45,7 +45,7 @@ def gen_cmd(args: Dict[str, Any]) -> str: args.pop("disable_tqdm", None) args["plot_loss"] = args.get("do_train", None) current_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0") - cmd_lines = ["CUDA_VISIBLE_DEVICES={} python src/train_bash.py ".format(current_devices)] + cmd_lines = ["CUDA_VISIBLE_DEVICES={} llamafactory-cli train ".format(current_devices)] for k, v in clean_cmd(args).items(): cmd_lines.append(" --{} {} ".format(k, str(v))) From 09d9fb28f91e1517a5c1c66316279b983c9f538f Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 3 May 2024 04:42:50 +0800 Subject: [PATCH 13/32] enable tqdm in webui Former-commit-id: 1737bff64799047a5b715fd979b4c038ae213bb3 --- src/llmtuner/webui/runner.py | 2 -- src/llmtuner/webui/utils.py | 1 - 2 files changed, 3 deletions(-) diff --git a/src/llmtuner/webui/runner.py b/src/llmtuner/webui/runner.py index b14271b7..4ea08348 100644 --- a/src/llmtuner/webui/runner.py +++ b/src/llmtuner/webui/runner.py @@ -142,7 +142,6 @@ class Runner: bf16=(get("train.compute_type") == "bf16"), pure_bf16=(get("train.compute_type") == "pure_bf16"), ) - args["disable_tqdm"] = True if args["finetuning_type"] == "freeze": args["num_layer_trainable"] = get("train.num_layer_trainable") @@ -233,7 +232,6 @@ class Runner: 
temperature=get("eval.temperature"), output_dir=get_save_dir(get("top.model_name"), get("top.finetuning_type"), get("eval.output_dir")), ) - args["disable_tqdm"] = True if get("eval.predict"): args["do_predict"] = True diff --git a/src/llmtuner/webui/utils.py b/src/llmtuner/webui/utils.py index 74683cb9..c8729d36 100644 --- a/src/llmtuner/webui/utils.py +++ b/src/llmtuner/webui/utils.py @@ -42,7 +42,6 @@ def clean_cmd(args: Dict[str, Any]) -> Dict[str, Any]: def gen_cmd(args: Dict[str, Any]) -> str: - args.pop("disable_tqdm", None) args["plot_loss"] = args.get("do_train", None) current_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0") cmd_lines = ["CUDA_VISIBLE_DEVICES={} llamafactory-cli train ".format(current_devices)] From 1fea91736a35759471fde6e37763e6a19431516e Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 3 May 2024 21:24:27 +0800 Subject: [PATCH 14/32] fix callback log multigpu #3559 Former-commit-id: 1f105f1551b12675ca7d339ef5f91333f0371987 --- src/llmtuner/extras/callbacks.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/llmtuner/extras/callbacks.py b/src/llmtuner/extras/callbacks.py index fbe6f373..76f07a42 100644 --- a/src/llmtuner/extras/callbacks.py +++ b/src/llmtuner/extras/callbacks.py @@ -70,11 +70,9 @@ class LogCallback(TrainerCallback): r""" Event called at the beginning of training. """ - if args.should_log: + if args.should_save: self.do_train = True self._reset(max_steps=state.max_steps) - - if args.should_save: os.makedirs(args.output_dir, exist_ok=True) self.thread_pool = ThreadPoolExecutor(max_workers=1) @@ -98,7 +96,7 @@ class LogCallback(TrainerCallback): r""" Event called at the end of a training step. """ - if args.should_log: + if args.should_save: self._timing(cur_steps=state.global_step) if self.aborted: @@ -119,7 +117,7 @@ class LogCallback(TrainerCallback): Event called after a prediction step. """ eval_dataloader = kwargs.pop("eval_dataloader", None) - if args.should_log and has_length(eval_dataloader) and not self.do_train: + if args.should_save and has_length(eval_dataloader) and not self.do_train: if self.max_steps == 0: self.max_steps = len(eval_dataloader) @@ -131,8 +129,11 @@ class LogCallback(TrainerCallback): def on_log(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs) -> None: r""" - Event called after logging the last logs, `args.should_log` has been applied. + Event called after logging the last logs. 
""" + if not args.should_save: + return + logs = dict( current_steps=self.cur_steps, total_steps=self.max_steps, @@ -148,12 +149,12 @@ class LogCallback(TrainerCallback): remaining_time=self.remaining_time, ) logs = {k: v for k, v in logs.items() if v is not None} - if self.webui_mode and "loss" in logs and "learning_rate" in logs and "epoch" in logs: + if self.webui_mode and all(key in logs for key in ["loss", "learning_rate", "epoch"]): logger.info( "{{'loss': {:.4f}, 'learning_rate': {:2.4e}, 'epoch': {:.2f}}}".format( logs["loss"], logs["learning_rate"], logs["epoch"] ) ) - if args.should_save and self.thread_pool is not None: + if self.thread_pool is not None: self.thread_pool.submit(self._write_log, args.output_dir, logs) From 2383e5440cd28e916fb3cb025de708acfec31e4d Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 3 May 2024 23:06:52 +0800 Subject: [PATCH 15/32] fix slow op in dpo/orpo trainer Former-commit-id: 38cad0896ea0516de6d4b2759ec9d45ee67d339b --- src/llmtuner/train/dpo/trainer.py | 16 ++++++++-------- src/llmtuner/train/orpo/trainer.py | 20 ++++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/llmtuner/train/dpo/trainer.py b/src/llmtuner/train/dpo/trainer.py index 35dcd8db..b144d561 100644 --- a/src/llmtuner/train/dpo/trainer.py +++ b/src/llmtuner/train/dpo/trainer.py @@ -165,13 +165,13 @@ class CustomDPOTrainer(DPOTrainer): reward_accuracies = (chosen_rewards > rejected_rewards).float() prefix = "eval_" if train_eval == "eval" else "" - metrics["{}rewards/chosen".format(prefix)] = chosen_rewards.cpu().mean() - metrics["{}rewards/rejected".format(prefix)] = rejected_rewards.cpu().mean() - metrics["{}rewards/accuracies".format(prefix)] = reward_accuracies.cpu().mean() - metrics["{}rewards/margins".format(prefix)] = (chosen_rewards - rejected_rewards).cpu().mean() - metrics["{}logps/rejected".format(prefix)] = policy_rejected_logps.detach().cpu().mean() - metrics["{}logps/chosen".format(prefix)] = policy_chosen_logps.detach().cpu().mean() - metrics["{}logits/rejected".format(prefix)] = policy_rejected_logits.detach().cpu().mean() - metrics["{}logits/chosen".format(prefix)] = policy_chosen_logits.detach().cpu().mean() + metrics["{}rewards/chosen".format(prefix)] = chosen_rewards.mean().cpu() + metrics["{}rewards/rejected".format(prefix)] = rejected_rewards.mean().cpu() + metrics["{}rewards/accuracies".format(prefix)] = reward_accuracies.mean().cpu() + metrics["{}rewards/margins".format(prefix)] = (chosen_rewards - rejected_rewards).mean().cpu() + metrics["{}logps/rejected".format(prefix)] = policy_rejected_logps.detach().mean().cpu() + metrics["{}logps/chosen".format(prefix)] = policy_chosen_logps.detach().mean().cpu() + metrics["{}logits/rejected".format(prefix)] = policy_rejected_logits.detach().mean().cpu() + metrics["{}logits/chosen".format(prefix)] = policy_chosen_logits.detach().mean().cpu() return losses.mean(), metrics diff --git a/src/llmtuner/train/orpo/trainer.py b/src/llmtuner/train/orpo/trainer.py index 5e0d70d9..88090a9e 100644 --- a/src/llmtuner/train/orpo/trainer.py +++ b/src/llmtuner/train/orpo/trainer.py @@ -113,15 +113,15 @@ class CustomORPOTrainer(DPOTrainer): reward_accuracies = (chosen_rewards > rejected_rewards).float() prefix = "eval_" if train_eval == "eval" else "" - metrics["{}rewards/chosen".format(prefix)] = chosen_rewards.cpu().mean() - metrics["{}rewards/rejected".format(prefix)] = rejected_rewards.cpu().mean() - metrics["{}rewards/accuracies".format(prefix)] = reward_accuracies.cpu().mean() - 
metrics["{}rewards/margins".format(prefix)] = (chosen_rewards - rejected_rewards).cpu().mean() - metrics["{}logps/rejected".format(prefix)] = rejected_logps.detach().cpu().mean() - metrics["{}logps/chosen".format(prefix)] = chosen_logps.detach().cpu().mean() - metrics["{}logits/rejected".format(prefix)] = rejected_logits.detach().cpu().mean() - metrics["{}logits/chosen".format(prefix)] = chosen_logits.detach().cpu().mean() - metrics["{}sft_loss".format(prefix)] = sft_loss.detach().cpu().mean() - metrics["{}odds_ratio_loss".format(prefix)] = odds_ratio_loss.detach().cpu().mean() + metrics["{}rewards/chosen".format(prefix)] = chosen_rewards.mean().cpu() + metrics["{}rewards/rejected".format(prefix)] = rejected_rewards.mean().cpu() + metrics["{}rewards/accuracies".format(prefix)] = reward_accuracies.mean().cpu() + metrics["{}rewards/margins".format(prefix)] = (chosen_rewards - rejected_rewards).mean().cpu() + metrics["{}logps/rejected".format(prefix)] = rejected_logps.detach().mean().cpu() + metrics["{}logps/chosen".format(prefix)] = chosen_logps.detach().mean().cpu() + metrics["{}logits/rejected".format(prefix)] = rejected_logits.detach().mean().cpu() + metrics["{}logits/chosen".format(prefix)] = chosen_logits.detach().mean().cpu() + metrics["{}sft_loss".format(prefix)] = sft_loss.detach().mean().cpu() + metrics["{}odds_ratio_loss".format(prefix)] = odds_ratio_loss.detach().mean().cpu() return batch_loss, metrics From 7a4a6a55227176bbfa77b063c95296ded6effc94 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 3 May 2024 23:15:19 +0800 Subject: [PATCH 16/32] fix webui resume Former-commit-id: c2f6582ddd365bb64b72e8057cc4ecd7884d2480 --- src/llmtuner/webui/runner.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/llmtuner/webui/runner.py b/src/llmtuner/webui/runner.py index 4ea08348..b04c9b00 100644 --- a/src/llmtuner/webui/runner.py +++ b/src/llmtuner/webui/runner.py @@ -278,6 +278,9 @@ class Runner: yield from self._launch(data, do_train=False) def monitor(self): + self.aborted = False + self.running = True + get = lambda elem_id: self.running_data[self.manager.get_elem_by_id(elem_id)] lang = get("top.lang") model_name = get("top.model_name") From 182b9747862a7735c799bc7c6115d8e225c2bd63 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 00:19:19 +0800 Subject: [PATCH 17/32] fix eval in webui Former-commit-id: 774ef2bf5823d68b9cc254a676f5adb4af533d75 --- src/llmtuner/extras/callbacks.py | 85 +++++++++++++++++++------- src/llmtuner/webui/common.py | 10 ++- src/llmtuner/webui/components/eval.py | 15 ++--- src/llmtuner/webui/components/train.py | 4 +- src/llmtuner/webui/runner.py | 4 +- src/llmtuner/webui/utils.py | 4 +- 6 files changed, 84 insertions(+), 38 deletions(-) diff --git a/src/llmtuner/extras/callbacks.py b/src/llmtuner/extras/callbacks.py index 76f07a42..a07c7059 100644 --- a/src/llmtuner/extras/callbacks.py +++ b/src/llmtuner/extras/callbacks.py @@ -5,7 +5,7 @@ import signal import time from concurrent.futures import ThreadPoolExecutor from datetime import timedelta -from typing import TYPE_CHECKING, Any, Dict +from typing import TYPE_CHECKING, Any, Dict, Optional import transformers from transformers import TrainerCallback @@ -38,8 +38,20 @@ class FixValueHeadModelCallback(TrainerCallback): class LogCallback(TrainerCallback): def __init__(self, output_dir: str) -> None: + r""" + Initializes a callback for logging training and evaluation status. 
+ """ + """ Progress """ + self.start_time = 0 + self.cur_steps = 0 + self.max_steps = 0 + self.elapsed_time = "" + self.remaining_time = "" + self.thread_pool: Optional["ThreadPoolExecutor"] = None + """ Status """ self.aborted = False self.do_train = False + """ Web UI """ self.webui_mode = bool(int(os.environ.get("LLAMABOARD_ENABLED", "0"))) if self.webui_mode: signal.signal(signal.SIGABRT, self._set_abort) @@ -66,6 +78,19 @@ class LogCallback(TrainerCallback): self.elapsed_time = str(timedelta(seconds=int(elapsed_time))) self.remaining_time = str(timedelta(seconds=int(remaining_time))) + def _write_log(self, output_dir: str, logs: Dict[str, Any]) -> None: + with open(os.path.join(output_dir, TRAINER_LOG), "a", encoding="utf-8") as f: + f.write(json.dumps(logs) + "\n") + + def _create_thread_pool(self, output_dir: str) -> None: + os.makedirs(output_dir, exist_ok=True) + self.thread_pool = ThreadPoolExecutor(max_workers=1) + + def _close_thread_pool(self) -> None: + if self.thread_pool is not None: + self.thread_pool.shutdown(wait=True) + self.thread_pool = None + def on_train_begin(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" Event called at the beginning of training. @@ -73,8 +98,7 @@ class LogCallback(TrainerCallback): if args.should_save: self.do_train = True self._reset(max_steps=state.max_steps) - os.makedirs(args.output_dir, exist_ok=True) - self.thread_pool = ThreadPoolExecutor(max_workers=1) + self._create_thread_pool(output_dir=args.output_dir) if ( args.should_save @@ -84,6 +108,12 @@ class LogCallback(TrainerCallback): logger.warning("Previous trainer log in this folder will be deleted.") os.remove(os.path.join(args.output_dir, TRAINER_LOG)) + def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called at the end of training. + """ + self._close_thread_pool() + def on_substep_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" Event called at the end of an substep during gradient accumulation. @@ -103,31 +133,19 @@ class LogCallback(TrainerCallback): control.should_epoch_stop = True control.should_training_stop = True - def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + def on_evaluate(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" - Event called at the end of training. + Event called after an evaluation phase. """ - self.thread_pool.shutdown(wait=True) - self.thread_pool = None + self._close_thread_pool() - def on_prediction_step( - self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs - ): + def on_predict(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" - Event called after a prediction step. + Event called after a successful prediction. 
""" - eval_dataloader = kwargs.pop("eval_dataloader", None) - if args.should_save and has_length(eval_dataloader) and not self.do_train: - if self.max_steps == 0: - self.max_steps = len(eval_dataloader) + self._close_thread_pool() - self._timing(cur_steps=self.cur_steps + 1) - - def _write_log(self, output_dir: str, logs: Dict[str, Any]): - with open(os.path.join(output_dir, TRAINER_LOG), "a", encoding="utf-8") as f: - f.write(json.dumps(logs) + "\n") - - def on_log(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs) -> None: + def on_log(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" Event called after logging the last logs. """ @@ -158,3 +176,26 @@ class LogCallback(TrainerCallback): if self.thread_pool is not None: self.thread_pool.submit(self._write_log, args.output_dir, logs) + + def on_prediction_step( + self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs + ): + r""" + Event called after a prediction step. + """ + eval_dataloader = kwargs.pop("eval_dataloader", None) + if args.should_save and has_length(eval_dataloader) and not self.do_train: + if self.max_steps == 0: + self._reset(max_steps=len(eval_dataloader)) + self._create_thread_pool(output_dir=args.output_dir) + + self._timing(cur_steps=self.cur_steps + 1) + if self.cur_steps % 5 == 0 and self.thread_pool is not None: + logs = dict( + current_steps=self.cur_steps, + total_steps=self.max_steps, + percentage=round(self.cur_steps / self.max_steps * 100, 2) if self.max_steps != 0 else 100, + elapsed_time=self.elapsed_time, + remaining_time=self.remaining_time, + ) + self.thread_pool.submit(self._write_log, args.output_dir, logs) diff --git a/src/llmtuner/webui/common.py b/src/llmtuner/webui/common.py index a33e3db7..d569f1fa 100644 --- a/src/llmtuner/webui/common.py +++ b/src/llmtuner/webui/common.py @@ -17,6 +17,7 @@ from ..extras.constants import ( TRAINING_STAGES, DownloadSource, ) +from ..extras.logging import get_logger from ..extras.misc import use_modelscope from ..extras.packages import is_gradio_available @@ -25,6 +26,9 @@ if is_gradio_available(): import gradio as gr +logger = get_logger(__name__) + + ADAPTER_NAMES = {WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME} DEFAULT_CACHE_DIR = "cache" DEFAULT_CONFIG_DIR = "config" @@ -128,11 +132,15 @@ def list_adapters(model_name: str, finetuning_type: str) -> "gr.Dropdown": def load_dataset_info(dataset_dir: str) -> Dict[str, Dict[str, Any]]: + if dataset_dir == "ONLINE": + logger.info("dataset_dir is ONLINE, using online dataset.") + return {} + try: with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f: return json.load(f) except Exception as err: - print("Cannot open {} due to {}.".format(os.path.join(dataset_dir, DATA_CONFIG), str(err))) + logger.warning("Cannot open {} due to {}.".format(os.path.join(dataset_dir, DATA_CONFIG), str(err))) return {} diff --git a/src/llmtuner/webui/components/eval.py b/src/llmtuner/webui/components/eval.py index 3910a746..222f9314 100644 --- a/src/llmtuner/webui/components/eval.py +++ b/src/llmtuner/webui/components/eval.py @@ -21,16 +21,16 @@ def create_eval_tab(engine: "Engine") -> Dict[str, "Component"]: with gr.Row(): dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=2) - dataset = gr.Dropdown(multiselect=True, scale=4) + dataset = gr.Dropdown(multiselect=True, allow_custom_value=True, scale=4) preview_elems = create_preview_box(dataset_dir, dataset) input_elems.update({dataset_dir, 
dataset}) elem_dict.update(dict(dataset_dir=dataset_dir, dataset=dataset, **preview_elems)) with gr.Row(): - cutoff_len = gr.Slider(value=1024, minimum=4, maximum=8192, step=1) + cutoff_len = gr.Slider(value=1024, minimum=4, maximum=65536, step=1) max_samples = gr.Textbox(value="100000") - batch_size = gr.Slider(value=8, minimum=1, maximum=512, step=1) + batch_size = gr.Slider(value=2, minimum=1, maximum=1024, step=1) predict = gr.Checkbox(value=True) input_elems.update({cutoff_len, max_samples, batch_size, predict}) @@ -48,30 +48,27 @@ def create_eval_tab(engine: "Engine") -> Dict[str, "Component"]: with gr.Row(): cmd_preview_btn = gr.Button() start_btn = gr.Button(variant="primary") - stop_btn = gr.Button(variant="stop") with gr.Row(): resume_btn = gr.Checkbox(visible=False, interactive=False) - process_bar = gr.Slider(visible=False, interactive=False) + progress_bar = gr.Slider(visible=False, interactive=False) with gr.Row(): output_box = gr.Markdown() - output_elems = [output_box, process_bar] + output_elems = [output_box, progress_bar] elem_dict.update( dict( cmd_preview_btn=cmd_preview_btn, start_btn=start_btn, - stop_btn=stop_btn, resume_btn=resume_btn, - process_bar=process_bar, + progress_bar=progress_bar, output_box=output_box, ) ) cmd_preview_btn.click(engine.runner.preview_eval, input_elems, output_elems, concurrency_limit=None) start_btn.click(engine.runner.run_eval, input_elems, output_elems) - stop_btn.click(engine.runner.set_abort) resume_btn.change(engine.runner.monitor, outputs=output_elems, concurrency_limit=None) dataset_dir.change(list_dataset, [dataset_dir], [dataset], queue=False) diff --git a/src/llmtuner/webui/components/train.py b/src/llmtuner/webui/components/train.py index c709b916..857c56ac 100644 --- a/src/llmtuner/webui/components/train.py +++ b/src/llmtuner/webui/components/train.py @@ -27,7 +27,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: choices=list(TRAINING_STAGES.keys()), value=list(TRAINING_STAGES.keys())[0], scale=1 ) dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=1) - dataset = gr.Dropdown(multiselect=True, scale=4) + dataset = gr.Dropdown(multiselect=True, allow_custom_value=True, scale=4) preview_elems = create_preview_box(dataset_dir, dataset) input_elems.update({training_stage, dataset_dir, dataset}) @@ -52,7 +52,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: ) with gr.Row(): - cutoff_len = gr.Slider(value=1024, minimum=4, maximum=16384, step=1) + cutoff_len = gr.Slider(value=1024, minimum=4, maximum=65536, step=1) batch_size = gr.Slider(value=2, minimum=1, maximum=1024, step=1) gradient_accumulation_steps = gr.Slider(value=8, minimum=1, maximum=1024, step=1) val_size = gr.Slider(value=0, minimum=0, maximum=1, step=0.001) diff --git a/src/llmtuner/webui/runner.py b/src/llmtuner/webui/runner.py index b04c9b00..59515a62 100644 --- a/src/llmtuner/webui/runner.py +++ b/src/llmtuner/webui/runner.py @@ -299,12 +299,12 @@ class Runner: progress_bar: gr.Slider(visible=False), } else: - running_log, running_progress, running_loss = get_trainer_info(output_path) + running_log, running_progress, running_loss = get_trainer_info(output_path, self.do_train) return_dict = { output_box: running_log, progress_bar: running_progress, } - if self.do_train and running_loss is not None: + if running_loss is not None: return_dict[loss_viewer] = running_loss yield return_dict diff --git a/src/llmtuner/webui/utils.py b/src/llmtuner/webui/utils.py index c8729d36..1f2b0591 100644 --- a/src/llmtuner/webui/utils.py 
+++ b/src/llmtuner/webui/utils.py @@ -63,7 +63,7 @@ def get_time() -> str: return datetime.now().strftime(r"%Y-%m-%d-%H-%M-%S") -def get_trainer_info(output_path: os.PathLike) -> Tuple[str, "gr.Slider", Optional["gr.Plot"]]: +def get_trainer_info(output_path: os.PathLike, do_train: bool) -> Tuple[str, "gr.Slider", Optional["gr.Plot"]]: running_log = "" running_progress = gr.Slider(visible=False) running_loss = None @@ -91,7 +91,7 @@ def get_trainer_info(output_path: os.PathLike) -> Tuple[str, "gr.Slider", Option ) running_progress = gr.Slider(label=label, value=percentage, visible=True) - if is_matplotlib_available(): + if do_train and is_matplotlib_available(): running_loss = gr.Plot(gen_loss_plot(trainer_log)) return running_log, running_progress, running_loss From 99125c882582d417de74c45b2bdc3b19aef7b496 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 00:31:02 +0800 Subject: [PATCH 18/32] update readme Former-commit-id: 012e5b9625682a628a0b7fb5879097be7166c7be --- README.md | 10 +++++++++- README_zh.md | 10 +++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8caac93f..5e7d61ea 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main) [![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/) [![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/) -[![Citation](https://img.shields.io/badge/citation-34-green)](#projects-using-llama-factory) +[![Citation](https://img.shields.io/badge/citation-42-green)](#projects-using-llama-factory) [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls) [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK) [![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai) @@ -441,6 +441,7 @@ If you have a project that should be incorporated, please contact via email or c 1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333) 1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419) 1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228) +1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073) 1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541) 1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246) 1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2403.16008) @@ -448,7 +449,14 @@ If you have a project that should be incorporated, please contact via email or c 1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604) 1. Luo et al. 
BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827) 1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167) +1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. 2024. [[arxiv]](https://arxiv.org/abs/2404.04316) 1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084) +1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836) +1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581) +1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215) +1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621) +1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2404.17140) +1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2404.18585) 1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B. 1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge. 1. **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B. diff --git a/README_zh.md b/README_zh.md index 27522232..bfb9feaa 100644 --- a/README_zh.md +++ b/README_zh.md @@ -5,7 +5,7 @@ [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main) [![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/) [![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/) -[![Citation](https://img.shields.io/badge/citation-34-green)](#使用了-llama-factory-的项目) +[![Citation](https://img.shields.io/badge/citation-42-green)](#使用了-llama-factory-的项目) [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls) [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK) [![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai) @@ -441,6 +441,7 @@ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1` 1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333) 1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419) 1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228) +1. Wu et al. Large Language Models are Parallel Multilingual Learners. 
2024. [[arxiv]](https://arxiv.org/abs/2403.09073) 1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541) 1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246) 1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2403.16008) @@ -448,7 +449,14 @@ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1` 1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604) 1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827) 1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167) +1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. 2024. [[arxiv]](https://arxiv.org/abs/2404.04316) 1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084) +1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836) +1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581) +1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215) +1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621) +1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2404.17140) +1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. 2024. [[arxiv]](https://arxiv.org/abs/2404.18585) 1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。 1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。 1. 
**[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: 孙思邈中文医疗大模型 Sumsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。 From 37bcbf72b45c14eb70f0f14e808f47c9101ebb67 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 00:43:02 +0800 Subject: [PATCH 19/32] update readme and webui launch Former-commit-id: c66ffa57323ef6ea78a9b75ec5122d9ea25fd420 --- README.md | 8 +++++--- README_zh.md | 8 +++++--- src/llmtuner/webui/interface.py | 4 ++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 5e7d61ea..4f363099 100644 --- a/README.md +++ b/README.md @@ -344,11 +344,12 @@ To enable FlashAttention-2 on the Windows platform, you need to install the prec #### Use local environment ```bash -export CUDA_VISIBLE_DEVICES=0 # `set CUDA_VISIBLE_DEVICES=0` for Windows -export GRADIO_SERVER_PORT=7860 # `set GRADIO_SERVER_PORT=7860` for Windows llamafactory-cli webui ``` +> [!TIPS] +> To modify the default setting in the LLaMA Board GUI, you can use environment variables, e.g., `export CUDA_VISIBLE_DEVICES=0 GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 GRADIO_SHARE=False` (use `set` command on Windows OS). +
For Alibaba Cloud users If you encountered display problems in LLaMA Board on Alibaba Cloud, try using the following command to set environment variables before starting LLaMA Board: @@ -392,7 +393,8 @@ docker compose -f ./docker-compose.yml up -d See [examples/README.md](examples/README.md) for usage. -Use `llamafactory-cli train -h` to display arguments description. +> [!TIPS] +> Use `llamafactory-cli train -h` to display arguments description. ### Deploy with OpenAI-style API and vLLM diff --git a/README_zh.md b/README_zh.md index bfb9feaa..8f9d5513 100644 --- a/README_zh.md +++ b/README_zh.md @@ -344,11 +344,12 @@ pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/downl #### 使用本地环境 ```bash -export CUDA_VISIBLE_DEVICES=0 # Windows 使用 `set CUDA_VISIBLE_DEVICES=0` -export GRADIO_SERVER_PORT=7860 # Windows 使用 `set GRADIO_SERVER_PORT=7860` llamafactory-cli webui ``` +> [!TIPS] +> 您可以使用环境变量来修改 LLaMA Board 可视化界面的默认设置,例如 `export CUDA_VISIBLE_DEVICES=0 GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 GRADIO_SHARE=False`(Windows 系统可使用 `set` 指令)。 +
阿里云用户指南 如果您在阿里云上使用 LLaMA Board 时遇到显示问题,请尝试在启动前使用以下命令设置环境变量: @@ -392,7 +393,8 @@ docker compose -f ./docker-compose.yml up -d 使用方法请参考 [examples/README_zh.md](examples/README_zh.md)。 -您可以执行 `llamafactory-cli train -h` 来查看参数文档。 +> [!TIPS] +> 您可以执行 `llamafactory-cli train -h` 来查看参数文档。 ### 利用 vLLM 部署 OpenAI API diff --git a/src/llmtuner/webui/interface.py b/src/llmtuner/webui/interface.py index 5f17d76d..459802f2 100644 --- a/src/llmtuner/webui/interface.py +++ b/src/llmtuner/webui/interface.py @@ -69,8 +69,8 @@ def create_web_demo() -> gr.Blocks: def run_web_ui(): - create_ui().queue().launch(server_name="0.0.0.0") + create_ui().queue().launch() def run_web_demo(): - create_web_demo().queue().launch(server_name="0.0.0.0") + create_web_demo().queue().launch() From b1b18b2c5ade54fa8d1ea34f822b881f00417874 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 00:43:53 +0800 Subject: [PATCH 20/32] update readme Former-commit-id: 5061f7196a3278af5ebce77249d9c3c0f8a55b34 --- README.md | 4 ++-- README_zh.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 4f363099..45732220 100644 --- a/README.md +++ b/README.md @@ -347,7 +347,7 @@ To enable FlashAttention-2 on the Windows platform, you need to install the prec llamafactory-cli webui ``` -> [!TIPS] +> [!TIP] > To modify the default setting in the LLaMA Board GUI, you can use environment variables, e.g., `export CUDA_VISIBLE_DEVICES=0 GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 GRADIO_SHARE=False` (use `set` command on Windows OS).
For Alibaba Cloud users @@ -393,7 +393,7 @@ docker compose -f ./docker-compose.yml up -d See [examples/README.md](examples/README.md) for usage. -> [!TIPS] +> [!TIP] > Use `llamafactory-cli train -h` to display arguments description. ### Deploy with OpenAI-style API and vLLM diff --git a/README_zh.md b/README_zh.md index 8f9d5513..4db1f843 100644 --- a/README_zh.md +++ b/README_zh.md @@ -347,7 +347,7 @@ pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/downl llamafactory-cli webui ``` -> [!TIPS] +> [!TIP] > 您可以使用环境变量来修改 LLaMA Board 可视化界面的默认设置,例如 `export CUDA_VISIBLE_DEVICES=0 GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 GRADIO_SHARE=False`(Windows 系统可使用 `set` 指令)。
阿里云用户指南 @@ -393,7 +393,7 @@ docker compose -f ./docker-compose.yml up -d 使用方法请参考 [examples/README_zh.md](examples/README_zh.md)。 -> [!TIPS] +> [!TIP] > 您可以执行 `llamafactory-cli train -h` 来查看参数文档。 ### 利用 vLLM 部署 OpenAI API From efa9140577995a5d899a570e39785c75ab2b84e8 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 15:59:15 +0800 Subject: [PATCH 21/32] update api and support abort eval in webui Former-commit-id: 8661bed68812e9ded9439e8a821b1d7716bc797b --- src/llmtuner/api/app.py | 205 +++++--------------------- src/llmtuner/api/chat.py | 176 ++++++++++++++++++++++ src/llmtuner/api/common.py | 20 +++ src/llmtuner/api/protocol.py | 12 +- src/llmtuner/chat/chat_model.py | 2 +- src/llmtuner/eval/evaluator.py | 5 +- src/llmtuner/extras/callbacks.py | 36 +++-- src/llmtuner/train/tuner.py | 4 +- src/llmtuner/webui/components/eval.py | 3 + src/llmtuner/webui/interface.py | 4 +- src/llmtuner/webui/locales.py | 2 +- 11 files changed, 277 insertions(+), 192 deletions(-) create mode 100644 src/llmtuner/api/chat.py create mode 100644 src/llmtuner/api/common.py diff --git a/src/llmtuner/api/app.py b/src/llmtuner/api/app.py index 36918d1b..375ee61f 100644 --- a/src/llmtuner/api/app.py +++ b/src/llmtuner/api/app.py @@ -1,36 +1,29 @@ -import json import os from contextlib import asynccontextmanager -from typing import Any, Dict, Sequence - -from pydantic import BaseModel +from typing import Annotated, Optional from ..chat import ChatModel -from ..data import Role as DataRole from ..extras.misc import torch_gc from ..extras.packages import is_fastapi_availble, is_starlette_available, is_uvicorn_available +from .chat import ( + create_chat_completion_response, + create_score_evaluation_response, + create_stream_chat_completion_response, +) from .protocol import ( - ChatCompletionMessage, ChatCompletionRequest, ChatCompletionResponse, - ChatCompletionResponseChoice, - ChatCompletionResponseStreamChoice, - ChatCompletionResponseUsage, - ChatCompletionStreamResponse, - Finish, - Function, - FunctionCall, ModelCard, ModelList, - Role, ScoreEvaluationRequest, ScoreEvaluationResponse, ) if is_fastapi_availble(): - from fastapi import FastAPI, HTTPException, status + from fastapi import Depends, FastAPI, HTTPException, status from fastapi.middleware.cors import CORSMiddleware + from fastapi.security.http import HTTPAuthorizationCredentials, HTTPBearer if is_starlette_available(): @@ -47,23 +40,8 @@ async def lifespan(app: "FastAPI"): # collects GPU memory torch_gc() -def dictify(data: "BaseModel") -> Dict[str, Any]: - try: # pydantic v2 - return data.model_dump(exclude_unset=True) - except AttributeError: # pydantic v1 - return data.dict(exclude_unset=True) - - -def jsonify(data: "BaseModel") -> str: - try: # pydantic v2 - return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False) - except AttributeError: # pydantic v1 - return data.json(exclude_unset=True, ensure_ascii=False) - - def create_app(chat_model: "ChatModel") -> "FastAPI": app = FastAPI(lifespan=lifespan) - app.add_middleware( CORSMiddleware, allow_origins=["*"], @@ -71,161 +49,58 @@ def create_app(chat_model: "ChatModel") -> "FastAPI": allow_methods=["*"], allow_headers=["*"], ) + api_key = os.environ.get("API_KEY", None) + security = HTTPBearer(auto_error=False) - role_mapping = { - Role.USER: DataRole.USER.value, - Role.ASSISTANT: DataRole.ASSISTANT.value, - Role.SYSTEM: DataRole.SYSTEM.value, - Role.FUNCTION: DataRole.FUNCTION.value, - Role.TOOL: DataRole.OBSERVATION.value, - } + async def verify_api_key(auth: 
Annotated[Optional[HTTPAuthorizationCredentials], Depends(security)]): + if api_key and (auth is None or auth.credentials != api_key): + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key.") - @app.get("/v1/models", response_model=ModelList) + @app.get( + "/v1/models", + response_model=ModelList, + status_code=status.HTTP_200_OK, + dependencies=[Depends(verify_api_key)], + ) async def list_models(): model_card = ModelCard(id="gpt-3.5-turbo") return ModelList(data=[model_card]) - @app.post("/v1/chat/completions", response_model=ChatCompletionResponse, status_code=status.HTTP_200_OK) + @app.post( + "/v1/chat/completions", + response_model=ChatCompletionResponse, + status_code=status.HTTP_200_OK, + dependencies=[Depends(verify_api_key)], + ) async def create_chat_completion(request: ChatCompletionRequest): if not chat_model.engine.can_generate: raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed") - if len(request.messages) == 0: - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid length") - - if request.messages[0].role == Role.SYSTEM: - system = request.messages.pop(0).content - else: - system = "" - - if len(request.messages) % 2 == 0: - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only supports u/a/u/a/u...") - - input_messages = [] - for i, message in enumerate(request.messages): - if i % 2 == 0 and message.role not in [Role.USER, Role.TOOL]: - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role") - elif i % 2 == 1 and message.role not in [Role.ASSISTANT, Role.FUNCTION]: - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role") - - if message.role == Role.ASSISTANT and isinstance(message.tool_calls, list) and len(message.tool_calls): - name = message.tool_calls[0].function.name - arguments = message.tool_calls[0].function.arguments - content = json.dumps({"name": name, "argument": arguments}, ensure_ascii=False) - input_messages.append({"role": role_mapping[Role.FUNCTION], "content": content}) - else: - input_messages.append({"role": role_mapping[message.role], "content": message.content}) - - tool_list = request.tools - if isinstance(tool_list, list) and len(tool_list): - try: - tools = json.dumps([dictify(tool.function) for tool in tool_list], ensure_ascii=False) - except Exception: - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid tools") - else: - tools = "" - if request.stream: - if tools: - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream function calls.") - - generate = stream_chat_completion(input_messages, system, tools, request) + generate = create_stream_chat_completion_response(request, chat_model) return EventSourceResponse(generate, media_type="text/event-stream") + else: + return await create_chat_completion_response(request, chat_model) - responses = await chat_model.achat( - input_messages, - system, - tools, - do_sample=request.do_sample, - temperature=request.temperature, - top_p=request.top_p, - max_new_tokens=request.max_tokens, - num_return_sequences=request.n, - ) - - prompt_length, response_length = 0, 0 - choices = [] - for i, response in enumerate(responses): - if tools: - result = chat_model.engine.template.format_tools.extract(response.response_text) - else: - result = response.response_text - - if isinstance(result, tuple): - name, arguments = result - function = Function(name=name, arguments=arguments) - 
response_message = ChatCompletionMessage( - role=Role.ASSISTANT, tool_calls=[FunctionCall(function=function)] - ) - finish_reason = Finish.TOOL - else: - response_message = ChatCompletionMessage(role=Role.ASSISTANT, content=result) - finish_reason = Finish.STOP if response.finish_reason == "stop" else Finish.LENGTH - - choices.append( - ChatCompletionResponseChoice(index=i, message=response_message, finish_reason=finish_reason) - ) - prompt_length = response.prompt_length - response_length += response.response_length - - usage = ChatCompletionResponseUsage( - prompt_tokens=prompt_length, - completion_tokens=response_length, - total_tokens=prompt_length + response_length, - ) - - return ChatCompletionResponse(model=request.model, choices=choices, usage=usage) - - async def stream_chat_completion( - messages: Sequence[Dict[str, str]], system: str, tools: str, request: ChatCompletionRequest - ): - choice_data = ChatCompletionResponseStreamChoice( - index=0, delta=ChatCompletionMessage(role=Role.ASSISTANT, content=""), finish_reason=None - ) - chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data]) - yield jsonify(chunk) - - async for new_token in chat_model.astream_chat( - messages, - system, - tools, - do_sample=request.do_sample, - temperature=request.temperature, - top_p=request.top_p, - max_new_tokens=request.max_tokens, - ): - if len(new_token) == 0: - continue - - choice_data = ChatCompletionResponseStreamChoice( - index=0, delta=ChatCompletionMessage(content=new_token), finish_reason=None - ) - chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data]) - yield jsonify(chunk) - - choice_data = ChatCompletionResponseStreamChoice( - index=0, delta=ChatCompletionMessage(), finish_reason=Finish.STOP - ) - chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data]) - yield jsonify(chunk) - yield "[DONE]" - - @app.post("/v1/score/evaluation", response_model=ScoreEvaluationResponse, status_code=status.HTTP_200_OK) + @app.post( + "/v1/score/evaluation", + response_model=ScoreEvaluationResponse, + status_code=status.HTTP_200_OK, + dependencies=[Depends(verify_api_key)], + ) async def create_score_evaluation(request: ScoreEvaluationRequest): if chat_model.engine.can_generate: raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed") - if len(request.messages) == 0: - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request") - - scores = await chat_model.aget_scores(request.messages, max_length=request.max_length) - return ScoreEvaluationResponse(model=request.model, scores=scores) + return await create_score_evaluation_response(request, chat_model) return app -def run_api(): +def run_api() -> None: chat_model = ChatModel() app = create_app(chat_model) - print("Visit http://localhost:{}/docs for API document.".format(os.environ.get("API_PORT", 8000))) - uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("API_PORT", 8000)), workers=1) + api_host = os.environ.get("API_HOST", "0.0.0.0") + api_port = int(os.environ.get("API_PORT", "8000")) + print("Visit http://localhost:{}/docs for API document.".format(api_port)) + uvicorn.run(app, host=api_host, port=api_port) diff --git a/src/llmtuner/api/chat.py b/src/llmtuner/api/chat.py new file mode 100644 index 00000000..c9c00f16 --- /dev/null +++ b/src/llmtuner/api/chat.py @@ -0,0 +1,176 @@ +import json +import uuid +from typing import TYPE_CHECKING, AsyncGenerator, Dict, List, Optional, Tuple + +from ..data import Role as 
DataRole +from ..extras.packages import is_fastapi_availble +from .common import dictify, jsonify +from .protocol import ( + ChatCompletionMessage, + ChatCompletionResponse, + ChatCompletionResponseChoice, + ChatCompletionResponseUsage, + ChatCompletionStreamResponse, + ChatCompletionStreamResponseChoice, + Finish, + Function, + FunctionCall, + Role, + ScoreEvaluationResponse, +) + + +if is_fastapi_availble(): + from fastapi import HTTPException, status + + +if TYPE_CHECKING: + from ..chat import ChatModel + from .protocol import ChatCompletionRequest, ScoreEvaluationRequest + + +ROLE_MAPPING = { + Role.USER: DataRole.USER.value, + Role.ASSISTANT: DataRole.ASSISTANT.value, + Role.SYSTEM: DataRole.SYSTEM.value, + Role.FUNCTION: DataRole.FUNCTION.value, + Role.TOOL: DataRole.OBSERVATION.value, +} + + +async def _process_request(request: "ChatCompletionRequest") -> Tuple[List[Dict[str, str]], str, str]: + if len(request.messages) == 0: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid length") + + if request.messages[0].role == Role.SYSTEM: + system = request.messages.pop(0).content + else: + system = "" + + if len(request.messages) % 2 == 0: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only supports u/a/u/a/u...") + + input_messages = [] + for i, message in enumerate(request.messages): + if i % 2 == 0 and message.role not in [Role.USER, Role.TOOL]: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role") + elif i % 2 == 1 and message.role not in [Role.ASSISTANT, Role.FUNCTION]: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role") + + if message.role == Role.ASSISTANT and isinstance(message.tool_calls, list) and len(message.tool_calls): + name = message.tool_calls[0].function.name + arguments = message.tool_calls[0].function.arguments + content = json.dumps({"name": name, "argument": arguments}, ensure_ascii=False) + input_messages.append({"role": ROLE_MAPPING[Role.FUNCTION], "content": content}) + else: + input_messages.append({"role": ROLE_MAPPING[message.role], "content": message.content}) + + tool_list = request.tools + if isinstance(tool_list, list) and len(tool_list): + try: + tools = json.dumps([dictify(tool.function) for tool in tool_list], ensure_ascii=False) + except Exception: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid tools") + else: + tools = "" + + return input_messages, system, tools + + +async def create_chat_completion_response( + request: "ChatCompletionRequest", chat_model: "ChatModel" +) -> "ChatCompletionResponse": + completion_id = "chatcmpl-{}".format(uuid.uuid4().hex) + input_messages, system, tools = await _process_request(request) + responses = await chat_model.achat( + input_messages, + system, + tools, + do_sample=request.do_sample, + temperature=request.temperature, + top_p=request.top_p, + max_new_tokens=request.max_tokens, + num_return_sequences=request.n, + ) + + prompt_length, response_length = 0, 0 + choices = [] + for i, response in enumerate(responses): + if tools: + result = chat_model.engine.template.format_tools.extract(response.response_text) + else: + result = response.response_text + + if isinstance(result, tuple): + name, arguments = result + function = Function(name=name, arguments=arguments) + tool_call = FunctionCall(id="call_{}".format(uuid.uuid4().hex), function=function) + response_message = ChatCompletionMessage(role=Role.ASSISTANT, tool_calls=[tool_call]) + finish_reason = Finish.TOOL + else: 
+ response_message = ChatCompletionMessage(role=Role.ASSISTANT, content=result) + finish_reason = Finish.STOP if response.finish_reason == "stop" else Finish.LENGTH + + choices.append(ChatCompletionResponseChoice(index=i, message=response_message, finish_reason=finish_reason)) + prompt_length = response.prompt_length + response_length += response.response_length + + usage = ChatCompletionResponseUsage( + prompt_tokens=prompt_length, + completion_tokens=response_length, + total_tokens=prompt_length + response_length, + ) + + return ChatCompletionResponse(id=completion_id, model=request.model, choices=choices, usage=usage) + + +async def _create_stream_chat_completion_chunk( + completion_id: str, + model: str, + delta: "ChatCompletionMessage", + index: Optional[int] = 0, + finish_reason: Optional["Finish"] = None, +) -> str: + choice_data = ChatCompletionStreamResponseChoice(index=index, delta=delta, finish_reason=finish_reason) + chunk = ChatCompletionStreamResponse(id=completion_id, model=model, choices=[choice_data]) + return jsonify(chunk) + + +async def create_stream_chat_completion_response( + request: "ChatCompletionRequest", chat_model: "ChatModel" +) -> AsyncGenerator[str, None]: + completion_id = "chatcmpl-{}".format(uuid.uuid4().hex) + input_messages, system, tools = await _process_request(request) + if tools: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream function calls.") + + yield _create_stream_chat_completion_chunk( + completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(role=Role.ASSISTANT, content="") + ) + async for new_token in chat_model.astream_chat( + input_messages, + system, + tools, + do_sample=request.do_sample, + temperature=request.temperature, + top_p=request.top_p, + max_new_tokens=request.max_tokens, + ): + yield _create_stream_chat_completion_chunk( + completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(content=new_token) + ) + + yield _create_stream_chat_completion_chunk( + completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(), finish_reason=Finish.STOP + ) + yield "[DONE]" + + +async def create_score_evaluation_response( + request: "ScoreEvaluationRequest", chat_model: "ChatModel" +) -> "ScoreEvaluationResponse": + if len(request.messages) == 0: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request") + + scores = await chat_model.aget_scores(request.messages, max_length=request.max_length) + return ScoreEvaluationResponse(model=request.model, scores=scores) diff --git a/src/llmtuner/api/common.py b/src/llmtuner/api/common.py new file mode 100644 index 00000000..5ad9a071 --- /dev/null +++ b/src/llmtuner/api/common.py @@ -0,0 +1,20 @@ +import json +from typing import TYPE_CHECKING, Any, Dict + + +if TYPE_CHECKING: + from pydantic import BaseModel + + +def dictify(data: "BaseModel") -> Dict[str, Any]: + try: # pydantic v2 + return data.model_dump(exclude_unset=True) + except AttributeError: # pydantic v1 + return data.dict(exclude_unset=True) + + +def jsonify(data: "BaseModel") -> str: + try: # pydantic v2 + return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False) + except AttributeError: # pydantic v1 + return data.json(exclude_unset=True, ensure_ascii=False) diff --git a/src/llmtuner/api/protocol.py b/src/llmtuner/api/protocol.py index ece2132b..ae6e2e9b 100644 --- a/src/llmtuner/api/protocol.py +++ b/src/llmtuner/api/protocol.py @@ -51,7 +51,7 @@ class FunctionAvailable(BaseModel): class 
FunctionCall(BaseModel): - id: Literal["call_default"] = "call_default" + id: str type: Literal["function"] = "function" function: Function @@ -86,7 +86,7 @@ class ChatCompletionResponseChoice(BaseModel): finish_reason: Finish -class ChatCompletionResponseStreamChoice(BaseModel): +class ChatCompletionStreamResponseChoice(BaseModel): index: int delta: ChatCompletionMessage finish_reason: Optional[Finish] = None @@ -99,7 +99,7 @@ class ChatCompletionResponseUsage(BaseModel): class ChatCompletionResponse(BaseModel): - id: Literal["chatcmpl-default"] = "chatcmpl-default" + id: str object: Literal["chat.completion"] = "chat.completion" created: int = Field(default_factory=lambda: int(time.time())) model: str @@ -108,11 +108,11 @@ class ChatCompletionResponse(BaseModel): class ChatCompletionStreamResponse(BaseModel): - id: Literal["chatcmpl-default"] = "chatcmpl-default" + id: str object: Literal["chat.completion.chunk"] = "chat.completion.chunk" created: int = Field(default_factory=lambda: int(time.time())) model: str - choices: List[ChatCompletionResponseStreamChoice] + choices: List[ChatCompletionStreamResponseChoice] class ScoreEvaluationRequest(BaseModel): @@ -122,7 +122,7 @@ class ScoreEvaluationRequest(BaseModel): class ScoreEvaluationResponse(BaseModel): - id: Literal["scoreeval-default"] = "scoreeval-default" + id: str object: Literal["score.evaluation"] = "score.evaluation" model: str scores: List[float] diff --git a/src/llmtuner/chat/chat_model.py b/src/llmtuner/chat/chat_model.py index 97ae87d7..281ef0c1 100644 --- a/src/llmtuner/chat/chat_model.py +++ b/src/llmtuner/chat/chat_model.py @@ -98,7 +98,7 @@ class ChatModel: return await self.engine.get_scores(batch_input, **input_kwargs) -def run_chat(): +def run_chat() -> None: try: import platform diff --git a/src/llmtuner/eval/evaluator.py b/src/llmtuner/eval/evaluator.py index 4ea134c6..192f4815 100644 --- a/src/llmtuner/eval/evaluator.py +++ b/src/llmtuner/eval/evaluator.py @@ -118,6 +118,5 @@ class Evaluator: f.write(score_info) -def run_eval(): - evaluator = Evaluator() - evaluator.eval() +def run_eval() -> None: + Evaluator().eval() diff --git a/src/llmtuner/extras/callbacks.py b/src/llmtuner/extras/callbacks.py index a07c7059..a142928a 100644 --- a/src/llmtuner/extras/callbacks.py +++ b/src/llmtuner/extras/callbacks.py @@ -2,6 +2,7 @@ import json import logging import os import signal +import sys import time from concurrent.futures import ThreadPoolExecutor from datetime import timedelta @@ -91,6 +92,18 @@ class LogCallback(TrainerCallback): self.thread_pool.shutdown(wait=True) self.thread_pool = None + def on_init_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called at the end of the initialization of the `Trainer`. + """ + if ( + args.should_save + and os.path.exists(os.path.join(args.output_dir, TRAINER_LOG)) + and args.overwrite_output_dir + ): + logger.warning("Previous trainer log in this folder will be deleted.") + os.remove(os.path.join(args.output_dir, TRAINER_LOG)) + def on_train_begin(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" Event called at the beginning of training. 
@@ -100,14 +113,6 @@ class LogCallback(TrainerCallback): self._reset(max_steps=state.max_steps) self._create_thread_pool(output_dir=args.output_dir) - if ( - args.should_save - and os.path.exists(os.path.join(args.output_dir, TRAINER_LOG)) - and args.overwrite_output_dir - ): - logger.warning("Previous trainer log in this folder will be deleted.") - os.remove(os.path.join(args.output_dir, TRAINER_LOG)) - def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): r""" Event called at the end of training. @@ -126,9 +131,6 @@ class LogCallback(TrainerCallback): r""" Event called at the end of a training step. """ - if args.should_save: - self._timing(cur_steps=state.global_step) - if self.aborted: control.should_epoch_stop = True control.should_training_stop = True @@ -152,6 +154,7 @@ class LogCallback(TrainerCallback): if not args.should_save: return + self._timing(cur_steps=state.global_step) logs = dict( current_steps=self.cur_steps, total_steps=self.max_steps, @@ -183,8 +186,17 @@ class LogCallback(TrainerCallback): r""" Event called after a prediction step. """ + if self.do_train: + return + + if self.aborted: + sys.exit(0) + + if not args.should_save: + return + eval_dataloader = kwargs.pop("eval_dataloader", None) - if args.should_save and has_length(eval_dataloader) and not self.do_train: + if has_length(eval_dataloader): if self.max_steps == 0: self._reset(max_steps=len(eval_dataloader)) self._create_thread_pool(output_dir=args.output_dir) diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py index 6822ffb5..e1a997c1 100644 --- a/src/llmtuner/train/tuner.py +++ b/src/llmtuner/train/tuner.py @@ -23,7 +23,7 @@ if TYPE_CHECKING: logger = get_logger(__name__) -def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: List["TrainerCallback"] = []): +def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: List["TrainerCallback"] = []) -> None: model_args, data_args, training_args, finetuning_args, generating_args = get_train_args(args) callbacks.append(LogCallback(training_args.output_dir)) @@ -43,7 +43,7 @@ def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: List["TrainerCallb raise ValueError("Unknown task.") -def export_model(args: Optional[Dict[str, Any]] = None): +def export_model(args: Optional[Dict[str, Any]] = None) -> None: model_args, data_args, finetuning_args, _ = get_infer_args(args) if model_args.export_dir is None: diff --git a/src/llmtuner/webui/components/eval.py b/src/llmtuner/webui/components/eval.py index 222f9314..60e22bb7 100644 --- a/src/llmtuner/webui/components/eval.py +++ b/src/llmtuner/webui/components/eval.py @@ -48,6 +48,7 @@ def create_eval_tab(engine: "Engine") -> Dict[str, "Component"]: with gr.Row(): cmd_preview_btn = gr.Button() start_btn = gr.Button(variant="primary") + stop_btn = gr.Button(variant="stop") with gr.Row(): resume_btn = gr.Checkbox(visible=False, interactive=False) @@ -61,6 +62,7 @@ def create_eval_tab(engine: "Engine") -> Dict[str, "Component"]: dict( cmd_preview_btn=cmd_preview_btn, start_btn=start_btn, + stop_btn=stop_btn, resume_btn=resume_btn, progress_bar=progress_bar, output_box=output_box, @@ -69,6 +71,7 @@ def create_eval_tab(engine: "Engine") -> Dict[str, "Component"]: cmd_preview_btn.click(engine.runner.preview_eval, input_elems, output_elems, concurrency_limit=None) start_btn.click(engine.runner.run_eval, input_elems, output_elems) + stop_btn.click(engine.runner.set_abort) resume_btn.change(engine.runner.monitor, outputs=output_elems, 
concurrency_limit=None) dataset_dir.change(list_dataset, [dataset_dir], [dataset], queue=False) diff --git a/src/llmtuner/webui/interface.py b/src/llmtuner/webui/interface.py index 459802f2..b293db90 100644 --- a/src/llmtuner/webui/interface.py +++ b/src/llmtuner/webui/interface.py @@ -68,9 +68,9 @@ def create_web_demo() -> gr.Blocks: return demo -def run_web_ui(): +def run_web_ui() -> None: create_ui().queue().launch() -def run_web_demo(): +def run_web_demo() -> None: create_web_demo().queue().launch() diff --git a/src/llmtuner/webui/locales.py b/src/llmtuner/webui/locales.py index 1c474f34..5bf925b7 100644 --- a/src/llmtuner/webui/locales.py +++ b/src/llmtuner/webui/locales.py @@ -1449,7 +1449,7 @@ ALERTS = { "info_aborting": { "en": "Aborted, wait for terminating...", "ru": "Прервано, ожидание завершения...", - "zh": "训练中断,正在等待线程结束……", + "zh": "训练中断,正在等待进程结束……", }, "info_aborted": { "en": "Ready.", From 9381fecca708eb7b251e59204d8471bf82542885 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 16:11:18 +0800 Subject: [PATCH 22/32] fix async stream api response Former-commit-id: d70bbcae6513e50aa6094f2d98c4aa5c6641ea02 --- src/llmtuner/api/chat.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/llmtuner/api/chat.py b/src/llmtuner/api/chat.py index c9c00f16..716dec56 100644 --- a/src/llmtuner/api/chat.py +++ b/src/llmtuner/api/chat.py @@ -38,7 +38,7 @@ ROLE_MAPPING = { } -async def _process_request(request: "ChatCompletionRequest") -> Tuple[List[Dict[str, str]], str, str]: +def _process_request(request: "ChatCompletionRequest") -> Tuple[List[Dict[str, str]], str, str]: if len(request.messages) == 0: raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid length") @@ -77,11 +77,23 @@ async def _process_request(request: "ChatCompletionRequest") -> Tuple[List[Dict[ return input_messages, system, tools +def _create_stream_chat_completion_chunk( + completion_id: str, + model: str, + delta: "ChatCompletionMessage", + index: Optional[int] = 0, + finish_reason: Optional["Finish"] = None, +) -> str: + choice_data = ChatCompletionStreamResponseChoice(index=index, delta=delta, finish_reason=finish_reason) + chunk = ChatCompletionStreamResponse(id=completion_id, model=model, choices=[choice_data]) + return jsonify(chunk) + + async def create_chat_completion_response( request: "ChatCompletionRequest", chat_model: "ChatModel" ) -> "ChatCompletionResponse": completion_id = "chatcmpl-{}".format(uuid.uuid4().hex) - input_messages, system, tools = await _process_request(request) + input_messages, system, tools = _process_request(request) responses = await chat_model.achat( input_messages, system, @@ -124,23 +136,11 @@ async def create_chat_completion_response( return ChatCompletionResponse(id=completion_id, model=request.model, choices=choices, usage=usage) -async def _create_stream_chat_completion_chunk( - completion_id: str, - model: str, - delta: "ChatCompletionMessage", - index: Optional[int] = 0, - finish_reason: Optional["Finish"] = None, -) -> str: - choice_data = ChatCompletionStreamResponseChoice(index=index, delta=delta, finish_reason=finish_reason) - chunk = ChatCompletionStreamResponse(id=completion_id, model=model, choices=[choice_data]) - return jsonify(chunk) - - async def create_stream_chat_completion_response( request: "ChatCompletionRequest", chat_model: "ChatModel" ) -> AsyncGenerator[str, None]: completion_id = "chatcmpl-{}".format(uuid.uuid4().hex) - input_messages, system, tools = await 
_process_request(request) + input_messages, system, tools = _process_request(request) if tools: raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream function calls.") From e9fe8815bece9384ae13b0a2f4916368ef3e8aa8 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 16:13:52 +0800 Subject: [PATCH 23/32] remove empty stream response Former-commit-id: 070d0da928b1e974a094279a2782201016d2a3ab --- src/llmtuner/api/chat.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/llmtuner/api/chat.py b/src/llmtuner/api/chat.py index 716dec56..fa2f0d03 100644 --- a/src/llmtuner/api/chat.py +++ b/src/llmtuner/api/chat.py @@ -156,9 +156,10 @@ async def create_stream_chat_completion_response( top_p=request.top_p, max_new_tokens=request.max_tokens, ): - yield _create_stream_chat_completion_chunk( - completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(content=new_token) - ) + if len(new_token) != 0: + yield _create_stream_chat_completion_chunk( + completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(content=new_token) + ) yield _create_stream_chat_completion_chunk( completion_id=completion_id, model=request.model, delta=ChatCompletionMessage(), finish_reason=Finish.STOP From 6eda42eb7c0904b7bead943a50d24e5fa026fa91 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 17:01:21 +0800 Subject: [PATCH 24/32] update readme Former-commit-id: eaf83847ef6d89d8b70429138e73b04fd2aa3ef8 --- README.md | 2 +- README_zh.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 45732220..347ebe7e 100644 --- a/README.md +++ b/README.md @@ -339,7 +339,7 @@ To enable FlashAttention-2 on the Windows platform, you need to install the prec ### Train with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio)) > [!IMPORTANT] -> LLaMA Board GUI only supports training on a single GPU, please use [CLI](#command-line-interface) for distributed training. +> LLaMA Board GUI only supports training on a single GPU, please use [CLI](#train-with-command-line-interface) for distributed training. 
#### Use local environment diff --git a/README_zh.md b/README_zh.md index 4db1f843..8a2fb79b 100644 --- a/README_zh.md +++ b/README_zh.md @@ -339,7 +339,7 @@ pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/downl ### 利用 LLaMA Board 可视化界面训练(由 [Gradio](https://github.com/gradio-app/gradio) 驱动) > [!IMPORTANT] -> LLaMA Board 可视化界面目前仅支持单 GPU 训练,请使用[命令行接口](#命令行接口)来进行多 GPU 分布式训练。 +> LLaMA Board 可视化界面目前仅支持单 GPU 训练,请使用[命令行接口](#利用命令行接口训练)来进行多 GPU 分布式训练。 #### 使用本地环境 From 342d7da8d73bfddef5a60e9a6236db9fc6c0c28c Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 22:02:25 +0800 Subject: [PATCH 25/32] add cal_ppl script Former-commit-id: 947068c11c0be00db2cecddb2c5842a0d6e2c321 --- scripts/cal_flops.py | 12 +++---- scripts/cal_lr.py | 17 ++++------ scripts/cal_ppl.py | 79 +++++++++++++++++++++++++++++++++++++++++++ scripts/length_cdf.py | 9 +++-- 4 files changed, 95 insertions(+), 22 deletions(-) create mode 100644 scripts/cal_ppl.py diff --git a/scripts/cal_flops.py b/scripts/cal_flops.py index 35d98254..19414ce5 100644 --- a/scripts/cal_flops.py +++ b/scripts/cal_flops.py @@ -3,24 +3,22 @@ # Usage: python cal_flops.py --model_name_or_path path_to_model --batch_size 1 --seq_length 512 # Inspired by: https://www.deepspeed.ai/tutorials/flops-profiler/ -from typing import Optional - import fire import torch from deepspeed.accelerator import get_accelerator # type: ignore from deepspeed.profiling.flops_profiler import get_model_profile # type: ignore -from llmtuner import ChatModel +from llmtuner.chat import ChatModel def calculate_flops( model_name_or_path: str, - batch_size: Optional[int] = 1, - seq_length: Optional[int] = 256, - flash_attn: Optional[bool] = False, + batch_size: int = 1, + seq_length: int = 256, + flash_attn: str = "auto", ): with get_accelerator().device(0): - chat_model = ChatModel(dict(model_name_or_path=model_name_or_path, template="vanilla", flash_attn=flash_attn)) + chat_model = ChatModel(dict(model_name_or_path=model_name_or_path, template="empty", flash_attn=flash_attn)) fake_input = torch.ones((batch_size, seq_length), dtype=torch.long, device=chat_model.model.device) input_dict = {"input_ids": fake_input, "labels": fake_input.clone()} flops, macs, params = get_model_profile(chat_model.model, kwargs=input_dict, print_profile=True, detailed=True) diff --git a/scripts/cal_lr.py b/scripts/cal_lr.py index c1c1f7a2..7bf8839d 100644 --- a/scripts/cal_lr.py +++ b/scripts/cal_lr.py @@ -4,7 +4,6 @@ # Inspired by: https://github.com/imoneoi/openchat/blob/master/ochat/training_deepspeed/train.py import math -from typing import Optional import fire import torch @@ -25,12 +24,12 @@ BASE_BS = 4_000_000 # from llama paper def calculate_lr( model_name_or_path: str, batch_size: int, # total batch size, namely (batch size * gradient accumulation * world size) - stage: Optional[str] = "sft", - dataset: Optional[str] = "alpaca_en", - dataset_dir: Optional[str] = "data", - template: Optional[str] = "default", - cutoff_len: Optional[int] = 1024, # i.e. maximum input length during training - is_mistral: Optional[bool] = False, # mistral model uses a smaller learning rate, + stage: str = "sft", + dataset: str = "alpaca_en", + dataset_dir: str = "data", + template: str = "default", + cutoff_len: int = 1024, # i.e. 
maximum input length during training + is_mistral: bool = False, # mistral model uses a smaller learning rate, ): model_args, data_args, training_args, _, _ = get_train_args( dict( @@ -54,9 +53,7 @@ def calculate_lr( else: raise NotImplementedError - dataloader = DataLoader( - dataset=trainset, batch_size=batch_size, shuffle=True, collate_fn=data_collator, pin_memory=True - ) + dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True) valid_tokens, total_tokens = 0, 0 for batch in tqdm(dataloader): valid_tokens += torch.sum(batch["labels"] != IGNORE_INDEX).item() diff --git a/scripts/cal_ppl.py b/scripts/cal_ppl.py new file mode 100644 index 00000000..bdfc210b --- /dev/null +++ b/scripts/cal_ppl.py @@ -0,0 +1,79 @@ +# coding=utf-8 +# Calculates the ppl of pre-trained models. +# Usage: python cal_flops.py --model_name_or_path path_to_model --batch_size 1 --seq_length 512 + +import json +from typing import Dict + +import fire +import torch +from torch.utils.data import DataLoader +from tqdm import tqdm +from transformers import DataCollatorForLanguageModeling, DataCollatorForSeq2Seq + +from llmtuner.data import get_dataset +from llmtuner.extras.constants import IGNORE_INDEX +from llmtuner.hparams import get_train_args +from llmtuner.model import load_model, load_tokenizer + + +def cal_ppl( + model_name_or_path: str, + batch_size: int = 4, + stage: str = "sft", + dataset: str = "alpaca_en", + dataset_dir: str = "data", + template: str = "default", + cutoff_len: int = 1024, + train_on_prompt: bool = False, +): + model_args, data_args, training_args, finetuning_args, _ = get_train_args( + dict( + stage=stage, + model_name_or_path=model_name_or_path, + dataset=dataset, + dataset_dir=dataset_dir, + template=template, + cutoff_len=cutoff_len, + train_on_prompt=train_on_prompt, + output_dir="dummy_dir", + overwrite_cache=True, + ) + ) + tokenizer_module = load_tokenizer(model_args) + tokenizer = tokenizer_module["tokenizer"] + trainset = get_dataset(model_args, data_args, training_args, stage, **tokenizer_module) + model = load_model(tokenizer, model_args, finetuning_args, is_trainable=False) + if stage == "pt": + data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) + elif stage == "sft": + data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX) + else: + raise NotImplementedError + + dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True) + criterion = torch.nn.CrossEntropyLoss(reduction="none") + perplexities = [] + batch: Dict[str, "torch.Tensor"] + with torch.no_grad(): + for batch in tqdm(dataloader): + batch = batch.to(model.device) + outputs = model(**batch) + shift_logits: "torch.Tensor" = outputs["logits"][..., :-1, :] + shift_labels: "torch.Tensor" = batch["labels"][..., 1:] + loss_mask = shift_labels != IGNORE_INDEX + flatten_logits = shift_logits.contiguous().view(shift_labels.size(0) * shift_labels.size(1), -1) + flatten_labels = shift_labels.contiguous().view(-1) + token_logps: "torch.Tensor" = criterion(flatten_logits, flatten_labels) + token_logps = token_logps.contiguous().view(shift_logits.size(0), -1) + sentence_logps = (token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) + perplexities.extend(sentence_logps.exp().tolist()) + + with open("ppl.json", "w", encoding="utf-8") as f: + json.dump(perplexities, f, indent=2) + + print("Perplexities have been saved at ppl.json.") + + +if __name__ == "__main__": + fire.Fire(cal_ppl) 
diff --git a/scripts/length_cdf.py b/scripts/length_cdf.py index 1446f77a..da41a942 100644 --- a/scripts/length_cdf.py +++ b/scripts/length_cdf.py @@ -3,7 +3,6 @@ # Usage: python length_cdf.py --model_name_or_path path_to_model --dataset alpaca_en --template default from collections import defaultdict -from typing import Optional import fire from tqdm import tqdm @@ -15,10 +14,10 @@ from llmtuner.model import load_tokenizer def length_cdf( model_name_or_path: str, - dataset: Optional[str] = "alpaca_en", - dataset_dir: Optional[str] = "data", - template: Optional[str] = "default", - interval: Optional[int] = 1000, + dataset: str = "alpaca_en", + dataset_dir: str = "data", + template: str = "default", + interval: int = 1000, ): model_args, data_args, training_args, _, _ = get_train_args( dict( From 68ed89f351568ea5c13db47ff0d7e25f13afc9fb Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 22:13:14 +0800 Subject: [PATCH 26/32] update ppl script Former-commit-id: 07606fa4ab303f088170a569c1f86141a1b496c5 --- scripts/cal_ppl.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/cal_ppl.py b/scripts/cal_ppl.py index bdfc210b..6c8c6174 100644 --- a/scripts/cal_ppl.py +++ b/scripts/cal_ppl.py @@ -1,6 +1,6 @@ # coding=utf-8 -# Calculates the ppl of pre-trained models. -# Usage: python cal_flops.py --model_name_or_path path_to_model --batch_size 1 --seq_length 512 +# Calculates the ppl on the dataset of the pre-trained models. +# Usage: python cal_ppl.py --model_name_or_path path_to_model --save_name ppl.json import json from typing import Dict @@ -19,6 +19,7 @@ from llmtuner.model import load_model, load_tokenizer def cal_ppl( model_name_or_path: str, + save_name: str, batch_size: int = 4, stage: str = "sft", dataset: str = "alpaca_en", @@ -69,10 +70,10 @@ def cal_ppl( sentence_logps = (token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) perplexities.extend(sentence_logps.exp().tolist()) - with open("ppl.json", "w", encoding="utf-8") as f: + with open(save_name, "w", encoding="utf-8") as f: json.dump(perplexities, f, indent=2) - print("Perplexities have been saved at ppl.json.") + print("Perplexities have been saved at {}.".format(save_name)) if __name__ == "__main__": From 9b187b274c39aa5cea47fd99fcb9d6919c530309 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 22:35:31 +0800 Subject: [PATCH 27/32] add avg ppl Former-commit-id: 40caeb6f0fdf76a1e2c9ca3761299d087fc643e0 --- scripts/cal_ppl.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/cal_ppl.py b/scripts/cal_ppl.py index 6c8c6174..06c2a43b 100644 --- a/scripts/cal_ppl.py +++ b/scripts/cal_ppl.py @@ -54,6 +54,7 @@ def cal_ppl( dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True) criterion = torch.nn.CrossEntropyLoss(reduction="none") + total_ppl = 0 perplexities = [] batch: Dict[str, "torch.Tensor"] with torch.no_grad(): @@ -68,11 +69,13 @@ def cal_ppl( token_logps: "torch.Tensor" = criterion(flatten_logits, flatten_labels) token_logps = token_logps.contiguous().view(shift_logits.size(0), -1) sentence_logps = (token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) + total_ppl += sentence_logps.exp().sum().item() perplexities.extend(sentence_logps.exp().tolist()) with open(save_name, "w", encoding="utf-8") as f: json.dump(perplexities, f, indent=2) + print("Average perplexity is {:.2f}".format(total_ppl / len(perplexities))) print("Perplexities have been saved at {}.".format(save_name)) From f9aa74715aa0876cedf0a6825cdc2f6de9e74de3 
Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sat, 4 May 2024 23:05:17 +0800 Subject: [PATCH 28/32] update scripts Former-commit-id: 1c07648c4bb4bb0c46bc0240547b46bd2835dce1 --- scripts/cal_lr.py | 3 ++- scripts/cal_ppl.py | 35 +++++++++++++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/scripts/cal_lr.py b/scripts/cal_lr.py index 7bf8839d..dd864162 100644 --- a/scripts/cal_lr.py +++ b/scripts/cal_lr.py @@ -4,6 +4,7 @@ # Inspired by: https://github.com/imoneoi/openchat/blob/master/ochat/training_deepspeed/train.py import math +from typing import Literal import fire import torch @@ -24,7 +25,7 @@ BASE_BS = 4_000_000 # from llama paper def calculate_lr( model_name_or_path: str, batch_size: int, # total batch size, namely (batch size * gradient accumulation * world size) - stage: str = "sft", + stage: Literal["pt", "sft"] = "sft", dataset: str = "alpaca_en", dataset_dir: str = "data", template: str = "default", diff --git a/scripts/cal_ppl.py b/scripts/cal_ppl.py index 06c2a43b..2e74c70a 100644 --- a/scripts/cal_ppl.py +++ b/scripts/cal_ppl.py @@ -3,7 +3,8 @@ # Usage: python cal_ppl.py --model_name_or_path path_to_model --save_name ppl.json import json -from typing import Dict +from dataclasses import dataclass +from typing import Any, Dict, Literal, Sequence import fire import torch @@ -17,11 +18,37 @@ from llmtuner.hparams import get_train_args from llmtuner.model import load_model, load_tokenizer +@dataclass +class PairwiseDataCollatorWithPadding(DataCollatorForSeq2Seq): + r""" + Data collator for pairwise data. + """ + + train_on_prompt: bool = False + + def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, torch.Tensor]: + r""" + Pads batched data to the longest sequence in the batch. + + We generate 2 * n examples where the first n examples represent chosen examples and + the last n examples represent rejected examples. 
+ """ + chosen_features = [] + for feature in features: + prompt_len, answer_len = len(feature["prompt_ids"]), len(feature["chosen_ids"]) + input_ids = feature["prompt_ids"] + feature["chosen_ids"] + attention_mask = [1] * (prompt_len + answer_len) + labels = input_ids if self.train_on_prompt else [IGNORE_INDEX] * prompt_len + feature["chosen_ids"] + chosen_features.append({"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}) + + return super().__call__(chosen_features) + + def cal_ppl( model_name_or_path: str, save_name: str, batch_size: int = 4, - stage: str = "sft", + stage: Literal["pt", "sft", "rm"] = "sft", dataset: str = "alpaca_en", dataset_dir: str = "data", template: str = "default", @@ -49,6 +76,10 @@ def cal_ppl( data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) elif stage == "sft": data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX) + elif stage == "rm": + data_collator = PairwiseDataCollatorWithPadding( + tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX, train_on_prompt=train_on_prompt + ) else: raise NotImplementedError From 7ef3788ff4ebea4df3ef4424e65ac0f6e51fe77a Mon Sep 17 00:00:00 2001 From: hiyouga Date: Sun, 5 May 2024 00:17:54 +0800 Subject: [PATCH 29/32] update webui Former-commit-id: 17a53d25cdadd2df70a8afa0488f75bbf1918b89 --- scripts/cal_ppl.py | 4 ++- src/llmtuner/webui/components/chatbot.py | 6 ++-- src/llmtuner/webui/components/eval.py | 10 +++--- src/llmtuner/webui/components/export.py | 2 +- src/llmtuner/webui/components/train.py | 42 ++++++++++++------------ 5 files changed, 33 insertions(+), 31 deletions(-) diff --git a/scripts/cal_ppl.py b/scripts/cal_ppl.py index 2e74c70a..9eebc57d 100644 --- a/scripts/cal_ppl.py +++ b/scripts/cal_ppl.py @@ -4,7 +4,7 @@ import json from dataclasses import dataclass -from typing import Any, Dict, Literal, Sequence +from typing import Any, Dict, Literal, Optional, Sequence import fire import torch @@ -53,6 +53,7 @@ def cal_ppl( dataset_dir: str = "data", template: str = "default", cutoff_len: int = 1024, + max_samples: Optional[int] = None, train_on_prompt: bool = False, ): model_args, data_args, training_args, finetuning_args, _ = get_train_args( @@ -63,6 +64,7 @@ def cal_ppl( dataset_dir=dataset_dir, template=template, cutoff_len=cutoff_len, + max_samples=max_samples, train_on_prompt=train_on_prompt, output_dir="dummy_dir", overwrite_cache=True, diff --git a/src/llmtuner/webui/components/chatbot.py b/src/llmtuner/webui/components/chatbot.py index 0a55460c..f83694b1 100644 --- a/src/llmtuner/webui/components/chatbot.py +++ b/src/llmtuner/webui/components/chatbot.py @@ -36,9 +36,9 @@ def create_chat_box( submit_btn = gr.Button(variant="primary") with gr.Column(scale=1): - max_new_tokens = gr.Slider(8, 4096, value=512, step=1) - top_p = gr.Slider(0.01, 1.0, value=0.7, step=0.01) - temperature = gr.Slider(0.01, 1.5, value=0.95, step=0.01) + max_new_tokens = gr.Slider(minimum=8, maximum=4096, value=512, step=1) + top_p = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.01) + temperature = gr.Slider(minimum=0.01, maximum=1.5, value=0.95, step=0.01) clear_btn = gr.Button() tools.input(check_json_schema, inputs=[tools, engine.manager.get_elem_by_id("top.lang")]) diff --git a/src/llmtuner/webui/components/eval.py b/src/llmtuner/webui/components/eval.py index 60e22bb7..8b70283b 100644 --- a/src/llmtuner/webui/components/eval.py +++ b/src/llmtuner/webui/components/eval.py @@ -28,18 +28,18 @@ def create_eval_tab(engine: "Engine") 
-> Dict[str, "Component"]: elem_dict.update(dict(dataset_dir=dataset_dir, dataset=dataset, **preview_elems)) with gr.Row(): - cutoff_len = gr.Slider(value=1024, minimum=4, maximum=65536, step=1) + cutoff_len = gr.Slider(minimum=4, maximum=65536, value=1024, step=1) max_samples = gr.Textbox(value="100000") - batch_size = gr.Slider(value=2, minimum=1, maximum=1024, step=1) + batch_size = gr.Slider(minimum=1, maximum=1024, value=2, step=1) predict = gr.Checkbox(value=True) input_elems.update({cutoff_len, max_samples, batch_size, predict}) elem_dict.update(dict(cutoff_len=cutoff_len, max_samples=max_samples, batch_size=batch_size, predict=predict)) with gr.Row(): - max_new_tokens = gr.Slider(10, 2048, value=128, step=1) - top_p = gr.Slider(0.01, 1, value=0.7, step=0.01) - temperature = gr.Slider(0.01, 1.5, value=0.95, step=0.01) + max_new_tokens = gr.Slider(minimum=8, maximum=4096, value=512, step=1) + top_p = gr.Slider(minimum=0.01, maximum=1, value=0.7, step=0.01) + temperature = gr.Slider(minimum=0.01, maximum=1.5, value=0.95, step=0.01) output_dir = gr.Textbox() input_elems.update({max_new_tokens, top_p, temperature, output_dir}) diff --git a/src/llmtuner/webui/components/export.py b/src/llmtuner/webui/components/export.py index 64273882..134b77e0 100644 --- a/src/llmtuner/webui/components/export.py +++ b/src/llmtuner/webui/components/export.py @@ -85,7 +85,7 @@ def save_model( def create_export_tab(engine: "Engine") -> Dict[str, "Component"]: with gr.Row(): - export_size = gr.Slider(value=1, minimum=1, maximum=100, step=1) + export_size = gr.Slider(minimum=1, maximum=100, value=1, step=1) export_quantization_bit = gr.Dropdown(choices=["none", "8", "4", "3", "2"], value="none") export_quantization_dataset = gr.Textbox(value="data/c4_demo.json") export_device = gr.Radio(choices=["cpu", "cuda"], value="cpu") diff --git a/src/llmtuner/webui/components/train.py b/src/llmtuner/webui/components/train.py index 857c56ac..5cde660c 100644 --- a/src/llmtuner/webui/components/train.py +++ b/src/llmtuner/webui/components/train.py @@ -52,10 +52,10 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: ) with gr.Row(): - cutoff_len = gr.Slider(value=1024, minimum=4, maximum=65536, step=1) - batch_size = gr.Slider(value=2, minimum=1, maximum=1024, step=1) - gradient_accumulation_steps = gr.Slider(value=8, minimum=1, maximum=1024, step=1) - val_size = gr.Slider(value=0, minimum=0, maximum=1, step=0.001) + cutoff_len = gr.Slider(minimum=4, maximum=65536, value=1024, step=1) + batch_size = gr.Slider(minimum=1, maximum=1024, value=2, step=1) + gradient_accumulation_steps = gr.Slider(minimum=1, maximum=1024, value=8, step=1) + val_size = gr.Slider(minimum=0, maximum=1, value=0, step=0.001) lr_scheduler_type = gr.Dropdown(choices=[scheduler.value for scheduler in SchedulerType], value="cosine") input_elems.update({cutoff_len, batch_size, gradient_accumulation_steps, val_size, lr_scheduler_type}) @@ -71,10 +71,10 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]: with gr.Accordion(open=False) as extra_tab: with gr.Row(): - logging_steps = gr.Slider(value=5, minimum=5, maximum=1000, step=5) - save_steps = gr.Slider(value=100, minimum=10, maximum=5000, step=10) - warmup_steps = gr.Slider(value=0, minimum=0, maximum=5000, step=1) - neftune_alpha = gr.Slider(value=0, minimum=0, maximum=10, step=0.1) + logging_steps = gr.Slider(minimum=1, maximum=1000, value=5, step=5) + save_steps = gr.Slider(minimum=10, maximum=5000, value=100, step=10) + warmup_steps = gr.Slider(minimum=0, 
maximum=5000, value=0, step=1)
+            neftune_alpha = gr.Slider(minimum=0, maximum=10, value=0, step=0.1)
             optim = gr.Textbox(value="adamw_torch")
 
         with gr.Row():
@@ -124,7 +124,7 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
 
     with gr.Accordion(open=False) as freeze_tab:
         with gr.Row():
-            num_layer_trainable = gr.Slider(value=3, minimum=1, maximum=128, step=1)
+            num_layer_trainable = gr.Slider(minimum=1, maximum=128, value=2, step=1)
             name_module_trainable = gr.Textbox(value="all")
 
     input_elems.update({num_layer_trainable, name_module_trainable})
@@ -136,10 +136,10 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
 
     with gr.Accordion(open=False) as lora_tab:
         with gr.Row():
-            lora_rank = gr.Slider(value=8, minimum=1, maximum=1024, step=1)
-            lora_alpha = gr.Slider(value=16, minimum=1, maximum=2048, step=1)
-            lora_dropout = gr.Slider(value=0, minimum=0, maximum=1, step=0.01)
-            loraplus_lr_ratio = gr.Slider(value=0, minimum=0, maximum=64, step=0.01)
+            lora_rank = gr.Slider(minimum=1, maximum=1024, value=8, step=1)
+            lora_alpha = gr.Slider(minimum=1, maximum=2048, value=16, step=1)
+            lora_dropout = gr.Slider(minimum=0, maximum=1, value=0, step=0.01)
+            loraplus_lr_ratio = gr.Slider(minimum=0, maximum=64, value=0, step=0.01)
             create_new_adapter = gr.Checkbox()
 
         with gr.Row():
@@ -180,9 +180,9 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
 
     with gr.Accordion(open=False) as rlhf_tab:
         with gr.Row():
-            dpo_beta = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01)
-            dpo_ftx = gr.Slider(value=0, minimum=0, maximum=10, step=0.01)
-            orpo_beta = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01)
+            dpo_beta = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.01)
+            dpo_ftx = gr.Slider(minimum=0, maximum=10, value=0, step=0.01)
+            orpo_beta = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.01)
             reward_model = gr.Dropdown(multiselect=True, allow_custom_value=True)
 
     input_elems.update({dpo_beta, dpo_ftx, orpo_beta, reward_model})
@@ -193,9 +193,9 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
     with gr.Accordion(open=False) as galore_tab:
         with gr.Row():
             use_galore = gr.Checkbox()
-            galore_rank = gr.Slider(value=16, minimum=1, maximum=1024, step=1)
-            galore_update_interval = gr.Slider(value=200, minimum=1, maximum=1024, step=1)
-            galore_scale = gr.Slider(value=0.25, minimum=0, maximum=1, step=0.01)
+            galore_rank = gr.Slider(minimum=1, maximum=1024, value=16, step=1)
+            galore_update_interval = gr.Slider(minimum=1, maximum=1024, value=200, step=1)
+            galore_scale = gr.Slider(minimum=0, maximum=1, value=0.25, step=0.01)
             galore_target = gr.Textbox(value="all")
 
     input_elems.update({use_galore, galore_rank, galore_update_interval, galore_scale, galore_target})
@@ -215,8 +215,8 @@ def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
             use_badam = gr.Checkbox()
             badam_mode = gr.Dropdown(choices=["layer", "ratio"], value="layer")
             badam_switch_mode = gr.Dropdown(choices=["ascending", "descending", "random", "fixed"], value="ascending")
-            badam_switch_interval = gr.Slider(value=50, minimum=1, maximum=1024, step=1)
-            badam_update_ratio = gr.Slider(value=0.05, minimum=0, maximum=1, step=0.01)
+            badam_switch_interval = gr.Slider(minimum=1, maximum=1024, value=50, step=1)
+            badam_update_ratio = gr.Slider(minimum=0, maximum=1, value=0.05, step=0.01)
 
     input_elems.update({use_badam, badam_mode, badam_switch_mode, badam_switch_interval, badam_update_ratio})
     elem_dict.update(

From 2f5f6722cf138c72e69acc73c966ca9c5c200a3e Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Sun, 5 May 2024 00:53:07 +0800
Subject: [PATCH 30/32] fix eval scripts

Former-commit-id: fc3743d0b82c28fbff1170761139e4fa5d2a8939
---
 evaluation/ceval/ceval.py |  14 ++--
 evaluation/cmmlu/cmmlu.py | 134 +++++++++++++++++++-------------------
 evaluation/mmlu/mmlu.py   |  12 +---
 3 files changed, 74 insertions(+), 86 deletions(-)

diff --git a/evaluation/ceval/ceval.py b/evaluation/ceval/ceval.py
index 33005de3..4111d6b4 100644
--- a/evaluation/ceval/ceval.py
+++ b/evaluation/ceval/ceval.py
@@ -19,7 +19,7 @@ import pandas as pd
 
 _CITATION = """\
 @article{huang2023ceval,
-  title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models}, 
+  title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
   author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
   journal={arXiv preprint arXiv:2305.08322},
   year={2023}
@@ -133,25 +133,19 @@ class Ceval(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "test", f"{task_name}_test.csv"
-                    ),
+                    "filepath": os.path.join(data_dir, "test", f"{task_name}_test.csv"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "val", f"{task_name}_val.csv"
-                    ),
+                    "filepath": os.path.join(data_dir, "val", f"{task_name}_val.csv"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "dev", f"{task_name}_dev.csv"
-                    ),
+                    "filepath": os.path.join(data_dir, "dev", f"{task_name}_dev.csv"),
                 },
             ),
         ]
diff --git a/evaluation/cmmlu/cmmlu.py b/evaluation/cmmlu/cmmlu.py
index 62096203..37efb328 100644
--- a/evaluation/cmmlu/cmmlu.py
+++ b/evaluation/cmmlu/cmmlu.py
@@ -37,73 +37,73 @@ _LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 Internatio
 _URL = "cmmlu.zip"
 
 task_list = [
-    'agronomy',
-    'anatomy',
-    'ancient_chinese',
-    'arts',
-    'astronomy',
-    'business_ethics',
-    'chinese_civil_service_exam',
-    'chinese_driving_rule',
-    'chinese_food_culture',
-    'chinese_foreign_policy',
-    'chinese_history',
-    'chinese_literature',
-    'chinese_teacher_qualification',
-    'clinical_knowledge',
-    'college_actuarial_science',
-    'college_education',
-    'college_engineering_hydrology',
-    'college_law',
-    'college_mathematics',
-    'college_medical_statistics',
-    'college_medicine',
-    'computer_science',
-    'computer_security',
-    'conceptual_physics',
-    'construction_project_management',
-    'economics',
-    'education',
-    'electrical_engineering',
-    'elementary_chinese',
-    'elementary_commonsense',
-    'elementary_information_and_technology',
-    'elementary_mathematics',
-    'ethnology',
-    'food_science',
-    'genetics',
-    'global_facts',
-    'high_school_biology',
-    'high_school_chemistry',
-    'high_school_geography',
-    'high_school_mathematics',
-    'high_school_physics',
-    'high_school_politics',
-    'human_sexuality',
-    'international_law',
-    'journalism',
-    'jurisprudence',
-    'legal_and_moral_basis',
-    'logical',
-    'machine_learning',
-    'management',
-    'marketing',
-    'marxist_theory',
-    'modern_chinese',
-    'nutrition',
-    'philosophy',
-    'professional_accounting',
-    'professional_law',
-    'professional_medicine',
-    'professional_psychology',
-    'public_relations',
-    'security_study',
-    'sociology',
-    'sports_science',
-    'traditional_chinese_medicine',
-    'virology',
-    'world_history',
-    'world_religions',
+    "agronomy",
+    "anatomy",
+    "ancient_chinese",
+    "arts",
+    "astronomy",
+    "business_ethics",
+    "chinese_civil_service_exam",
+    "chinese_driving_rule",
+    "chinese_food_culture",
+    "chinese_foreign_policy",
+    "chinese_history",
+    "chinese_literature",
+    "chinese_teacher_qualification",
+    "clinical_knowledge",
+    "college_actuarial_science",
+    "college_education",
+    "college_engineering_hydrology",
+    "college_law",
+    "college_mathematics",
+    "college_medical_statistics",
+    "college_medicine",
+    "computer_science",
+    "computer_security",
+    "conceptual_physics",
+    "construction_project_management",
+    "economics",
+    "education",
+    "electrical_engineering",
+    "elementary_chinese",
+    "elementary_commonsense",
+    "elementary_information_and_technology",
+    "elementary_mathematics",
+    "ethnology",
+    "food_science",
+    "genetics",
+    "global_facts",
+    "high_school_biology",
+    "high_school_chemistry",
+    "high_school_geography",
+    "high_school_mathematics",
+    "high_school_physics",
+    "high_school_politics",
+    "human_sexuality",
+    "international_law",
+    "journalism",
+    "jurisprudence",
+    "legal_and_moral_basis",
+    "logical",
+    "machine_learning",
+    "management",
+    "marketing",
+    "marxist_theory",
+    "modern_chinese",
+    "nutrition",
+    "philosophy",
+    "professional_accounting",
+    "professional_law",
+    "professional_medicine",
+    "professional_psychology",
+    "public_relations",
+    "security_study",
+    "sociology",
+    "sports_science",
+    "traditional_chinese_medicine",
+    "virology",
+    "world_history",
+    "world_religions",
 ]
 
 
diff --git a/evaluation/mmlu/mmlu.py b/evaluation/mmlu/mmlu.py
index 9f1bd101..f3218c38 100644
--- a/evaluation/mmlu/mmlu.py
+++ b/evaluation/mmlu/mmlu.py
@@ -136,25 +136,19 @@ class MMLU(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "data", "test", f"{task_name}_test.csv"
-                    ),
+                    "filepath": os.path.join(data_dir, "data", "test", f"{task_name}_test.csv"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "data", "val", f"{task_name}_val.csv"
-                    ),
+                    "filepath": os.path.join(data_dir, "data", "val", f"{task_name}_val.csv"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "data", "dev", f"{task_name}_dev.csv"
-                    ),
+                    "filepath": os.path.join(data_dir, "data", "dev", f"{task_name}_dev.csv"),
                 },
             ),
         ]

From 4674f3baa7d456abe8386f2e7fbd5144acc34b28 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Sun, 5 May 2024 02:44:35 +0800
Subject: [PATCH 31/32] add version and help to cli

Former-commit-id: f762f2215169b9fe55564d5600b758ddc66f9c9c
---
 src/llmtuner/cli.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/src/llmtuner/cli.py b/src/llmtuner/cli.py
index 1b5bd658..f2619ab9 100644
--- a/src/llmtuner/cli.py
+++ b/src/llmtuner/cli.py
@@ -1,6 +1,7 @@
 import sys
 from enum import Enum, unique
 
+from . import __version__
 from .api.app import run_api
 from .chat.chat_model import run_chat
 from .eval.evaluator import run_eval
@@ -8,6 +9,19 @@ from .train.tuner import export_model, run_exp
 from .webui.interface import run_web_demo, run_web_ui
 
 
+USAGE = """
+Usage:
+    llamafactory-cli api -h: launch an API server
+    llamafactory-cli chat -h: launch a chat interface in CLI
+    llamafactory-cli eval -h: do evaluation
+    llamafactory-cli export -h: merge LoRA adapters and export model
+    llamafactory-cli train -h: do training
+    llamafactory-cli webchat -h: launch a chat interface in Web UI
+    llamafactory-cli webui: launch LlamaBoard
+    llamafactory-cli version: show version info
+"""
+
+
 @unique
 class Command(str, Enum):
     API = "api"
@@ -17,6 +31,8 @@ class Command(str, Enum):
     TRAIN = "train"
     WEBDEMO = "webchat"
     WEBUI = "webui"
+    VERSION = "version"
+    HELP = "help"
 
 
 def main():
@@ -35,5 +51,9 @@ def main():
         run_web_demo()
     elif command == Command.WEBUI:
         run_web_ui()
+    elif command == Command.VERSION:
+        print("Welcome to LLaMA Factory, version {}".format(__version__))
+    elif command == Command.HELP:
+        print(USAGE)
     else:
         raise NotImplementedError("Unknown command: {}".format(command))

From d0597897bf88a123a2fb833368eca1a027d7b70a Mon Sep 17 00:00:00 2001
From: Oscar
Date: Sun, 5 May 2024 23:35:19 +0800
Subject: [PATCH 32/32] Fix badam example outdated argument

Former-commit-id: 29aa188cc774cb72367f706f1cd4c07bc5a9f241
---
 examples/extras/badam/sft.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/extras/badam/sft.sh b/examples/extras/badam/sft.sh
index 61167dad..4bcfe9d2 100644
--- a/examples/extras/badam/sft.sh
+++ b/examples/extras/badam/sft.sh
@@ -10,7 +10,7 @@ CUDA_VISIBLE_DEVICES=0 llamafactory-cli train \
     --finetuning_type full \
     --use_badam \
     --badam_switch_mode descending \
-    --badam_switch_block_every 50 \
+    --badam_switch_interval 50 \
     --badam_verbose 2 \
     --output_dir ../../../saves/LLaMA2-7B/badam/sft \
     --overwrite_cache \