mirror of https://github.com/hiyouga/LLaMA-Factory.git
refactor dataset_attr, add eos in pt, fix #757
Former-commit-id: a9d1fb72f7
@@ -6,13 +6,13 @@ If you are using a custom dataset, please provide your dataset definition in the
   "script_url": "the name of the directory containing a dataset loading script. (if specified, ignore below 2 arguments)",
   "file_name": "the name of the dataset file in the this directory. (required if above are not specified)",
   "file_sha1": "the SHA-1 hash value of the dataset file. (optional)",
+  "ranking": "whether the examples contains ranked responses or not. (default: false)",
   "columns": {
     "prompt": "the name of the column in the datasets containing the prompts. (default: instruction)",
     "query": "the name of the column in the datasets containing the queries. (default: input)",
     "response": "the name of the column in the datasets containing the responses. (default: output)",
     "history": "the name of the column in the datasets containing the history of chat. (default: None)"
-  },
-  "stage": "The stage at which the data is being used: pt, sft, and rm, which correspond to pre-training, supervised fine-tuning(PPO), and reward model (DPO) training, respectively.(default: None)"
+  }
 }
 ```
 
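As a concrete illustration of the dataset definition format documented above, a custom SFT dataset registered in `dataset_info.json` might look like the sketch below. The dataset name `my_dataset` and the file `my_data.json` are hypothetical placeholders; `file_sha1` is optional and omitted here.

```json
"my_dataset": {
  "file_name": "my_data.json",
  "columns": {
    "prompt": "instruction",
    "query": "input",
    "response": "output",
    "history": "history"
  }
}
```

The first three mappings match the documented defaults; `history` defaults to None, so it is listed explicitly when the file contains multi-turn history.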
@@ -27,7 +27,6 @@ For datasets used in reward modeling or DPO training, the `response` column shou
   "output": [
     "Chosen answer",
     "Rejected answer"
-  ],
-  "stage": "rm"
+  ]
 }
 ```
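With the `stage` attribute removed in this commit, a pairwise dataset like the one above is marked by `ranking` in `dataset_info.json`, mirroring the `comparison_gpt4_en` entry further down in this diff. The entry name and file name in this sketch are hypothetical:

```json
"my_comparison_data": {
  "file_name": "my_comparison_data.json",
  "ranking": true
}
```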
@@ -6,19 +6,19 @@
   "script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略下列两个参数)",
   "file_name": "该目录下数据集文件的名称(若上述参数未指定,则此项必需)",
   "file_sha1": "数据集文件的SHA-1哈希值(可选)",
+  "ranking": "数据集是否包含排序后的回答(默认:false)",
   "columns": {
     "prompt": "数据集代表提示词的表头名称(默认:instruction)",
     "query": "数据集代表请求的表头名称(默认:input)",
     "response": "数据集代表回答的表头名称(默认:output)",
     "history": "数据集代表历史对话的表头名称(默认:None)"
-  },
-  "stage": "数据所应用的训练阶段,可选值有 pt, sft, rm 三个,对应预训练,指令监督微调(PPO),奖励模型(DPO)训练, 默认为None,表示不限制"
+  }
 }
 ```
 
 其中 `prompt` 和 `response` 列应当是非空的字符串。`query` 列的内容将会和 `prompt` 列拼接作为模型输入。`history` 列应当是一个列表,其中每个元素是一个字符串二元组,分别代表用户请求和模型答复。
 
-对于奖励模型或 DPO 训练的数据集,`response` 列应当是一个字符串列表,排在前面的代表更优的答案,例如:
+对于训练奖励模型或 DPO 训练的数据集,`response` 列应当是一个字符串列表,排在前面的代表更优的答案,例如:
 
 ```json
 {
@@ -27,7 +27,6 @@
   "output": [
     "Chosen answer",
     "Rejected answer"
-  ],
-  "stage": "rm"
+  ]
 }
 ```
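The README text above also specifies that the `history` column is a list of two-element string pairs, each holding an earlier user query and the corresponding model reply. A hypothetical record using the default column names would therefore look like:

```json
{
  "instruction": "Current user instruction",
  "input": "Optional extra input for the current turn",
  "output": "Model response to the current turn",
  "history": [
    ["First user query", "First model reply"],
    ["Second user query", "Second model reply"]
  ]
}
```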
@@ -1,28 +1,23 @@
 {
   "alpaca_en": {
     "file_name": "alpaca_data_en_52k.json",
-    "file_sha1": "607f94a7f581341e59685aef32f531095232cf23",
-    "stage": "sft"
+    "file_sha1": "607f94a7f581341e59685aef32f531095232cf23"
   },
   "alpaca_zh": {
     "file_name": "alpaca_data_zh_51k.json",
-    "file_sha1": "e655af3db557a4197f7b0cf92e1986b08fae6311",
-    "stage": "sft"
+    "file_sha1": "e655af3db557a4197f7b0cf92e1986b08fae6311"
   },
   "alpaca_gpt4_en": {
     "file_name": "alpaca_gpt4_data_en.json",
-    "file_sha1": "647f4ad447bd993e4b6b6223d1be15208bab694a",
-    "stage": "sft"
+    "file_sha1": "647f4ad447bd993e4b6b6223d1be15208bab694a"
   },
   "alpaca_gpt4_zh": {
     "file_name": "alpaca_gpt4_data_zh.json",
-    "file_sha1": "3eaa3bda364ccdd59925d7448a698256c31ef845",
-    "stage": "sft"
+    "file_sha1": "3eaa3bda364ccdd59925d7448a698256c31ef845"
   },
   "self_cognition": {
     "file_name": "self_cognition.json",
-    "file_sha1": "6287a730ada924fc5d9eadc6d8f865e01b7a6f67",
-    "stage": "sft"
+    "file_sha1": "6287a730ada924fc5d9eadc6d8f865e01b7a6f67"
   },
   "oaast_sft": {
     "file_name": "oaast_sft.json",
@@ -32,8 +27,7 @@
       "query": "input",
       "response": "output",
       "history": "history"
-    },
-    "stage": "sft"
+    }
   },
   "oaast_sft_zh": {
     "file_name": "oaast_sft_zh.json",
@@ -43,8 +37,7 @@
       "query": "input",
       "response": "output",
       "history": "history"
-    },
-    "stage": "sft"
+    }
   },
   "sharegpt_zh": {
     "file_name": "sharegpt_zh_27k.json",
@@ -54,8 +47,7 @@
       "query": "input",
       "response": "output",
      "history": "history"
-    },
-    "stage": "sft"
+    }
   },
   "lima": {
     "file_name": "lima.json",
@@ -65,8 +57,7 @@
       "query": "input",
       "response": "output",
       "history": "history"
-    },
-    "stage": "sft"
+    }
   },
   "example": {
     "script_url": "example_dataset",
@@ -75,32 +66,25 @@
       "query": "input",
       "response": "output",
       "history": "history"
-    },
-    "stage": "sft"
+    }
   },
   "guanaco": {
-    "hf_hub_url": "JosephusCheung/GuanacoDataset",
-    "stage": "sft"
+    "hf_hub_url": "JosephusCheung/GuanacoDataset"
   },
   "belle_0.5m": {
-    "hf_hub_url": "BelleGroup/train_0.5M_CN",
-    "stage": "sft"
+    "hf_hub_url": "BelleGroup/train_0.5M_CN"
   },
   "belle_1m": {
-    "hf_hub_url": "BelleGroup/train_1M_CN",
-    "stage": "sft"
+    "hf_hub_url": "BelleGroup/train_1M_CN"
   },
   "belle_2m": {
-    "hf_hub_url": "BelleGroup/train_2M_CN",
-    "stage": "sft"
+    "hf_hub_url": "BelleGroup/train_2M_CN"
   },
   "belle_dialog": {
-    "hf_hub_url": "BelleGroup/generated_chat_0.4M",
-    "stage": "sft"
+    "hf_hub_url": "BelleGroup/generated_chat_0.4M"
   },
   "belle_math": {
-    "hf_hub_url": "BelleGroup/school_math_0.25M",
-    "stage": "sft"
+    "hf_hub_url": "BelleGroup/school_math_0.25M"
   },
   "belle_multiturn": {
     "script_url": "belle_multiturn",
@@ -109,8 +93,7 @@
       "query": "",
       "response": "output",
       "history": "history"
-    },
-    "stage": "sft"
+    }
   },
   "firefly": {
     "hf_hub_url": "YeungNLP/firefly-train-1.1M",
@@ -119,16 +102,13 @@
       "query": "",
       "response": "target",
       "history": ""
-    },
-    "stage": "sft"
+    }
   },
   "codealpaca": {
-    "hf_hub_url": "sahil2801/CodeAlpaca-20k",
-    "stage": "sft"
+    "hf_hub_url": "sahil2801/CodeAlpaca-20k"
   },
   "alpaca_cot": {
-    "hf_hub_url": "QingyiSi/Alpaca-CoT",
-    "stage": "sft"
+    "hf_hub_url": "QingyiSi/Alpaca-CoT"
   },
   "webqa": {
     "hf_hub_url": "suolyer/webqa",
@@ -137,8 +117,7 @@
       "query": "",
       "response": "output",
       "history": ""
-    },
-    "stage": "sft"
+    }
   },
   "ultra_chat": {
     "script_url": "ultra_chat",
@@ -147,32 +126,29 @@
       "query": "",
       "response": "output",
       "history": "history"
-    },
-    "stage": "sft"
+    }
   },
   "novel_tokens512_50k": {
-    "hf_hub_url": "zxbsmk/webnovel_cn",
-    "stage": "sft"
+    "hf_hub_url": "zxbsmk/webnovel_cn"
   },
-  "ad_gen": {
+  "adgen": {
     "hf_hub_url": "HasturOfficial/adgen",
     "columns": {
       "prompt": "content",
       "query": "",
       "response": "summary",
       "history": ""
-    },
-    "stage": "sft"
+    }
   },
   "comparison_gpt4_en": {
     "file_name": "comparison_gpt4_data_en.json",
     "file_sha1": "96fa18313544e22444fe20eead7754b17da452ae",
-    "stage": "rm"
+    "ranking": true
   },
   "comparison_gpt4_zh": {
     "file_name": "comparison_gpt4_data_zh.json",
     "file_sha1": "515b18ed497199131ddcc1af950345c11dc5c7fd",
-    "stage": "rm"
+    "ranking": true
   },
   "hh_rlhf_en": {
     "script_url": "hh_rlhf_en",
@@ -182,7 +158,7 @@
       "response": "output",
       "history": "history"
     },
-    "stage": "rm"
+    "ranking": true
   },
   "oaast_rm": {
     "file_name": "oaast_rm.json",
@@ -193,7 +169,7 @@
       "response": "output",
       "history": "history"
     },
-    "stage": "rm"
+    "ranking": true
   },
   "oaast_rm_zh": {
     "file_name": "oaast_rm_zh.json",
@@ -204,7 +180,7 @@
       "response": "output",
       "history": "history"
     },
-    "stage": "rm"
+    "ranking": true
   },
   "wiki_demo": {
     "file_name": "wiki_demo.txt",
@@ -214,8 +190,7 @@
       "query": "",
       "response": "",
       "history": ""
-    },
-    "stage": "pt"
+    }
   },
   "refinedweb": {
     "hf_hub_url": "tiiuae/falcon-refinedweb",
@@ -224,18 +199,7 @@
       "query": "",
       "response": "",
       "history": ""
-    },
-    "stage": "pt"
-  },
-  "starcoder": {
-    "hf_hub_url": "bigcode/starcoderdata",
-    "columns": {
-      "prompt": "content",
-      "query": "",
-      "response": "",
-      "history": ""
-    },
-    "stage": "pt"
+    }
   },
   "wikipedia_en": {
     "hf_hub_url": "olm/olm-wikipedia-20221220",
@@ -244,8 +208,7 @@
       "query": "",
       "response": "",
       "history": ""
-    },
-    "stage": "pt"
+    }
   },
   "wikipedia_zh": {
     "hf_hub_url": "pleisto/wikipedia-cn-20230720-filtered",
@@ -254,7 +217,24 @@
       "query": "",
       "response": "",
       "history": ""
-    },
-    "stage": "pt"
+    }
+  },
+  "the_stack": {
+    "hf_hub_url": "bigcode/the-stack",
+    "columns": {
+      "prompt": "content",
+      "query": "",
+      "response": "",
+      "history": ""
+    }
+  },
+  "starcoder": {
+    "hf_hub_url": "bigcode/starcoderdata",
+    "columns": {
+      "prompt": "content",
+      "query": "",
+      "response": "",
+      "history": ""
+    }
   }
 }
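After this refactor, `dataset_info.json` no longer declares a per-dataset `stage`; pre-training corpora such as `the_stack` and `starcoder` above are identified only by mapping `prompt` to the text column, and the stage is presumably chosen when launching a run rather than per dataset (the commit title also notes that an EOS token is now added during pre-training). A hypothetical custom pre-training corpus would then be registered like the sketch below, where the dataset name, file name, and the column name `text` are placeholders:

```json
"my_pretrain_corpus": {
  "file_name": "my_corpus.json",
  "columns": {
    "prompt": "text",
    "query": "",
    "response": "",
    "history": ""
  }
}
```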