Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-09-16 10:02:48 +08:00)

Commit 38755bced7 (parent 28a9176784): add template, modify datasets
Former-commit-id: 386f590209e466b51c17a7ac8cee55fc3ce928d7
belle_multiturn.py:

@@ -1,6 +1,5 @@
 import json
 import datasets
-from typing import Any, Dict, List


 _DESCRIPTION = "BELLE multiturn chat dataset."
@@ -23,7 +22,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):

     VERSION = datasets.Version("0.0.0")

-    def _info(self) -> datasets.DatasetInfo:
+    def _info(self):
         features = datasets.Features({
             "instruction": datasets.Value("string"),
             "output": datasets.Value("string"),
@@ -37,7 +36,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
             citation=_CITATION
         )

-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
         file_path = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
@@ -48,7 +47,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
             )
         ]

-    def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat with history
+    def _generate_examples(self, filepath: str):
         with open(filepath, "r", encoding="utf-8") as f:
             for key, row in enumerate(f):
                 data = json.loads(row)
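A note on the annotation removals in this and the other dataset scripts below: _generate_examples is a generator, so the removed "-> Dict[int, Dict[str, Any]]" return annotation was inaccurate to begin with (it described the yielded pairs, not the return value). If one wanted to keep the hints rather than drop them, an accurate signature would look like this hypothetical sketch (not part of this commit):

    # Hypothetical sketch only; the commit removes the hints instead of fixing them.
    import json
    from typing import Any, Dict, Iterator, Tuple

    def generate_examples(filepath: str) -> Iterator[Tuple[int, Dict[str, Any]]]:
        # a generator of (key, example) pairs, hence Iterator[Tuple[...]]
        with open(filepath, "r", encoding="utf-8") as f:
            for key, row in enumerate(f):
                yield key, json.loads(row)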
dataset_info.json:

@@ -96,11 +96,7 @@
   },
   "ultra_chat": {
     "script_url": "ultra_chat",
-    "columns": {
-      "prompt": "instruction",
-      "response": "output",
-      "history": "history"
-    }
+    "formatting": "sharegpt"
   },
   "open_platypus": {
     "hf_hub_url": "garage-bAInd/Open-Platypus"
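With "formatting": "sharegpt", the ultra_chat entry no longer needs a column mapping: the loader reads a single "conversations" field of alternating turns instead of separate prompt/response/history columns. A minimal sketch of the two record shapes (sample values invented for illustration; the "from"/"value" field names follow the ShareGPT convention this commit adopts):

    # Old column-mapped shape, one example:
    old_style = {
        "instruction": "What is 2 + 2?",
        "output": "2 + 2 equals 4.",
        "history": [["Hi!", "Hello, how can I help?"]],
    }

    # ShareGPT shape: the same example as one ordered list of turns.
    sharegpt_style = {
        "conversations": [
            {"from": "human", "value": "Hi!"},
            {"from": "gpt", "value": "Hello, how can I help?"},
            {"from": "human", "value": "What is 2 + 2?"},
            {"from": "gpt", "value": "2 + 2 equals 4."},
        ]
    }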
example_dataset.py:

@@ -3,7 +3,7 @@ import datasets
 from typing import Any, Dict, List


-_DESCRIPTION = "An example of dataset for LLaMA."
+_DESCRIPTION = "An example of dataset."
 _CITATION = ""
 _HOMEPAGE = ""
 _LICENSE = ""
hh_rlhf_en.py:

@@ -1,9 +1,9 @@
 import json
 import datasets
-from typing import Any, Dict, List
+from typing import List


-_DESCRIPTION = "Human preference data about helpfulness and harmlessness for ChatGLM."
+_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
 _CITATION = ""
 _HOMEPAGE = "https://huggingface.co/datasets/Anthropic/hh-rlhf"
 _LICENSE = "mit"
@@ -42,7 +42,7 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
             citation=_CITATION
         )

-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
         file_path = dl_manager.download_and_extract(_URLS)
         return [
             datasets.SplitGenerator(
@@ -59,7 +59,7 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder):
             )
         ]

-    def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM
+    def _generate_examples(self, filepaths: List[str]):
         key = 0
         for filepath in filepaths:
             with open(filepath, "r", encoding="utf-8") as f:
ultra_chat.py:

@@ -1,6 +1,6 @@
 import json
 import datasets
-from typing import Any, Dict, List
+from typing import List


 _DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data."
@@ -21,15 +21,13 @@ _LICENSE = "cc-by-nc-4.0"
 _BASE_DATA_URL = "https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl"


-class BelleMultiturn(datasets.GeneratorBasedBuilder):
+class UltraChat(datasets.GeneratorBasedBuilder):

     VERSION = datasets.Version("0.0.0")

-    def _info(self) -> datasets.DatasetInfo:
+    def _info(self):
         features = datasets.Features({
-            "instruction": datasets.Value("string"),
-            "output": datasets.Value("string"),
-            "history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
+            "conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]
         })
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -39,8 +37,8 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
             citation=_CITATION
         )

-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(9)] # multiple shards
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
+        file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)] # multiple shards
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -50,7 +48,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
             )
         ]

-    def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM
+    def _generate_examples(self, filepaths: List[str]):
         for filepath in filepaths:
             with open(filepath, "r", encoding="utf-8") as f:
                 for row in f:
@@ -58,19 +56,16 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
                         data = json.loads(row)
                     except:
                         continue
-                    key = data["id"]
-                    content = data["data"]
+                    key: int = data["id"]
+                    content: List[str] = data["data"]
                     if len(content) % 2 == 1:
                         content.pop(-1)
                     if len(content) < 2:
                         continue
-
-                    query = content[-2]
-                    response = content[-1]
-                    history = [[content[2*i], content[2*i+1]] for i in range(len(content) // 2 - 1)]
+                    conversations = [{
+                        "from": "human" if i % 2 == 0 else "gpt",
+                        "value": content[i]
+                    } for i in range(len(content))]

                     yield key, {
-                        "instruction": query,
-                        "output": response,
-                        "history": history
+                        "conversations": conversations
                     }
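Two substantive fixes ride along with the rename from BelleMultiturn to UltraChat. First, range(9) -> range(10) picks up the tenth shard (train_9.jsonl) that the off-by-one previously skipped, since _BASE_DATA_URL is indexed from train_0.jsonl. Second, _generate_examples now flattens each record into alternating human/gpt turns instead of splitting it into query/response/history. A standalone sketch of that transformation (sample data invented for illustration):

    from typing import Dict, List

    def to_conversations(content: List[str]) -> List[Dict[str, str]]:
        # drop a trailing unanswered query so turns come in (human, gpt) pairs
        if len(content) % 2 == 1:
            content = content[:-1]
        return [
            {"from": "human" if i % 2 == 0 else "gpt", "value": content[i]}
            for i in range(len(content))
        ]

    turns = to_conversations(["How are you?", "Fine, thanks.", "And you?", "Doing well!"])
    # [{'from': 'human', 'value': 'How are you?'},
    #  {'from': 'gpt', 'value': 'Fine, thanks.'}, ...]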
dataset preprocessing module:

@@ -51,9 +51,6 @@ def preprocess_dataset(
            setattr(tokenizer, "add_eos_token", True)

        tokenized_examples = tokenizer(examples["prompt"], **kwargs)
-        # Make sure the saved tokenizer is the same as the original
-        if hasattr(tokenizer, "add_eos_token"): # for Baichuan2 tokenizer
-            setattr(tokenizer, "add_eos_token", add_eos_token_flag)
        concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()}
        total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]])
        block_size = data_args.cutoff_len
@@ -64,6 +61,9 @@ def preprocess_dataset(
            k: [t[i: i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
+        # make sure the saved tokenizer is the same as the original one
+        if hasattr(tokenizer, "add_eos_token"):
+            setattr(tokenizer, "add_eos_token", add_eos_token_flag)
        return result

    def preprocess_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, List[List[int]]]:
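The restoration of add_eos_token is moved from right after the tokenizer call to just before return result, so the flag is only reset once all work on the batch is done, and the tokenizer that later gets saved still matches the original (the flag is forced to True temporarily so an EOS token is appended while packing pretraining text). A minimal sketch of the save/restore pattern, assuming only a tokenizer that exposes an add_eos_token attribute (as the Baichuan2 and LLaMA tokenizers do):

    def tokenize_with_temporary_eos(tokenizer, texts):
        # remember the original flag, then force EOS appending
        add_eos_token_flag = getattr(tokenizer, "add_eos_token", None)
        if hasattr(tokenizer, "add_eos_token"):
            setattr(tokenizer, "add_eos_token", True)
        try:
            return tokenizer(texts)
        finally:
            # restore last, so a tokenizer saved alongside the model
            # is identical to the original one
            if hasattr(tokenizer, "add_eos_token"):
                setattr(tokenizer, "add_eos_token", add_eos_token_flag)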
prompt template registry:

@@ -546,7 +546,7 @@ register_template(
        {"token": "<|end_of_turn|>"},
        "GPT4 Correct Assistant:"
    ],
-    system="You are a helpful assistant.",
+    system="",
    sep=[
        {"token": "<|end_of_turn|>"}
    ],
@@ -668,6 +668,43 @@ register_template(
 )


+r"""
+Supports: https://huggingface.co/wenge-research/yayi-7b
+          https://huggingface.co/wenge-research/yayi-7b-llama2
+          https://huggingface.co/wenge-research/yayi-13b-llama2
+"""
+register_template(
+    name="yayi",
+    prefix=[
+        {"token": "<|System|>"},
+        ":\n{{system}}"
+    ],
+    prompt=[
+        {"token": "<|Human|>"},
+        ":\n{{query}}\n\n",
+        {"token": "<|YaYi|>"},
+        ":"
+    ],
+    system=(
+        "You are a helpful, respectful and honest assistant named YaYi "
+        "developed by Beijing Wenge Technology Co.,Ltd. "
+        "Always answer as helpfully as possible, while being safe. "
+        "Your answers should not include any harmful, unethical, "
+        "racist, sexist, toxic, dangerous, or illegal content. "
+        "Please ensure that your responses are socially unbiased and positive in nature.\n\n"
+        "If a question does not make any sense, or is not factually coherent, "
+        "explain why instead of answering something not correct. "
+        "If you don't know the answer to a question, please don't share false information."
+    ),
+    sep=[
+        "\n\n"
+    ],
+    stop_words=[
+        "<|End|>"
+    ]
+)
+
+
 r"""
 Supports: https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha
           https://huggingface.co/HuggingFaceH4/zephyr-7b-beta