From 38755bced7f06cc5d64b535e4f8c28045342df7d Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 9 Nov 2023 15:53:23 +0800
Subject: [PATCH] add template, modify datasets

Former-commit-id: 386f590209e466b51c17a7ac8cee55fc3ce928d7
---
 data/belle_multiturn/belle_multiturn.py |  7 ++---
 data/dataset_info.json                  |  6 +---
 data/example_dataset/example_dataset.py |  2 +-
 data/hh_rlhf_en/hh_rlhf_en.py           |  8 ++---
 data/ultra_chat/ultra_chat.py           | 33 +++++++++-----------
 src/llmtuner/dsets/preprocess.py        |  6 ++--
 src/llmtuner/extras/template.py         | 41 +++++++++++++++++++++++--
 7 files changed, 65 insertions(+), 38 deletions(-)

diff --git a/data/belle_multiturn/belle_multiturn.py b/data/belle_multiturn/belle_multiturn.py
index 4426b480..816a38bf 100644
--- a/data/belle_multiturn/belle_multiturn.py
+++ b/data/belle_multiturn/belle_multiturn.py
@@ -1,6 +1,5 @@
 import json
 import datasets
-from typing import Any, Dict, List
 
 
 _DESCRIPTION = "BELLE multiturn chat dataset."
@@ -23,7 +22,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("0.0.0")
 
-    def _info(self) -> datasets.DatasetInfo:
+    def _info(self):
         features = datasets.Features({
             "instruction": datasets.Value("string"),
             "output": datasets.Value("string"),
@@ -37,7 +36,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
             citation=_CITATION
         )
 
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
         file_path = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
@@ -48,7 +47,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder):
             )
         ]
 
-    def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat with history
+    def _generate_examples(self, filepath: str):
         with open(filepath, "r", encoding="utf-8") as f:
             for key, row in enumerate(f):
                 data = json.loads(row)
diff --git a/data/dataset_info.json b/data/dataset_info.json
index 5a25a34a..7a7557bc 100644
--- a/data/dataset_info.json
+++ b/data/dataset_info.json
@@ -96,11 +96,7 @@
   },
   "ultra_chat": {
     "script_url": "ultra_chat",
-    "columns": {
-      "prompt": "instruction",
-      "response": "output",
-      "history": "history"
-    }
+    "formatting": "sharegpt"
   },
   "open_platypus": {
     "hf_hub_url": "garage-bAInd/Open-Platypus"
diff --git a/data/example_dataset/example_dataset.py b/data/example_dataset/example_dataset.py
index db3e9ffb..d7492b44 100644
--- a/data/example_dataset/example_dataset.py
+++ b/data/example_dataset/example_dataset.py
@@ -3,7 +3,7 @@ import datasets
 from typing import Any, Dict, List
 
 
-_DESCRIPTION = "An example of dataset for LLaMA."
+_DESCRIPTION = "An example of dataset."
 _CITATION = ""
 _HOMEPAGE = ""
 _LICENSE = ""
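
[Note] dataset_info.json now declares ultra_chat with "formatting": "sharegpt" instead
of an explicit column mapping. For orientation, a record in that style should look
roughly like the sketch below; the field names follow the "conversations" feature
added later in this patch, while the concrete values are made up for illustration:

    # Hypothetical sharegpt-style record (values invented for illustration).
    example = {
        "conversations": [
            {"from": "human", "value": "What is the capital of France?"},
            {"from": "gpt", "value": "The capital of France is Paris."}
        ]
    }
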
_CITATION = "" _HOMEPAGE = "https://huggingface.co/datasets/Anthropic/hh-rlhf" _LICENSE = "mit" @@ -42,7 +42,7 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder): citation=_CITATION ) - def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: + def _split_generators(self, dl_manager: datasets.DownloadManager): file_path = dl_manager.download_and_extract(_URLS) return [ datasets.SplitGenerator( @@ -59,7 +59,7 @@ class HhRlhfEn(datasets.GeneratorBasedBuilder): ) ] - def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM + def _generate_examples(self, filepaths: List[str]): key = 0 for filepath in filepaths: with open(filepath, "r", encoding="utf-8") as f: diff --git a/data/ultra_chat/ultra_chat.py b/data/ultra_chat/ultra_chat.py index dd29311c..c187abb2 100644 --- a/data/ultra_chat/ultra_chat.py +++ b/data/ultra_chat/ultra_chat.py @@ -1,6 +1,6 @@ import json import datasets -from typing import Any, Dict, List +from typing import List _DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data." @@ -21,15 +21,13 @@ _LICENSE = "cc-by-nc-4.0" _BASE_DATA_URL = "https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl" -class BelleMultiturn(datasets.GeneratorBasedBuilder): +class UltraChat(datasets.GeneratorBasedBuilder): VERSION = datasets.Version("0.0.0") - def _info(self) -> datasets.DatasetInfo: + def _info(self): features = datasets.Features({ - "instruction": datasets.Value("string"), - "output": datasets.Value("string"), - "history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))) + "conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}] }) return datasets.DatasetInfo( description=_DESCRIPTION, @@ -39,8 +37,8 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder): citation=_CITATION ) - def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: - file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(9)] # multiple shards + def _split_generators(self, dl_manager: datasets.DownloadManager): + file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)] # multiple shards return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, @@ -50,7 +48,7 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder): ) ] - def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM + def _generate_examples(self, filepaths: List[str]): for filepath in filepaths: with open(filepath, "r", encoding="utf-8") as f: for row in f: @@ -58,19 +56,16 @@ class BelleMultiturn(datasets.GeneratorBasedBuilder): data = json.loads(row) except: continue - key = data["id"] - content = data["data"] + key: int = data["id"] + content: List[str] = data["data"] if len(content) % 2 == 1: content.pop(-1) if len(content) < 2: continue - - query = content[-2] - response = content[-1] - history = [[content[2*i], content[2*i+1]] for i in range(len(content) // 2 - 1)] - + conversations = [{ + "from": "human" if i % 2 == 0 else "gpt", + "value": content[i] + } for i in range(len(content))] yield key, { - "instruction": query, - "output": response, - "history": history + "conversations": conversations } diff --git a/src/llmtuner/dsets/preprocess.py b/src/llmtuner/dsets/preprocess.py index 9e2b6a20..327dfd44 100644 --- a/src/llmtuner/dsets/preprocess.py +++ 
diff --git a/src/llmtuner/dsets/preprocess.py b/src/llmtuner/dsets/preprocess.py
index 9e2b6a20..327dfd44 100644
--- a/src/llmtuner/dsets/preprocess.py
+++ b/src/llmtuner/dsets/preprocess.py
@@ -51,9 +51,6 @@ def preprocess_dataset(
             setattr(tokenizer, "add_eos_token", True)
 
         tokenized_examples = tokenizer(examples["prompt"], **kwargs)
-        # Make sure the saved tokenizer is the same as the original
-        if hasattr(tokenizer, "add_eos_token"): # for Baichuan2 tokenizer
-            setattr(tokenizer, "add_eos_token", add_eos_token_flag)
         concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()}
         total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]])
         block_size = data_args.cutoff_len
@@ -64,6 +61,9 @@ def preprocess_dataset(
             k: [t[i: i + block_size] for i in range(0, total_length, block_size)]
             for k, t in concatenated_examples.items()
         }
+        # make sure the saved tokenizer is the same as the original one
+        if hasattr(tokenizer, "add_eos_token"):
+            setattr(tokenizer, "add_eos_token", add_eos_token_flag)
         return result
 
     def preprocess_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, List[List[int]]]:
diff --git a/src/llmtuner/extras/template.py b/src/llmtuner/extras/template.py
index 401750ce..6e31fe83 100644
--- a/src/llmtuner/extras/template.py
+++ b/src/llmtuner/extras/template.py
@@ -488,7 +488,7 @@ register_template(
     ],
     system=(
         "You are a helpful, respectful and honest assistant. "
-        "Always answer as helpfully as possible, while being safe.  "
+        "Always answer as helpfully as possible, while being safe. "
         "Your answers should not include any harmful, unethical, "
         "racist, sexist, toxic, dangerous, or illegal content. "
         "Please ensure that your responses are socially unbiased and positive in nature.\n\n"
@@ -546,7 +546,7 @@ register_template(
         {"token": "<|end_of_turn|>"},
         "GPT4 Correct Assistant:"
     ],
-    system="You are a helpful assistant.",
+    system="",
     sep=[
         {"token": "<|end_of_turn|>"}
     ],
@@ -668,6 +668,43 @@ register_template(
 )
 
 
+r"""
+Supports: https://huggingface.co/wenge-research/yayi-7b
+          https://huggingface.co/wenge-research/yayi-7b-llama2
+          https://huggingface.co/wenge-research/yayi-13b-llama2
+"""
+register_template(
+    name="yayi",
+    prefix=[
+        {"token": "<|System|>"},
+        ":\n{{system}}"
+    ],
+    prompt=[
+        {"token": "<|Human|>"},
+        ":\n{{query}}\n\n",
+        {"token": "<|YaYi|>"},
+        ":"
+    ],
+    system=(
+        "You are a helpful, respectful and honest assistant named YaYi "
+        "developed by Beijing Wenge Technology Co.,Ltd. "
+        "Always answer as helpfully as possible, while being safe. "
+        "Your answers should not include any harmful, unethical, "
+        "racist, sexist, toxic, dangerous, or illegal content. "
+        "Please ensure that your responses are socially unbiased and positive in nature.\n\n"
+        "If a question does not make any sense, or is not factually coherent, "
+        "explain why instead of answering something not correct. "
+        "If you don't know the answer to a question, please don't share false information."
+    ),
+    sep=[
+        "\n\n"
+    ],
+    stop_words=[
+        "<|End|>"
+    ]
+)
+
+
 r"""
 Supports: https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha
           https://huggingface.co/HuggingFaceH4/zephyr-7b-beta
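
[Note] The preprocess.py hunks only move the add_eos_token restore below the
block-splitting step, so the mapping function still leaves the tokenizer in its
original state on return. For the new "yayi" template, a rough sketch (not the
project's actual rendering code) of the single-turn prompt implied by its
prefix/prompt/sep fields above, with an invented query:

    # Hypothetical rendering implied by the "yayi" template definition.
    system = "You are a helpful, respectful and honest assistant named YaYi ..."
    query = "Who are you?"  # invented user input
    rendered = (
        "<|System|>:\n" + system + "\n\n"   # prefix, then sep
        + "<|Human|>:\n" + query + "\n\n"   # user turn
        + "<|YaYi|>:"                       # assistant tag; generation continues here
    )
    # Generation should halt at the "<|End|>" stop word registered above.
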