Mirror of https://github.com/hiyouga/LLaMA-Factory.git

commit 5ed234ca63
parent 04884a0911
Former-commit-id: af683aacbae462a2a37d76d37df583e217664bd5

add orca_dpo_pairs dataset
@@ -2,6 +2,7 @@ import os
 import json
 import datasets
 
 
+_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
 
 _DESCRIPTION = "BELLE multiturn chat dataset."
@@ -15,9 +16,9 @@ _CITATION = """\
 }
 """
 
-_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M"
+_HOMEPAGE = "{}/datasets/BelleGroup/multiturn_chat_0.8M".format(_HF_ENDPOINT)
 _LICENSE = "gpl-3.0"
-_URL = f"{_HF_ENDPOINT}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"
+_URL = "{}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json".format(_HF_ENDPOINT)
 
 
 class BelleMultiturn(datasets.GeneratorBasedBuilder):
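The two hunks above adjust the BELLE multiturn loader so its download URLs respect the HF_ENDPOINT environment variable and are built with str.format instead of f-strings. A minimal, self-contained sketch of the resulting pattern (the mirror value in the comment is a placeholder for illustration, not something this commit configures):

import os

# Honor an optional mirror endpoint; fall back to the official host.
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")

# Build the dataset URL against whichever endpoint is active,
# e.g. after `export HF_ENDPOINT=https://example-mirror.invalid` (placeholder mirror).
_URL = "{}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json".format(_HF_ENDPOINT)

print(_URL)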
@@ -3,15 +3,12 @@ import json
 import datasets
 from typing import List
 
-_HF_ENDPOINT = os.getenv("_HF_ENDPOINT", "https://huggingface.co")
-
+_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
 _DESCRIPTION = "Human preference data about helpfulness and harmlessness."
-
 _CITATION = ""
-
-_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf"
+_HOMEPAGE = "{}/datasets/Anthropic/hh-rlhf".format(_HF_ENDPOINT)
 _LICENSE = "mit"
-_URL = f"{_HF_ENDPOINT}/datasets/Anthropic/hh-rlhf/resolve/main/"
+_URL = "{}/datasets/Anthropic/hh-rlhf/resolve/main/".format(_HF_ENDPOINT)
 _URLS = {
     "train": [
         _URL + "harmless-base/train.jsonl.gz",
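The hh-rlhf hunk is largely an environment-variable fix: the old code read a variable literally named `_HF_ENDPOINT`, so setting the usual `HF_ENDPOINT` had no effect. A quick sketch of the difference (the mirror URL is a made-up placeholder):

import os

# Simulate a user pointing at a mirror via the documented variable name.
os.environ["HF_ENDPOINT"] = "https://example-mirror.invalid"  # placeholder, illustration only

# Old lookup: wrong variable name, so the override is silently ignored.
print(os.getenv("_HF_ENDPOINT", "https://huggingface.co"))  # -> https://huggingface.co

# New lookup: picks up the override as intended.
print(os.getenv("HF_ENDPOINT", "https://huggingface.co"))   # -> https://example-mirror.invalid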
							
								
								
									
data/orca_rlhf.json.REMOVED.git-id (new file, 1 line)
@@ -0,0 +1 @@
+736bcedea2b24a1414765c6d69cbdafaea839f3c
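The tracked data/orca_rlhf.json.REMOVED.git-id stub stores only a git object id; the actual orca_dpo_pairs JSON is not part of the diff. As a rough illustration of the general shape of a pairwise preference record used for DPO-style training (every field name below is an assumption for illustration, not the file's verified schema):

import json

# Hypothetical pairwise preference record; field names are illustrative only.
record = {
    "system": "You are a helpful assistant.",
    "question": "What is the capital of France?",
    "chosen": "The capital of France is Paris.",
    "rejected": "France has no capital city.",
}

# DPO-style trainers consume (prompt, preferred answer, dispreferred answer) triples.
print(json.dumps(record, indent=2))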
@@ -3,7 +3,7 @@ import json
 import datasets
 from typing import List
 
-_HF_ENDPOINT = os.getenv("_HF_ENDPOINT", "https://huggingface.co")
+_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
 
 _DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data."
 
@@ -18,9 +18,9 @@ _CITATION = """\
 }
 """
 
-_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat"
+_HOMEPAGE = "{}/datasets/stingning/ultrachat".format(_HF_ENDPOINT)
 _LICENSE = "cc-by-nc-4.0"
-_BASE_DATA_URL = "{_HF_ENDPOINT}/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl"
+_BASE_DATA_URL = "{}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl".format(_HF_ENDPOINT)
 
 
 class UltraChat(datasets.GeneratorBasedBuilder):
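The doubled braces in the new _BASE_DATA_URL are the point of that change: .format(_HF_ENDPOINT) fills in the endpoint immediately, while {{idx}} escapes to a literal {idx} placeholder that is filled per shard later. A quick demonstration of the two-stage formatting:

_HF_ENDPOINT = "https://huggingface.co"

# Stage 1: bake in the endpoint; "{{idx}}" survives as a literal "{idx}".
_BASE_DATA_URL = "{}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl".format(_HF_ENDPOINT)
print(_BASE_DATA_URL)
# https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl

# Stage 2: fill in the shard index when each download URL is needed.
print(_BASE_DATA_URL.format(idx=0))
# https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_0.jsonl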
@@ -40,7 +40,7 @@ class UltraChat(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager):
-        file_paths = [dl_manager.download(_BASE_DATA_URL.format(_HF_ENDPOINT=_HF_ENDPOINT,idx=idx)) for idx in range(10)] # multiple shards
+        file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)] # multiple shards
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
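For reference, _split_generators downloads the ten UltraChat shards up front and hands the resulting local paths to the TRAIN split. A trimmed sketch of that pattern under the same assumptions (the `filepaths` keyword passed through gen_kwargs is assumed here; this hunk does not show it):

import datasets

def _split_generators_sketch(dl_manager: datasets.DownloadManager, base_data_url: str):
    # Download shards train_0.jsonl ... train_9.jsonl and collect their local paths.
    file_paths = [dl_manager.download(base_data_url.format(idx=idx)) for idx in range(10)]
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepaths": file_paths},  # assumed kwarg name, illustration only
        )
    ]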