[misc] fix packing and eval plot (#7623)
@@ -25,10 +25,10 @@ from llamafactory.train.test_utils import load_dataset_module
 
 DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data")
 
-TINY_LLAMA = os.getenv("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
+TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
 
 TRAIN_ARGS = {
-    "model_name_or_path": TINY_LLAMA,
+    "model_name_or_path": TINY_LLAMA3,
     "stage": "kto",
     "do_train": True,
     "finetuning_type": "full",
@@ -45,7 +45,7 @@ TRAIN_ARGS = {
 @pytest.mark.parametrize("num_samples", [16])
 def test_feedback_data(num_samples: int):
     train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
-    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
+    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
     original_data = load_dataset(DEMO_DATA, name="kto_en_demo", split="train")
     indexes = random.choices(range(len(original_data)), k=num_samples)
     for index in indexes:
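For context on the rename: these test fixtures follow the standard `os.getenv` override pattern, so both the demo dataset and the tiny model can be swapped per run without editing the file. A minimal self-contained sketch of that pattern (the local checkpoint path in the comment is hypothetical):

```python
import os

# Defaults resolve to the Hugging Face Hub fixtures; either can be
# overridden through the environment, e.g.:
#   TINY_LLAMA3=/path/to/local/checkpoint pytest tests/
DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data")
TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")

print(DEMO_DATA, TINY_LLAMA3)
```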
@@ -25,10 +25,10 @@ from llamafactory.train.test_utils import load_dataset_module
 
 DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data")
 
-TINY_LLAMA = os.getenv("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
+TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
 
 TRAIN_ARGS = {
-    "model_name_or_path": TINY_LLAMA,
+    "model_name_or_path": TINY_LLAMA3,
     "stage": "rm",
     "do_train": True,
     "finetuning_type": "full",
@@ -54,7 +54,7 @@ def _convert_sharegpt_to_openai(messages: list[dict[str, str]]) -> list[dict[str
 @pytest.mark.parametrize("num_samples", [16])
 def test_pairwise_data(num_samples: int):
     train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
-    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
+    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
     original_data = load_dataset(DEMO_DATA, name="dpo_en_demo", split="train")
     indexes = random.choices(range(len(original_data)), k=num_samples)
     for index in indexes:
@@ -25,12 +25,12 @@ from llamafactory.train.test_utils import load_dataset_module
 
 DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data")
 
-TINY_LLAMA = os.getenv("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
+TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
 
 TINY_DATA = os.getenv("TINY_DATA", "llamafactory/tiny-supervised-dataset")
 
 TRAIN_ARGS = {
-    "model_name_or_path": TINY_LLAMA,
+    "model_name_or_path": TINY_LLAMA3,
     "stage": "sft",
     "do_train": True,
     "finetuning_type": "full",
@@ -45,7 +45,7 @@ TRAIN_ARGS = {
 @pytest.mark.parametrize("num_samples", [16])
 def test_supervised_single_turn(num_samples: int):
     train_dataset = load_dataset_module(dataset_dir="ONLINE", dataset=TINY_DATA, **TRAIN_ARGS)["train_dataset"]
-    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
+    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
     original_data = load_dataset(TINY_DATA, split="train")
     indexes = random.choices(range(len(original_data)), k=num_samples)
     for index in indexes:
@@ -66,7 +66,7 @@ def test_supervised_multi_turn(num_samples: int):
     train_dataset = load_dataset_module(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)[
         "train_dataset"
     ]
-    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
+    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
     original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
     indexes = random.choices(range(len(original_data)), k=num_samples)
     for index in indexes:
@@ -79,7 +79,7 @@ def test_supervised_train_on_prompt(num_samples: int):
     train_dataset = load_dataset_module(
         dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", train_on_prompt=True, **TRAIN_ARGS
     )["train_dataset"]
-    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
+    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
     original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
     indexes = random.choices(range(len(original_data)), k=num_samples)
     for index in indexes:
@@ -93,7 +93,7 @@ def test_supervised_mask_history(num_samples: int):
     train_dataset = load_dataset_module(
         dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", mask_history=True, **TRAIN_ARGS
     )["train_dataset"]
-    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
+    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
     original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
     indexes = random.choices(range(len(original_data)), k=num_samples)
     for index in indexes:
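The four supervised variants above differ only in the flags forwarded to `load_dataset_module`. A condensed sketch of those calls, assuming the llamafactory test environment; `TRAIN_ARGS` is reproduced with only the keys the hunks show, so the elided keys may be needed for the calls to actually run:

```python
import os

from llamafactory.train.test_utils import load_dataset_module

DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data")
TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
TINY_DATA = os.getenv("TINY_DATA", "llamafactory/tiny-supervised-dataset")

TRAIN_ARGS = {  # partial: the hunks elide the remaining keys
    "model_name_or_path": TINY_LLAMA3,
    "stage": "sft",
    "do_train": True,
    "finetuning_type": "full",
}

# The variants swap the loading mode ("ONLINE" vs. "REMOTE:<repo>") and
# toggle one masking flag at a time, exactly as in the hunks above:
single_turn = load_dataset_module(dataset_dir="ONLINE", dataset=TINY_DATA, **TRAIN_ARGS)
multi_turn = load_dataset_module(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)
on_prompt = load_dataset_module(
    dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", train_on_prompt=True, **TRAIN_ARGS
)
masked = load_dataset_module(
    dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", mask_history=True, **TRAIN_ARGS
)
```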
@@ -24,12 +24,12 @@ from llamafactory.train.test_utils import load_dataset_module
 
 DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data")
 
-TINY_LLAMA = os.getenv("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
+TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
 
 TINY_DATA = os.getenv("TINY_DATA", "llamafactory/tiny-supervised-dataset")
 
 TRAIN_ARGS = {
-    "model_name_or_path": TINY_LLAMA,
+    "model_name_or_path": TINY_LLAMA3,
     "stage": "ppo",
     "do_train": True,
     "finetuning_type": "full",
@@ -48,7 +48,7 @@ TRAIN_ARGS = {
 @pytest.mark.parametrize("num_samples", [16])
 def test_unsupervised_data(num_samples: int):
     train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
-    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA)
+    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
     original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
     indexes = random.choices(range(len(original_data)), k=num_samples)
     for index in indexes:
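All four files share the same spot-check shape: build the processed `train_dataset`, load a reference tokenizer from the same checkpoint, then compare sampled rows. One detail worth noting is that `random.choices` samples with replacement, so an index can be checked twice; a self-contained illustration of just that sampling step:

```python
import random

original_data = list(range(100))  # stand-in for the loaded dataset

# random.choices draws WITH replacement: the 16 picks may repeat an index.
indexes = random.choices(range(len(original_data)), k=16)
assert len(indexes) == 16

# random.sample draws WITHOUT replacement, if distinct rows were required:
distinct = random.sample(range(len(original_data)), k=16)
assert len(set(distinct)) == 16
```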