mirror of
https://github.com/hiyouga/LLaMA-Factory.git
synced 2026-04-27 18:29:08 +08:00
[data] support discarding history CoT for multi-turn (#10435)
This commit is contained in:
@@ -181,6 +181,39 @@ def test_reasoning_encode_multiturn(cot_messages: bool, enable_thinking: bool):
|
||||
(prompt_str_1, answer_str_1, prompt_str_2, answer_str_2),
|
||||
)
|
||||
|
||||
@pytest.mark.runs_on(["cpu", "mps"])
@pytest.mark.parametrize("enable_thinking", [True, False, None])
@pytest.mark.parametrize("discarding_history_cot", [True, False])
def test_reasoning_encode_multiturn_discarding_history_cot(enable_thinking: bool, discarding_history_cot: bool):
    """Multi-turn encoding with the qwen3 template: optionally drop the CoT from history turns.

    Builds the expected prompt/answer strings for both turns under every combination of
    ``enable_thinking`` and ``discarding_history_cot`` and checks them against the
    template's ``encode_multiturn`` output.
    """
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
    data_args = DataArguments(template="qwen3", enable_thinking=enable_thinking)
    template = get_template_and_fix_tokenizer(tokenizer, data_args)
    encoded_pairs = template.encode_multiturn(
        tokenizer, MESSAGES_WITH_THOUGHT, discarding_history_cot=discarding_history_cot
    )

    def expected_prompt(message: dict) -> str:
        # Qwen chat-ml user turn followed by the assistant header.
        return f"<|im_start|>user\n{message['content']}<|im_end|>\n<|im_start|>assistant\n"

    empty_think = "<think>\n\n</think>\n\n"
    prompt_str_1 = expected_prompt(MESSAGES_WITH_THOUGHT[0])
    prompt_str_2 = expected_prompt(MESSAGES_WITH_THOUGHT[2])

    if enable_thinking is False:
        # Thinking disabled: answers never contain CoT; an empty think block is
        # injected into the prompts instead.
        answer_str_1 = f"{MESSAGES[1]['content']}<|im_end|>\n"
        answer_str_2 = f"{MESSAGES[3]['content']}<|im_end|>\n"
        prompt_str_2 += empty_think
        if not discarding_history_cot:
            # NOTE(review): only the latest-turn prompt keeps the empty think block
            # when history CoT is discarded — confirm against the template code.
            prompt_str_1 += empty_think
    else:
        # Thinking enabled (True or template default None): the final answer keeps
        # its CoT; the history answer keeps it only when not discarded.
        first_answer_source = MESSAGES if discarding_history_cot else MESSAGES_WITH_THOUGHT
        answer_str_1 = f"{first_answer_source[1]['content']}<|im_end|>\n"
        answer_str_2 = f"{MESSAGES_WITH_THOUGHT[3]['content']}<|im_end|>\n"

    _check_tokenization(
        tokenizer,
        (encoded_pairs[0][0], encoded_pairs[0][1], encoded_pairs[1][0], encoded_pairs[1][1]),
        (prompt_str_1, answer_str_1, prompt_str_2, answer_str_2),
    )
|
||||
|
||||
|
||||
@pytest.mark.runs_on(["cpu", "mps"])
|
||||
def test_jinja_template():
|
||||
|
||||
Reference in New Issue
Block a user