mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-11-04 18:02:19 +08:00)
[deps] upgrade transformers (#8159)

commit ba032828e2, parent 501e7d8a8f

Summary of the diff: the transformers upper bound rises from 4.52.1 to 4.52.3 and the gradio upper bound from 5.30.0 to 5.31.0; the WebUI now passes the multimodal freeze/pixel options only for models listed in MULTIMODAL_SUPPORTED_MODELS; the Qwen3 and Phi-4 template tests are reworked accordingly. File paths below are reconstructed from the hunk contexts.
requirements.txt
@@ -1,10 +1,10 @@
-transformers>=4.45.0,<=4.52.1,!=4.46.*,!=4.47.*,!=4.48.0,!=4.52.0
+transformers>=4.45.0,<=4.52.3,!=4.46.*,!=4.47.*,!=4.48.0,!=4.52.0
 datasets>=2.16.0,<=3.6.0
 accelerate>=0.34.0,<=1.7.0
 peft>=0.14.0,<=0.15.2
 trl>=0.8.6,<=0.9.6
 tokenizers>=0.19.0,<=0.21.1
-gradio>=4.38.0,<=5.30.0
+gradio>=4.38.0,<=5.31.0
 scipy
 einops
 sentencepiece
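Not part of the commit, but useful context: the net effect of the new transformers range can be checked with the packaging library, the same machinery pip applies to these specifiers. The probed versions below are arbitrary picks.

# Which transformers versions does each range admit?
from packaging.specifiers import SpecifierSet

old = SpecifierSet(">=4.45.0,<=4.52.1,!=4.46.*,!=4.47.*,!=4.48.0,!=4.52.0")
new = SpecifierSet(">=4.45.0,<=4.52.3,!=4.46.*,!=4.47.*,!=4.48.0,!=4.52.0")

for v in ["4.48.0", "4.48.1", "4.52.0", "4.52.1", "4.52.2", "4.52.3"]:
    print(f"{v}: old={v in old} new={v in new}")
# Only 4.52.2 and 4.52.3 change: both rejected before, both admitted now;
# 4.52.0 stays excluded in both ranges.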
src/llamafactory/extras/misc.py
@@ -95,7 +95,7 @@ def check_version(requirement: str, mandatory: bool = False) -> None:
 def check_dependencies() -> None:
     r"""Check the version of the required packages."""
     check_version(
-        "transformers>=4.45.0,<=4.52.1,!=4.46.0,!=4.46.1,!=4.46.2,!=4.46.3,!=4.47.0,!=4.47.1,!=4.48.0,!=4.52.0"
+        "transformers>=4.45.0,<=4.52.3,!=4.46.0,!=4.46.1,!=4.46.2,!=4.46.3,!=4.47.0,!=4.47.1,!=4.48.0,!=4.52.0"
     )
     check_version("datasets>=2.16.0,<=3.6.0")
     check_version("accelerate>=0.34.0,<=1.7.0")
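Note that the runtime gate spells out the 4.46.x and 4.47.x exclusions explicitly rather than using the !=4.46.* wildcard form from requirements.txt. The project's real check_version sits just above this hunk; a minimal sketch of the pattern (mine, not the project's code) built on importlib.metadata and packaging:

# Hypothetical sketch of a version gate; names here are assumptions.
from importlib.metadata import version as installed_version

from packaging.requirements import Requirement


def check_version_sketch(requirement: str, mandatory: bool = False) -> None:
    """Warn (or raise) when the installed package falls outside the range."""
    req = Requirement(requirement)
    current = installed_version(req.name)  # PackageNotFoundError if missing
    if not req.specifier.contains(current, prereleases=True):
        msg = f"{req.name}=={current} does not satisfy '{req.specifier}'"
        if mandatory:
            raise RuntimeError(msg)
        print(f"warning: {msg}")


check_version_sketch("transformers>=4.45.0,<=4.52.3,!=4.52.0")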
src/llamafactory/webui/common.py
@@ -163,7 +163,7 @@ def save_args(config_path: str, config_dict: dict[str, Any]) -> None:
 
 def _clean_cmd(args: dict[str, Any]) -> dict[str, Any]:
     r"""Remove args with NoneType or False or empty string value."""
-    no_skip_keys = ["packing"]
+    no_skip_keys = ["packing", "freeze_vision_tower", "freeze_multi_modal_projector", "freeze_language_model"]
     return {k: v for k, v in args.items() if (k in no_skip_keys) or (v is not None and v is not False and v != "")}
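Why the whitelist grows: _clean_cmd drops False values by default, but for these freeze flags an explicit False from the UI is a deliberate setting that must survive into the generated command. An illustrative call against a copy of the new function:

from typing import Any


def _clean_cmd(args: dict[str, Any]) -> dict[str, Any]:  # copied from the hunk above
    no_skip_keys = ["packing", "freeze_vision_tower", "freeze_multi_modal_projector", "freeze_language_model"]
    return {k: v for k, v in args.items() if (k in no_skip_keys) or (v is not None and v is not False and v != "")}


args = {
    "stage": "sft",
    "packing": False,              # kept: whitelisted
    "freeze_vision_tower": False,  # kept: newly whitelisted
    "use_galore": False,           # dropped: False and not whitelisted
    "adapter_name_or_path": "",    # dropped: empty string
    "max_samples": None,           # dropped: None
}
print(_clean_cmd(args))
# {'stage': 'sft', 'packing': False, 'freeze_vision_tower': False}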
src/llamafactory/webui/runner.py
@@ -22,14 +22,13 @@ from typing import TYPE_CHECKING, Any, Optional
 from transformers.trainer import TRAINING_ARGS_NAME
 from transformers.utils import is_torch_npu_available
 
-from ..extras.constants import LLAMABOARD_CONFIG, PEFT_METHODS, TRAINING_STAGES
+from ..extras.constants import LLAMABOARD_CONFIG, MULTIMODAL_SUPPORTED_MODELS, PEFT_METHODS, TRAINING_STAGES
 from ..extras.misc import is_accelerator_available, torch_gc, use_ray
 from ..extras.packages import is_gradio_available
 from .common import (
     DEFAULT_CACHE_DIR,
     DEFAULT_CONFIG_DIR,
     abort_process,
-    calculate_pixels,
     gen_cmd,
     get_save_dir,
     load_args,
@@ -165,13 +164,6 @@ class Runner:
             use_llama_pro=get("train.use_llama_pro"),
             enable_thinking=get("train.enable_thinking"),
             report_to=get("train.report_to"),
-            freeze_vision_tower=get("train.freeze_vision_tower"),
-            freeze_multi_modal_projector=get("train.freeze_multi_modal_projector"),
-            freeze_language_model=get("train.freeze_language_model"),
-            image_max_pixels=calculate_pixels(get("train.image_max_pixels")),
-            image_min_pixels=calculate_pixels(get("train.image_min_pixels")),
-            video_max_pixels=calculate_pixels(get("train.video_max_pixels")),
-            video_min_pixels=calculate_pixels(get("train.video_min_pixels")),
             use_galore=get("train.use_galore"),
             use_apollo=get("train.use_apollo"),
             use_badam=get("train.use_badam"),
@@ -244,6 +236,16 @@ class Runner:
             args["pref_ftx"] = get("train.pref_ftx")
             args["pref_loss"] = get("train.pref_loss")
 
+        # multimodal config
+        if model_name in MULTIMODAL_SUPPORTED_MODELS:
+            args["freeze_vision_tower"] = get("train.freeze_vision_tower")
+            args["freeze_multi_modal_projector"] = get("train.freeze_multi_modal_projector")
+            args["freeze_language_model"] = get("train.freeze_language_model")
+            args["image_max_pixels"] = get("train.image_max_pixels")
+            args["image_min_pixels"] = get("train.image_min_pixels")
+            args["video_max_pixels"] = get("train.video_max_pixels")
+            args["video_min_pixels"] = get("train.video_min_pixels")
+
         # galore config
         if args["use_galore"]:
             args["galore_rank"] = get("train.galore_rank")
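Net effect of the three runner hunks: the freeze/pixel options are no longer passed as unconditional keyword arguments; they are attached only when the chosen model appears in MULTIMODAL_SUPPORTED_MODELS, so text-only configs stay clean. A hypothetical miniature of the pattern (everything except the constant's name is a stand-in):

from typing import Any, Callable

# Stand-in set; the real constant is imported from ..extras.constants.
MULTIMODAL_SUPPORTED_MODELS = {"Qwen2-VL-7B-Instruct"}


def build_args(model_name: str, get: Callable[[str], Any]) -> dict[str, Any]:
    args: dict[str, Any] = {"model_name": model_name}
    if model_name in MULTIMODAL_SUPPORTED_MODELS:
        # attached only for multimodal models
        args["freeze_vision_tower"] = get("train.freeze_vision_tower")
        args["image_max_pixels"] = get("train.image_max_pixels")
    return args


ui_values = {"train.freeze_vision_tower": False, "train.image_max_pixels": 768 * 768}
print(build_args("Qwen2-VL-7B-Instruct", ui_values.__getitem__))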
tests/data/test_template.py
@@ -129,23 +129,24 @@ def test_encode_multiturn(use_fast: bool):
 @pytest.mark.parametrize("use_fast", [True, False])
 @pytest.mark.parametrize("cot_messages", [True, False])
 @pytest.mark.parametrize("enable_thinking", [True, False, None])
 def test_reasoning_encode_oneturn(use_fast: bool, cot_messages: bool, enable_thinking: bool):
-    input_messages = MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES
     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", use_fast=use_fast)
     data_args = DataArguments(template="qwen3", enable_thinking=enable_thinking)
     template = get_template_and_fix_tokenizer(tokenizer, data_args)
-    prompt_ids, answer_ids = template.encode_oneturn(tokenizer, input_messages)
-    output_messages = MESSAGES if enable_thinking is False else input_messages
+    prompt_ids, answer_ids = template.encode_oneturn(tokenizer, MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES)
+
     prompt_str = (
-        f"<|im_start|>user\n{output_messages[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
+        f"<|im_start|>user\n{MESSAGES[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
         f"{MESSAGES[1]['content']}<|im_end|>\n"
-        f"<|im_start|>user\n{output_messages[2]['content']}<|im_end|>\n<|im_start|>assistant\n"
+        f"<|im_start|>user\n{MESSAGES[2]['content']}<|im_end|>\n<|im_start|>assistant\n"
     )
-    answer_str = f"{output_messages[3]['content']}<|im_end|>\n"
     if not cot_messages or enable_thinking is False:
+        answer_str = f"{MESSAGES[3]['content']}<|im_end|>\n"
         if enable_thinking:
             answer_str = "<think>\n\n</think>\n\n" + answer_str
         else:
             prompt_str = prompt_str + "<think>\n\n</think>\n\n"
+    else:
+        answer_str = f"{MESSAGES_WITH_THOUGHT[3]['content']}<|im_end|>\n"
 
     _check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str))
@@ -154,16 +155,16 @@ def test_reasoning_encode_oneturn(use_fast: bool, cot_messages: bool, enable_thi
 @pytest.mark.parametrize("cot_messages", [True, False])
 @pytest.mark.parametrize("enable_thinking", [True, False, None])
 def test_reasoning_encode_multiturn(use_fast: bool, cot_messages: bool, enable_thinking: bool):
-    input_messages = MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES
     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", use_fast=use_fast)
     data_args = DataArguments(template="qwen3", enable_thinking=enable_thinking)
     template = get_template_and_fix_tokenizer(tokenizer, data_args)
-    encoded_pairs = template.encode_multiturn(tokenizer, input_messages)
-    output_messages = MESSAGES if enable_thinking is False else input_messages
-    prompt_str_1 = f"<|im_start|>user\n{output_messages[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
-    answer_str_1 = f"{output_messages[1]['content']}<|im_end|>\n"
-    prompt_str_2 = f"<|im_start|>user\n{output_messages[2]['content']}<|im_end|>\n<|im_start|>assistant\n"
-    answer_str_2 = f"{output_messages[3]['content']}<|im_end|>\n"
+    encoded_pairs = template.encode_multiturn(tokenizer, MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES)
+
+    messages = MESSAGES if not cot_messages or enable_thinking is False else MESSAGES_WITH_THOUGHT
+    prompt_str_1 = f"<|im_start|>user\n{MESSAGES[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
+    answer_str_1 = f"{messages[1]['content']}<|im_end|>\n"
+    prompt_str_2 = f"<|im_start|>user\n{MESSAGES[2]['content']}<|im_end|>\n<|im_start|>assistant\n"
+    answer_str_2 = f"{messages[3]['content']}<|im_end|>\n"
     if not cot_messages or enable_thinking is False:
         if enable_thinking:
             answer_str_1 = "<think>\n\n</think>\n\n" + answer_str_1
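A condensed restatement (mine, not from the diff) of the expectation logic the two reasoning tests now share: with thinking enabled but no CoT in the data, an empty think block is prepended to the answer; with thinking disabled (or left as None), it moves into the prompt; only when the data carries a CoT and thinking is not explicitly off does the answer keep its original think content.

# Illustrative-only condensation of the branch logic exercised above.
from typing import Optional


def think_block_goes_to(cot_messages: bool, enable_thinking: Optional[bool]) -> str:
    if cot_messages and enable_thinking is not False:
        return "answer keeps its original <think> content"
    if enable_thinking:  # thinking on, but the sample has no CoT
        return "empty think block is prepended to the answer"
    return "empty think block is appended to the prompt"  # False or None


for cot in (True, False):
    for thinking in (True, False, None):
        print(cot, thinking, "->", think_block_goes_to(cot, thinking))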
@@ -253,7 +254,11 @@ def test_llama4_template(use_fast: bool):
 
 
 @pytest.mark.parametrize(
-    "use_fast", [True, pytest.param(False, marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken."))]
+    "use_fast",
+    [
+        pytest.param(True, marks=pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")),
+        pytest.param(False, marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken.")),
+    ],
 )
 def test_phi4_template(use_fast: bool):
     prompt_str = (
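The pattern above is standard pytest: pytest.param(value, marks=...) attaches marks to one parameter, and pytest.mark.xfail(condition, reason=...) makes the expected failure conditional. A self-contained sketch with stand-in names:

import os

import pytest

HAVE_TOKEN = bool(os.getenv("HF_TOKEN"))


@pytest.mark.parametrize(
    "use_fast",
    [
        # xfail only when the condition holds (no token in the environment)
        pytest.param(True, marks=pytest.mark.xfail(not HAVE_TOKEN, reason="Authorization.")),
        # unconditional xfail for the known-broken slow path
        pytest.param(False, marks=pytest.mark.xfail(reason="slow tokenizer is broken")),
    ],
)
def test_tokenizer_mode(use_fast: bool) -> None:
    assert use_fast  # the False case is expected to fail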
@@ -266,6 +271,7 @@ def test_phi4_template(use_fast: bool):
     _check_template("microsoft/phi-4", "phi4", prompt_str, answer_str, use_fast)
 
 
+@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
 @pytest.mark.parametrize("use_fast", [True, False])
 def test_qwen2_5_template(use_fast: bool):
     prompt_str = (
@@ -282,16 +288,18 @@ def test_qwen2_5_template(use_fast: bool):
 @pytest.mark.parametrize("use_fast", [True, False])
 @pytest.mark.parametrize("cot_messages", [True, False])
 def test_qwen3_template(use_fast: bool, cot_messages: bool):
-    messages = MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES
     prompt_str = (
-        f"<|im_start|>user\n{messages[0]['content']}<|im_end|>\n"
+        f"<|im_start|>user\n{MESSAGES[0]['content']}<|im_end|>\n"
         f"<|im_start|>assistant\n{MESSAGES[1]['content']}<|im_end|>\n"
-        f"<|im_start|>user\n{messages[2]['content']}<|im_end|>\n"
+        f"<|im_start|>user\n{MESSAGES[2]['content']}<|im_end|>\n"
         "<|im_start|>assistant\n"
     )
-    answer_str = f"{messages[3]['content']}<|im_end|>\n"
     if not cot_messages:
-        answer_str = "<think>\n\n</think>\n\n" + answer_str
+        answer_str = f"<think>\n\n</think>\n\n{MESSAGES[3]['content']}<|im_end|>\n"
+        messages = MESSAGES
+    else:
+        answer_str = f"{MESSAGES_WITH_THOUGHT[3]['content']}<|im_end|>\n"
+        messages = MESSAGES_WITH_THOUGHT
 
     _check_template("Qwen/Qwen3-8B", "qwen3", prompt_str, answer_str, use_fast, messages=messages)
@@ -309,6 +317,7 @@ def test_parse_llama3_template():
     assert template.default_system == ""
 
 
+@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
 def test_parse_qwen_template():
     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
     template = parse_template(tokenizer)
@@ -320,6 +329,7 @@ def test_parse_qwen_template():
     assert template.default_system == "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."
 
 
+@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
 def test_parse_qwen3_template():
     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", token=HF_TOKEN)
     template = parse_template(tokenizer)
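All of the new Authorization xfails key off the same HF_TOKEN constant already used by the from_pretrained calls; presumably it is read once from the environment at module import, along these lines (an assumption, not shown in the diff):

# Assumption: HF_TOKEN is read from the environment in the test module.
import os

HF_TOKEN = os.getenv("HF_TOKEN")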