[misc] lint (#9593)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Author: Yaowei Zheng
Date: 2025-12-09 18:00:35 +08:00
Committed by: GitHub
Parent: 1bbb461f76
Commit: 5d56817e2b
23 changed files with 51 additions and 47 deletions
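
Most of the diff below is a formatting-only cleanup: the custom marker @pytest.mark.runs_on(["cpu","npu"]) gains a space after the comma (["cpu", "npu"]) to satisfy the linter. runs_on and skip_on_devices are project-defined markers rather than built-in pytest ones; the sketch below shows one common way such device markers can be registered in a conftest.py. It is an assumed illustration (the device detection and hook bodies are guesses), not LLaMA-Factory's actual test configuration.

# conftest.py -- assumed sketch of registering device-selection markers.
import pytest

CURRENT_DEVICE = "cpu"  # assumed: the real suite would detect the device at session start

def pytest_configure(config):
    config.addinivalue_line("markers", "runs_on(devices): run the test only on the listed devices")
    config.addinivalue_line("markers", "skip_on_devices(*devices): skip the test on the listed devices")

def pytest_collection_modifyitems(config, items):
    for item in items:
        runs_on = item.get_closest_marker("runs_on")
        if runs_on and CURRENT_DEVICE not in runs_on.args[0]:
            item.add_marker(pytest.mark.skip(reason=f"requires one of {runs_on.args[0]}"))
        skip_on = item.get_closest_marker("skip_on_devices")
        if skip_on and CURRENT_DEVICE in skip_on.args:
            item.add_marker(pytest.mark.skip(reason=f"not supported on {CURRENT_DEVICE}"))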

View File

@@ -25,7 +25,7 @@ TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
UNUSED_TOKEN = "<|UNUSED_TOKEN|>"


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.parametrize("special_tokens", [False, True])
def test_add_tokens(special_tokens: bool):
    if special_tokens:

View File

@@ -17,13 +17,16 @@ import os
import pytest
from transformers.utils import is_flash_attn_2_available

# Compatible with Transformers v4 and Transformers v5
try:
    from transformers.utils import is_torch_sdpa_available
except ImportError:

    def is_torch_sdpa_available():
        return True


from llamafactory.extras.packages import is_transformers_version_greater_than
from llamafactory.train.test_utils import load_infer_model
@@ -36,7 +39,7 @@ INFER_ARGS = {
}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
def test_attention():
    attention_available = ["disabled"]
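
For context on the import hunk above: newer Transformers releases remove is_torch_sdpa_available from transformers.utils (scaled-dot-product attention is assumed to be present on the torch versions they support), so the try/except fallback simply reports it as available. Below is a hedged sketch of how these availability helpers are typically combined inside a test such as test_attention(); the exact body in the repository may differ, and the "sdpa"/"fa2" backend names are assumptions based on LLaMA-Factory's flash_attn options.

# Hypothetical continuation of test_attention(); backend names are assumed.
attention_available = ["disabled"]
if is_torch_sdpa_available():
    attention_available.append("sdpa")
if is_flash_attn_2_available():
    attention_available.append("fa2")
# Each candidate backend would then be passed (e.g. as flash_attn=...) to the
# model loader, and the resulting attention implementation asserted against it.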

View File

@@ -39,7 +39,7 @@ TRAIN_ARGS = {
}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.parametrize("disable_gradient_checkpointing", [False, True])
def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
    model = load_train_model(disable_gradient_checkpointing=disable_gradient_checkpointing, **TRAIN_ARGS)
@@ -47,14 +47,14 @@ def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
        assert getattr(module, "gradient_checkpointing") != disable_gradient_checkpointing


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_unsloth_gradient_checkpointing():
    model = load_train_model(use_unsloth_gc=True, **TRAIN_ARGS)
    for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()):
        assert module._gradient_checkpointing_func.__self__.__name__ == "UnslothGradientCheckpointing"


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_upcast_layernorm():
    model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS)
    for name, param in model.named_parameters():
@@ -62,7 +62,7 @@ def test_upcast_layernorm():
        assert param.dtype == torch.float32


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_upcast_lmhead_output():
    model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS)
    inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device())
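
The last hunk sets up a float16 input for the LM-head upcast check. A hedged guess at the assertion that follows (the remainder of the test body is not shown in this diff):

# Hypothetical continuation of test_upcast_lmhead_output(); with
# upcast_lmhead_output=True the head is expected to emit float32 even for fp16 input.
outputs = model.get_output_embeddings()(inputs)
assert outputs.dtype == torch.float32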

View File

@@ -24,7 +24,7 @@ from llamafactory.model.model_utils.misc import find_expanded_modules
HF_TOKEN = os.getenv("HF_TOKEN")


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
def test_expanded_modules():
    config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

View File

@@ -18,7 +18,7 @@ import torch
from llamafactory.model.model_utils.packing import get_seqlens_in_batch, get_unpad_data


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.parametrize(
    "attention_mask,golden_seq_lens",
    [
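
get_seqlens_in_batch and get_unpad_data work on "packed" attention masks, where the tokens of the k-th sequence packed into a row are labeled k and padding positions are 0; the parametrization pairs such masks with the expected (golden) per-sequence lengths. Below is a minimal sketch of the idea for illustration only, not the repository's implementation.

# A hedged sketch of deriving per-sequence lengths from a packed attention mask.
import torch

def seqlens_in_batch_sketch(attention_mask: torch.Tensor) -> torch.Tensor:
    counts = []
    for row in attention_mask:
        row = row[row != 0]  # drop padding positions
        _, row_counts = torch.unique_consecutive(row, return_counts=True)
        counts.append(row_counts)
    return torch.cat(counts)  # flat tensor of all sequence lengths in the batch

mask = torch.tensor([[1, 1, 2, 2, 2, 0]])
print(seqlens_in_batch_sketch(mask))  # tensor([2, 3])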

View File

@@ -23,7 +23,7 @@ from llamafactory.hparams import FinetuningArguments, ModelArguments
from llamafactory.model.adapter import init_adapter


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.parametrize("freeze_vision_tower", (False, True))
@pytest.mark.parametrize("freeze_multi_modal_projector", (False, True))
@pytest.mark.parametrize("freeze_language_model", (False, True))
@@ -49,7 +49,7 @@ def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bo
        assert param.requires_grad != freeze_language_model


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False)))
def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
    model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
@@ -82,7 +82,7 @@ def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
        assert (merger_param_name in trainable_params) is False


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_visual_model_save_load():
    # check VLM's state dict: https://github.com/huggingface/transformers/pull/38385
    model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
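
test_visual_model_save_load guards against the VLM state-dict key handling fixed in the linked Transformers pull request. Below is a hedged sketch of the kind of save/reload round-trip check such a test performs; assert_state_dict_roundtrip is a hypothetical helper, not the repository's code.

# A minimal, assumed sketch: save a loaded PreTrainedModel and verify that the
# reloaded state dict has the same keys and numerically identical tensors.
import torch

def assert_state_dict_roundtrip(model, tmp_dir: str) -> None:
    before = {name: tensor.clone() for name, tensor in model.state_dict().items()}
    model.save_pretrained(tmp_dir)  # write weights to disk
    reloaded = type(model).from_pretrained(tmp_dir)  # read them back
    after = reloaded.state_dict()
    assert before.keys() == after.keys()  # no keys renamed or dropped
    for name in before:
        # cast to float32 so the comparison is independent of storage dtype
        assert torch.allclose(before[name].float(), after[name].float())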

View File

@@ -29,14 +29,16 @@ INFER_ARGS = {
    "infer_dtype": "float16",
}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
+@pytest.mark.skip_on_devices("npu")
def test_base():
    model = load_infer_model(**INFER_ARGS)
    ref_model = load_reference_model(TINY_LLAMA3)
    compare_model(model, ref_model)


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
+@pytest.mark.skip_on_devices("npu")
@pytest.mark.usefixtures("fix_valuehead_cpu_loading")
def test_valuehead():
View File

@@ -44,7 +44,7 @@ INFER_ARGS = {
}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_freeze_train_all_modules():
    model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS)
    for name, param in model.named_parameters():
@@ -56,7 +56,7 @@ def test_freeze_train_all_modules():
        assert param.dtype == torch.float16


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_freeze_train_extra_modules():
    model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS)
    for name, param in model.named_parameters():
@@ -68,7 +68,7 @@ def test_freeze_train_extra_modules():
        assert param.dtype == torch.float16


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_freeze_inference():
    model = load_infer_model(**INFER_ARGS)
    for param in model.parameters():

View File

@@ -43,14 +43,16 @@ INFER_ARGS = {
    "infer_dtype": "float16",
}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_full_train():
    model = load_train_model(**TRAIN_ARGS)
    for param in model.parameters():
        assert param.requires_grad is True
        assert param.dtype == torch.float32


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_full_inference():
    model = load_infer_model(**INFER_ARGS)
    for param in model.parameters():

View File

@@ -55,35 +55,35 @@ INFER_ARGS = {
}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_lora_train_qv_modules():
    model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS)
    linear_modules, _ = check_lora_model(model)
    assert linear_modules == {"q_proj", "v_proj"}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_lora_train_all_modules():
    model = load_train_model(lora_target="all", **TRAIN_ARGS)
    linear_modules, _ = check_lora_model(model)
    assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_lora_train_extra_modules():
    model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS)
    _, extra_modules = check_lora_model(model)
    assert extra_modules == {"embed_tokens", "lm_head"}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_lora_train_old_adapters():
    model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=False, **TRAIN_ARGS)
    ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
    compare_model(model, ref_model)


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
def test_lora_train_new_adapters():
    model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=True, **TRAIN_ARGS)
    ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
@@ -92,7 +92,7 @@ def test_lora_train_new_adapters():
    )


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.usefixtures("fix_valuehead_cpu_loading")
def test_lora_train_valuehead():
    model = load_train_model(add_valuehead=True, **TRAIN_ARGS)
@@ -102,7 +102,8 @@ def test_lora_train_valuehead():
    assert torch.allclose(state_dict["v_head.summary.weight"], ref_state_dict["v_head.summary.weight"])
    assert torch.allclose(state_dict["v_head.summary.bias"], ref_state_dict["v_head.summary.bias"])


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
+@pytest.mark.skip_on_devices("npu")
def test_lora_inference():
    model = load_infer_model(**INFER_ARGS)

View File

@@ -49,14 +49,15 @@ INFER_ARGS = {
}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.xfail(reason="PiSSA initialization is not stable in different platform.")
def test_pissa_train():
    model = load_train_model(**TRAIN_ARGS)
    ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=True)
    compare_model(model, ref_model)


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.xfail(reason="Known connection error.")
def test_pissa_inference():
    model = load_infer_model(**INFER_ARGS)