From 5d56817e2bdae02d6358510fd9e83a8d81655a86 Mon Sep 17 00:00:00 2001
From: Yaowei Zheng
Date: Tue, 9 Dec 2025 18:00:35 +0800
Subject: [PATCH] [misc] lint (#9593)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
---
 .github/workflows/tests_npu.yml | 2 +-
 examples/accelerate/fsdp2_config.yaml | 2 +-
 examples/ascend/qwen3vlmoe_lora_sft_fsdp.yaml | 2 +-
 .../model/model_utils/checkpointing.py | 8 +++-----
 src/llamafactory/train/sft/workflow.py | 3 ++-
 src/llamafactory/v1/extras/__init__.py | 1 -
 tests/conftest.py | 10 +++-------
 tests/data/processor/test_feedback.py | 2 +-
 tests/data/test_template.py | 1 -
 tests/e2e/test_train.py | 1 +
 tests/eval/test_eval_template.py | 1 +
 tests/model/model_utils/test_add_tokens.py | 2 +-
 tests/model/model_utils/test_attention.py | 5 ++++-
 tests/model/model_utils/test_checkpointing.py | 8 ++++----
 tests/model/model_utils/test_misc.py | 2 +-
 tests/model/model_utils/test_packing.py | 2 +-
 tests/model/model_utils/test_visual.py | 6 +++---
 tests/model/test_base.py | 6 ++++--
 tests/model/test_freeze.py | 6 +++---
 tests/model/test_full.py | 6 ++++--
 tests/model/test_lora.py | 15 ++++++++-------
 tests/model/test_pissa.py | 5 +++--
 tests/train/test_sft_trainer.py | 2 +-
 23 files changed, 51 insertions(+), 47 deletions(-)

diff --git a/.github/workflows/tests_npu.yml b/.github/workflows/tests_npu.yml
index 316ed2f7..38ea5a24 100644
--- a/.github/workflows/tests_npu.yml
+++ b/.github/workflows/tests_npu.yml
@@ -84,4 +84,4 @@ jobs:
         make test
       env:
         HF_HOME: /root/.cache/huggingface
-        HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}"
\ No newline at end of file
+        HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}"
diff --git a/examples/accelerate/fsdp2_config.yaml b/examples/accelerate/fsdp2_config.yaml
index 5ea46683..34348fdf 100644
--- a/examples/accelerate/fsdp2_config.yaml
+++ b/examples/accelerate/fsdp2_config.yaml
@@ -19,4 +19,4 @@ same_network: true
 tpu_env: []
 tpu_use_cluster: false
 tpu_use_sudo: false
-use_cpu: false
\ No newline at end of file
+use_cpu: false
diff --git a/examples/ascend/qwen3vlmoe_lora_sft_fsdp.yaml b/examples/ascend/qwen3vlmoe_lora_sft_fsdp.yaml
index 41540fb6..b689fa02 100644
--- a/examples/ascend/qwen3vlmoe_lora_sft_fsdp.yaml
+++ b/examples/ascend/qwen3vlmoe_lora_sft_fsdp.yaml
@@ -39,4 +39,4 @@ warmup_ratio: 0.1
 bf16: true
 ddp_timeout: 180000000
 resume_from_checkpoint: null
-seed: 1234
\ No newline at end of file
+seed: 1234
diff --git a/src/llamafactory/model/model_utils/checkpointing.py b/src/llamafactory/model/model_utils/checkpointing.py
index bdd06e7d..3e8341c1 100644
--- a/src/llamafactory/model/model_utils/checkpointing.py
+++ b/src/llamafactory/model/model_utils/checkpointing.py
@@ -18,8 +18,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 import inspect
+import os
 from functools import WRAPPER_ASSIGNMENTS, partial, wraps
 from types import MethodType
 from typing import TYPE_CHECKING, Any, Callable, Optional, Union
@@ -156,11 +156,9 @@ def prepare_model_for_training(model: "PreTrainedModel", model_args: "ModelArgum
     if (
         os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true"
         and int(os.environ.get("FSDP_VERSION", "1")) == 2
-    ): 
+    ):
         model_args.use_reentrant_gc = False
-        logger.warning_rank0(
-            "You are using fsdp2, `use_reentrant_gc` has been set to False. "
-        )
+        logger.warning_rank0("You are using fsdp2, `use_reentrant_gc` has been set to False.")
 
     if not model_args.disable_gradient_checkpointing:
         if not getattr(model, "supports_gradient_checkpointing", False):
diff --git a/src/llamafactory/train/sft/workflow.py b/src/llamafactory/train/sft/workflow.py
index c5470258..52e0fd12 100644
--- a/src/llamafactory/train/sft/workflow.py
+++ b/src/llamafactory/train/sft/workflow.py
@@ -28,6 +28,7 @@ from ..trainer_utils import create_modelcard_and_push
 from .metric import ComputeAccuracy, ComputeSimilarity, eval_logit_processor
 from .trainer import CustomSeq2SeqTrainer
 
+
 if TYPE_CHECKING:
     from transformers import Seq2SeqTrainingArguments, TrainerCallback
 
@@ -144,4 +145,4 @@ def run_sft(
         trainer.save_predictions(dataset_module["eval_dataset"], predict_results, generating_args.skip_special_tokens)
 
     # Create model card
-    create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args)
\ No newline at end of file
+    create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args)
diff --git a/src/llamafactory/v1/extras/__init__.py b/src/llamafactory/v1/extras/__init__.py
index 8b137891..e69de29b 100644
--- a/src/llamafactory/v1/extras/__init__.py
+++ b/src/llamafactory/v1/extras/__init__.py
@@ -1 +0,0 @@
-
diff --git a/tests/conftest.py b/tests/conftest.py
index ddcaf22f..ab436b5d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -40,9 +40,7 @@ def pytest_configure(config):
     config.addinivalue_line(
         "markers", "require_device: test requires specific device, e.g., @pytest.mark.require_device('cuda')"
     )
-    config.addinivalue_line(
-        "markers", "runs_on: test requires specific device, e.g., @pytest.mark.runs_on(['cpu'])"
-    )
+    config.addinivalue_line("markers", "runs_on: test requires specific device, e.g., @pytest.mark.runs_on(['cpu'])")
 
 
 def _handle_runs_on(items):
@@ -64,14 +62,12 @@ def _handle_runs_on(items):
         if isinstance(runs_on_devices, str):
             runs_on_devices = [runs_on_devices]
 
-
         if CURRENT_DEVICE not in runs_on_devices:
             item.add_marker(
-                pytest.mark.skip(
-                    reason=f"test requires one of {runs_on_devices} (current: {CURRENT_DEVICE})"
-                )
+                pytest.mark.skip(reason=f"test requires one of {runs_on_devices} (current: {CURRENT_DEVICE})")
             )
 
+
 def _handle_slow_tests(items):
     """Skip slow tests unless RUN_SLOW environment variable is set.
diff --git a/tests/data/processor/test_feedback.py b/tests/data/processor/test_feedback.py
index 73b06675..34ccba06 100644
--- a/tests/data/processor/test_feedback.py
+++ b/tests/data/processor/test_feedback.py
@@ -42,7 +42,7 @@ TRAIN_ARGS = {
 }
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("num_samples", [16])
 def test_feedback_data(num_samples: int):
     train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
diff --git a/tests/data/test_template.py b/tests/data/test_template.py
index dc510172..8a6ea3ea 100644
--- a/tests/data/test_template.py
+++ b/tests/data/test_template.py
@@ -284,7 +284,6 @@ def test_llama4_template(use_fast: bool):
         pytest.param(False, marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken.")),
     ],
 )
-
 @pytest.mark.runs_on(["cpu"])
 def test_phi4_template(use_fast: bool):
     prompt_str = (
diff --git a/tests/e2e/test_train.py b/tests/e2e/test_train.py
index 0a2a1cba..c405dc97 100644
--- a/tests/e2e/test_train.py
+++ b/tests/e2e/test_train.py
@@ -48,6 +48,7 @@ INFER_ARGS = {
 OS_NAME = os.getenv("OS_NAME", "")
 
 
+@pytest.mark.runs_on(["cpu"])
 @pytest.mark.parametrize(
     "stage,dataset",
diff --git a/tests/eval/test_eval_template.py b/tests/eval/test_eval_template.py
index 6f61cc2d..91ad869a 100644
--- a/tests/eval/test_eval_template.py
+++ b/tests/eval/test_eval_template.py
@@ -55,6 +55,7 @@ def test_eval_template_en():
         {"role": "assistant", "content": "C"},
     ]
 
+
 @pytest.mark.runs_on(["cpu"])
 def test_eval_template_zh():
     support_set = [
diff --git a/tests/model/model_utils/test_add_tokens.py b/tests/model/model_utils/test_add_tokens.py
index 4710819a..187ca5f5 100644
--- a/tests/model/model_utils/test_add_tokens.py
+++ b/tests/model/model_utils/test_add_tokens.py
@@ -25,7 +25,7 @@ TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
 UNUSED_TOKEN = "<|UNUSED_TOKEN|>"
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("special_tokens", [False, True])
 def test_add_tokens(special_tokens: bool):
     if special_tokens:
diff --git a/tests/model/model_utils/test_attention.py b/tests/model/model_utils/test_attention.py
index 6bf51639..02742512 100644
--- a/tests/model/model_utils/test_attention.py
+++ b/tests/model/model_utils/test_attention.py
@@ -17,13 +17,16 @@ import os
 
 import pytest
 from transformers.utils import is_flash_attn_2_available
+
 # Compatible with Transformers v4 and Transformers v5
 try:
     from transformers.utils import is_torch_sdpa_available
 except ImportError:
+
     def is_torch_sdpa_available():
         return True
 
+
 from llamafactory.extras.packages import is_transformers_version_greater_than
 from llamafactory.train.test_utils import load_infer_model
 
@@ -36,7 +39,7 @@ INFER_ARGS = {
 }
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
 def test_attention():
     attention_available = ["disabled"]
diff --git a/tests/model/model_utils/test_checkpointing.py b/tests/model/model_utils/test_checkpointing.py
index 63df0730..45faa178 100644
--- a/tests/model/model_utils/test_checkpointing.py
+++ b/tests/model/model_utils/test_checkpointing.py
@@ -39,7 +39,7 @@ TRAIN_ARGS = {
 }
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("disable_gradient_checkpointing", [False, True])
 def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
     model = load_train_model(disable_gradient_checkpointing=disable_gradient_checkpointing, **TRAIN_ARGS)
@@ -47,14 +47,14 @@ def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
         assert getattr(module, "gradient_checkpointing") != disable_gradient_checkpointing
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_unsloth_gradient_checkpointing():
     model = load_train_model(use_unsloth_gc=True, **TRAIN_ARGS)
     for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()):
         assert module._gradient_checkpointing_func.__self__.__name__ == "UnslothGradientCheckpointing"
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_upcast_layernorm():
     model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS)
     for name, param in model.named_parameters():
@@ -62,7 +62,7 @@ def test_upcast_layernorm():
             assert param.dtype == torch.float32
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_upcast_lmhead_output():
     model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS)
     inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device())
diff --git a/tests/model/model_utils/test_misc.py b/tests/model/model_utils/test_misc.py
index 537ae4f1..1cb94a95 100644
--- a/tests/model/model_utils/test_misc.py
+++ b/tests/model/model_utils/test_misc.py
@@ -24,7 +24,7 @@ from llamafactory.model.model_utils.misc import find_expanded_modules
 HF_TOKEN = os.getenv("HF_TOKEN")
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
 def test_expanded_modules():
     config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
diff --git a/tests/model/model_utils/test_packing.py b/tests/model/model_utils/test_packing.py
index 6dde0751..c76611a8 100644
--- a/tests/model/model_utils/test_packing.py
+++ b/tests/model/model_utils/test_packing.py
@@ -18,7 +18,7 @@ import torch
 from llamafactory.model.model_utils.packing import get_seqlens_in_batch, get_unpad_data
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize(
     "attention_mask,golden_seq_lens",
     [
diff --git a/tests/model/model_utils/test_visual.py b/tests/model/model_utils/test_visual.py
index 703bbb7f..f3ddd69f 100644
--- a/tests/model/model_utils/test_visual.py
+++ b/tests/model/model_utils/test_visual.py
@@ -23,7 +23,7 @@ from llamafactory.hparams import FinetuningArguments, ModelArguments
 from llamafactory.model.adapter import init_adapter
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("freeze_vision_tower", (False, True))
 @pytest.mark.parametrize("freeze_multi_modal_projector", (False, True))
 @pytest.mark.parametrize("freeze_language_model", (False, True))
@@ -49,7 +49,7 @@ def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bo
         assert param.requires_grad != freeze_language_model
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False)))
 def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
     model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
@@ -82,7 +82,7 @@ def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
         assert (merger_param_name in trainable_params) is False
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_visual_model_save_load():
     # check VLM's state dict: https://github.com/huggingface/transformers/pull/38385
     model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
diff --git a/tests/model/test_base.py b/tests/model/test_base.py
index 382bea2f..e40f84fe 100644
--- a/tests/model/test_base.py
+++ b/tests/model/test_base.py
@@ -29,14 +29,16 @@ INFER_ARGS = {
     "infer_dtype": "float16",
 }
 
-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.skip_on_devices("npu")
 def test_base():
     model = load_infer_model(**INFER_ARGS)
     ref_model = load_reference_model(TINY_LLAMA3)
     compare_model(model, ref_model)
 
-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.skip_on_devices("npu")
 @pytest.mark.usefixtures("fix_valuehead_cpu_loading")
 def test_valuehead():
diff --git a/tests/model/test_freeze.py b/tests/model/test_freeze.py
index 9d39ded1..76d12abb 100644
--- a/tests/model/test_freeze.py
+++ b/tests/model/test_freeze.py
@@ -44,7 +44,7 @@ INFER_ARGS = {
 }
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_freeze_train_all_modules():
     model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS)
     for name, param in model.named_parameters():
@@ -56,7 +56,7 @@ def test_freeze_train_all_modules():
             assert param.dtype == torch.float16
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_freeze_train_extra_modules():
     model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS)
     for name, param in model.named_parameters():
@@ -68,7 +68,7 @@ def test_freeze_train_extra_modules():
            assert param.dtype == torch.float16
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_freeze_inference():
     model = load_infer_model(**INFER_ARGS)
     for param in model.parameters():
diff --git a/tests/model/test_full.py b/tests/model/test_full.py
index 3f55f1a0..578b20b4 100644
--- a/tests/model/test_full.py
+++ b/tests/model/test_full.py
@@ -43,14 +43,16 @@ INFER_ARGS = {
     "infer_dtype": "float16",
 }
 
-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_full_train():
     model = load_train_model(**TRAIN_ARGS)
     for param in model.parameters():
         assert param.requires_grad is True
         assert param.dtype == torch.float32
 
-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_full_inference():
     model = load_infer_model(**INFER_ARGS)
     for param in model.parameters():
diff --git a/tests/model/test_lora.py b/tests/model/test_lora.py
index 3a855391..3b238edb 100644
--- a/tests/model/test_lora.py
+++ b/tests/model/test_lora.py
@@ -55,35 +55,35 @@ INFER_ARGS = {
 }
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_lora_train_qv_modules():
     model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS)
     linear_modules, _ = check_lora_model(model)
     assert linear_modules == {"q_proj", "v_proj"}
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_lora_train_all_modules():
     model = load_train_model(lora_target="all", **TRAIN_ARGS)
     linear_modules, _ = check_lora_model(model)
     assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"}
 
 
-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_lora_train_extra_modules():
     model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS)
_, extra_modules = check_lora_model(model) assert extra_modules == {"embed_tokens", "lm_head"} -@pytest.mark.runs_on(["cpu","npu"]) +@pytest.mark.runs_on(["cpu", "npu"]) def test_lora_train_old_adapters(): model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=False, **TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True) compare_model(model, ref_model) -@pytest.mark.runs_on(["cpu","npu"]) +@pytest.mark.runs_on(["cpu", "npu"]) def test_lora_train_new_adapters(): model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=True, **TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True) @@ -92,7 +92,7 @@ def test_lora_train_new_adapters(): ) -@pytest.mark.runs_on(["cpu","npu"]) +@pytest.mark.runs_on(["cpu", "npu"]) @pytest.mark.usefixtures("fix_valuehead_cpu_loading") def test_lora_train_valuehead(): model = load_train_model(add_valuehead=True, **TRAIN_ARGS) @@ -102,7 +102,8 @@ def test_lora_train_valuehead(): assert torch.allclose(state_dict["v_head.summary.weight"], ref_state_dict["v_head.summary.weight"]) assert torch.allclose(state_dict["v_head.summary.bias"], ref_state_dict["v_head.summary.bias"]) -@pytest.mark.runs_on(["cpu","npu"]) + +@pytest.mark.runs_on(["cpu", "npu"]) @pytest.mark.skip_on_devices("npu") def test_lora_inference(): model = load_infer_model(**INFER_ARGS) diff --git a/tests/model/test_pissa.py b/tests/model/test_pissa.py index 6b830290..f82a085a 100644 --- a/tests/model/test_pissa.py +++ b/tests/model/test_pissa.py @@ -49,14 +49,15 @@ INFER_ARGS = { } -@pytest.mark.runs_on(["cpu","npu"]) +@pytest.mark.runs_on(["cpu", "npu"]) @pytest.mark.xfail(reason="PiSSA initialization is not stable in different platform.") def test_pissa_train(): model = load_train_model(**TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=True) compare_model(model, ref_model) -@pytest.mark.runs_on(["cpu","npu"]) + +@pytest.mark.runs_on(["cpu", "npu"]) @pytest.mark.xfail(reason="Known connection error.") def test_pissa_inference(): model = load_infer_model(**INFER_ARGS) diff --git a/tests/train/test_sft_trainer.py b/tests/train/test_sft_trainer.py index 1dd2c0e6..4b534d16 100644 --- a/tests/train/test_sft_trainer.py +++ b/tests/train/test_sft_trainer.py @@ -59,7 +59,7 @@ class DataCollatorWithVerbose(DataCollatorWithPadding): return {k: v[:, :1] for k, v in batch.items()} # truncate input length -@pytest.mark.runs_on(["cpu","npu"]) +@pytest.mark.runs_on(["cpu", "npu"]) @pytest.mark.parametrize("disable_shuffling", [False, True]) def test_shuffle(disable_shuffling: bool): model_args, data_args, training_args, finetuning_args, _ = get_train_args(