[test] add allreduce test on npu (#9619)

Co-authored-by: frozenleaves <frozen@Mac.local>
浮梦
2025-12-16 21:33:30 +08:00
committed by GitHub
parent a0179772ab
commit 18c21bce5a
20 changed files with 419 additions and 70 deletions


@@ -17,15 +17,18 @@
Contains shared fixtures, pytest configuration, and custom markers.
"""
import os
import pytest
from pytest import Config, Item
from llamafactory.extras.misc import get_current_device, is_env_enabled
from llamafactory.extras.misc import get_current_device, get_device_count, is_env_enabled
from llamafactory.extras.packages import is_transformers_version_greater_than
from llamafactory.train.test_utils import patch_valuehead_model
try:
CURRENT_DEVICE = get_current_device().type
CURRENT_DEVICE = get_current_device().type # cpu | cuda | npu
except Exception:
CURRENT_DEVICE = "cpu"
@@ -33,46 +36,36 @@ except Exception:
def pytest_configure(config: Config):
"""Register custom pytest markers."""
config.addinivalue_line(
"markers", "slow: marks tests as slow (deselect with '-m \"not slow\"' or set RUN_SLOW=1 to run)"
"markers",
"slow: marks tests as slow (deselect with '-m \"not slow\"' or set RUN_SLOW=1 to run)",
)
config.addinivalue_line(
"markers",
"runs_on: test requires specific device type, e.g., @pytest.mark.runs_on(['cuda'])",
)
config.addinivalue_line(
"markers",
"require_distributed(num_devices): allow multi-device execution (default: 2)",
)
config.addinivalue_line("markers", "runs_on: test requires specific device, e.g., @pytest.mark.runs_on(['cpu'])")
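For reference, the three markers registered above compose on a single test. A minimal, hypothetical sketch (the test name and body are illustrative, not part of this commit):

import pytest

@pytest.mark.slow
@pytest.mark.runs_on(["cuda", "npu"])
@pytest.mark.require_distributed(2)
def test_example_distributed_op():
    # collected only on cuda/npu hosts with at least 2 visible devices,
    # and run only when RUN_SLOW=1 is set
    ...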
def _handle_runs_on(items: list[Item]):
"""Skip tests on specified devices based on runs_on marker.
Usage:
# Skip tests on specified devices
@pytest.mark.runs_on(['cpu'])
def test_something():
pass
"""
"""Skip tests on specified device TYPES (cpu/cuda/npu)."""
for item in items:
runs_on_marker = item.get_closest_marker("runs_on")
if runs_on_marker:
runs_on_devices = runs_on_marker.args[0]
marker = item.get_closest_marker("runs_on")
if not marker:
continue
# Compatibility handling: Allow a single string instead of a list
# Example: @pytest.mark.("cpu")
if isinstance(runs_on_devices, str):
runs_on_devices = [runs_on_devices]
devices = marker.args[0]
if isinstance(devices, str):
devices = [devices]
if CURRENT_DEVICE not in runs_on_devices:
item.add_marker(
pytest.mark.skip(reason=f"test requires one of {runs_on_devices} (current: {CURRENT_DEVICE})")
)
if CURRENT_DEVICE not in devices:
item.add_marker(pytest.mark.skip(reason=f"test requires one of {devices} (current: {CURRENT_DEVICE})"))
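The skip decision simply compares the marker's device list against CURRENT_DEVICE. A hypothetical illustration, assuming a CPU-only host:

# With CURRENT_DEVICE == "cpu", this test is skipped at collection time:
@pytest.mark.runs_on(["cuda", "npu"])
def test_accelerator_only():
    ...

# A bare string is also accepted and normalized to a one-element list:
@pytest.mark.runs_on("cpu")
def test_cpu_only():
    ...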
def _handle_slow_tests(items: list[Item]):
"""Skip slow tests unless RUN_SLOW environment variable is set.
Usage:
# Skip slow tests (default)
@pytest.mark.slow
# Run slow tests
RUN_SLOW=1 pytest tests/
"""
"""Skip slow tests unless RUN_SLOW is enabled."""
if not is_env_enabled("RUN_SLOW", "0"):
skip_slow = pytest.mark.skip(reason="slow test (set RUN_SLOW=1 to run)")
for item in items:
@@ -80,10 +73,82 @@ def _handle_slow_tests(items: list[Item]):
item.add_marker(skip_slow)
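Usage is unchanged by the docstring trim: mark the test and opt in via the environment (test name below is hypothetical):

@pytest.mark.slow
def test_long_training_loop():
    ...

# deselected by default; enable with:
#   RUN_SLOW=1 pytest tests/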
def _get_visible_devices_env():
"""Return device visibility env var name."""
if CURRENT_DEVICE == "cuda":
return "CUDA_VISIBLE_DEVICES"
if CURRENT_DEVICE == "npu":
return "ASCEND_RT_VISIBLE_DEVICES"
return None
def _handle_device_visibility(items: list[Item]):
"""Handle device visibility based on test markers."""
env_key = _get_visible_devices_env()
if env_key is None or CURRENT_DEVICE == "cpu":
return
# Parse visible devices
visible_devices_env = os.environ.get(env_key)
if visible_devices_env is None:
available = get_device_count()
else:
visible_devices = [v for v in visible_devices_env.split(",") if v != ""]
available = len(visible_devices)
for item in items:
marker = item.get_closest_marker("require_distributed")
if not marker:
continue
required = marker.args[0] if marker.args else 2
if available < required:
item.add_marker(pytest.mark.skip(reason=f"test requires {required} devices, but only {available} visible"))
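How the visibility check interacts with require_distributed, sketched under the assumption that an NPU host exposes only two devices (ASCEND_RT_VISIBLE_DEVICES="0,1"):

# available == 2, required == 4, so the test is skipped with
# "test requires 4 devices, but only 2 visible"
@pytest.mark.require_distributed(4)
def test_needs_four_devices():  # hypothetical
    ...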
def pytest_collection_modifyitems(config: Config, items: list[Item]):
"""Modify test collection based on markers and environment."""
# Handle version compatibility (from HEAD)
if not is_transformers_version_greater_than("4.57.0"):
skip_bc = pytest.mark.skip(reason="Skip backward compatibility tests")
for item in items:
if "tests_v1" in str(item.fspath):
item.add_marker(skip_bc)
_handle_slow_tests(items)
_handle_runs_on(items)
_handle_device_visibility(items)
@pytest.fixture(autouse=True)
def _manage_distributed_env(request, monkeypatch):
"""Set environment variables for distributed tests if specific devices are requested."""
env_key = _get_visible_devices_env()
if not env_key:
return
# Save old environment for logic checks, monkeypatch handles restoration
old_value = os.environ.get(env_key)
marker = request.node.get_closest_marker("require_distributed")
if marker:
# Distributed test
required = marker.args[0] if marker.args else 2
specific_devices = marker.args[1] if len(marker.args) > 1 else None
if specific_devices:
devices_str = ",".join(map(str, specific_devices))
else:
devices_str = ",".join(str(i) for i in range(required))
monkeypatch.setenv(env_key, devices_str)
else:
# Non-distributed test
if old_value:
visible_devices = [v for v in old_value.split(",") if v != ""]
monkeypatch.setenv(env_key, visible_devices[0] if visible_devices else "0")
else:
monkeypatch.setenv(env_key, "0")
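The autouse fixture also honors an optional second marker argument for pinning concrete device ids. A hypothetical example of both paths on an NPU host:

# Distributed test pinned to devices 2 and 3: inside the test,
# os.environ["ASCEND_RT_VISIBLE_DEVICES"] (or CUDA_VISIBLE_DEVICES on GPU) == "2,3"
@pytest.mark.require_distributed(2, [2, 3])
def test_on_specific_devices():
    ...

# Without the marker, visibility is narrowed to a single device ("0" by default),
# so ordinary tests never see more than one accelerator.
def test_single_device_path():
    ...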
@pytest.fixture


@@ -42,7 +42,7 @@ TRAIN_ARGS = {
}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.parametrize("num_samples", [16])
def test_feedback_data(num_samples: int):
train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]


@@ -25,7 +25,7 @@ TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
UNUSED_TOKEN = "<|UNUSED_TOKEN|>"
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.parametrize("special_tokens", [False, True])
def test_add_tokens(special_tokens: bool):
if special_tokens:


@@ -39,7 +39,7 @@ INFER_ARGS = {
}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
def test_attention():
attention_available = ["disabled"]


@@ -39,7 +39,7 @@ TRAIN_ARGS = {
}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.parametrize("disable_gradient_checkpointing", [False, True])
def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
model = load_train_model(disable_gradient_checkpointing=disable_gradient_checkpointing, **TRAIN_ARGS)
@@ -47,14 +47,14 @@ def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
assert getattr(module, "gradient_checkpointing") != disable_gradient_checkpointing
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_unsloth_gradient_checkpointing():
model = load_train_model(use_unsloth_gc=True, **TRAIN_ARGS)
for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()):
assert module._gradient_checkpointing_func.__self__.__name__ == "UnslothGradientCheckpointing"
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_upcast_layernorm():
model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS)
for name, param in model.named_parameters():
@@ -62,7 +62,7 @@ def test_upcast_layernorm():
assert param.dtype == torch.float32
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_upcast_lmhead_output():
model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS)
inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device())


@@ -24,7 +24,7 @@ from llamafactory.model.model_utils.misc import find_expanded_modules
HF_TOKEN = os.getenv("HF_TOKEN")
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
def test_expanded_modules():
config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")


@@ -18,7 +18,7 @@ import torch
from llamafactory.model.model_utils.packing import get_seqlens_in_batch, get_unpad_data
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.parametrize(
"attention_mask,golden_seq_lens",
[


@@ -23,7 +23,7 @@ from llamafactory.hparams import FinetuningArguments, ModelArguments
from llamafactory.model.adapter import init_adapter
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.parametrize("freeze_vision_tower", (False, True))
@pytest.mark.parametrize("freeze_multi_modal_projector", (False, True))
@pytest.mark.parametrize("freeze_language_model", (False, True))
@@ -49,7 +49,7 @@ def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bo
assert param.requires_grad != freeze_language_model
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False)))
def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
@@ -82,7 +82,7 @@ def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
assert (merger_param_name in trainable_params) is False
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_visual_model_save_load():
# check VLM's state dict: https://github.com/huggingface/transformers/pull/38385
model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")


@@ -30,7 +30,7 @@ INFER_ARGS = {
}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_base():
model = load_infer_model(**INFER_ARGS)
ref_model = load_reference_model(TINY_LLAMA3)


@@ -44,7 +44,7 @@ INFER_ARGS = {
}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_freeze_train_all_modules():
model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS)
for name, param in model.named_parameters():
@@ -56,7 +56,7 @@ def test_freeze_train_all_modules():
assert param.dtype == torch.float16
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_freeze_train_extra_modules():
model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS)
for name, param in model.named_parameters():
@@ -68,7 +68,7 @@ def test_freeze_train_extra_modules():
assert param.dtype == torch.float16
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_freeze_inference():
model = load_infer_model(**INFER_ARGS)
for param in model.parameters():


@@ -44,7 +44,7 @@ INFER_ARGS = {
}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_full_train():
model = load_train_model(**TRAIN_ARGS)
for param in model.parameters():
@@ -52,7 +52,7 @@ def test_full_train():
assert param.dtype == torch.float32
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_full_inference():
model = load_infer_model(**INFER_ARGS)
for param in model.parameters():


@@ -55,35 +55,35 @@ INFER_ARGS = {
}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_lora_train_qv_modules():
model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS)
linear_modules, _ = check_lora_model(model)
assert linear_modules == {"q_proj", "v_proj"}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_lora_train_all_modules():
model = load_train_model(lora_target="all", **TRAIN_ARGS)
linear_modules, _ = check_lora_model(model)
assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_lora_train_extra_modules():
model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS)
_, extra_modules = check_lora_model(model)
assert extra_modules == {"embed_tokens", "lm_head"}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_lora_train_old_adapters():
model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=False, **TRAIN_ARGS)
ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
compare_model(model, ref_model)
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_lora_train_new_adapters():
model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=True, **TRAIN_ARGS)
ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
@@ -92,7 +92,7 @@ def test_lora_train_new_adapters():
)
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.usefixtures("fix_valuehead_cpu_loading")
def test_lora_train_valuehead():
model = load_train_model(add_valuehead=True, **TRAIN_ARGS)
@@ -103,7 +103,7 @@ def test_lora_train_valuehead():
assert torch.allclose(state_dict["v_head.summary.bias"], ref_state_dict["v_head.summary.bias"])
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_lora_inference():
model = load_infer_model(**INFER_ARGS)
ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True).merge_and_unload()


@@ -49,7 +49,7 @@ INFER_ARGS = {
}
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.xfail(reason="PiSSA initialization is not stable in different platform.")
def test_pissa_train():
model = load_train_model(**TRAIN_ARGS)
@@ -57,7 +57,7 @@ def test_pissa_train():
compare_model(model, ref_model)
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.xfail(reason="Known connection error.")
def test_pissa_inference():
model = load_infer_model(**INFER_ARGS)


@@ -59,7 +59,7 @@ class DataCollatorWithVerbose(DataCollatorWithPadding):
return {k: v[:, :1] for k, v in batch.items()} # truncate input length
@pytest.mark.runs_on(["cpu", "npu"])
@pytest.mark.runs_on(["cpu", "npu", "cuda"])
@pytest.mark.parametrize("disable_shuffling", [False, True])
def test_shuffle(disable_shuffling: bool):
model_args, data_args, training_args, finetuning_args, _ = get_train_args(