From e43a972b25fcf9caf5fa4018649ac9f06d1c89df Mon Sep 17 00:00:00 2001 From: Username_Full Date: Sun, 30 Nov 2025 09:37:08 +0800 Subject: [PATCH] [test] add npu test yaml and add ascend a3 docker file (#9547) Co-authored-by: jiaqiw09 --- .github/workflows/docker.yml | 34 ++++++-- .github/workflows/tests_npu.yml | 87 +++++++++++++++++++ docker/docker-npu/Dockerfile | 5 +- docker/docker-npu/docker-compose.yml | 36 +++++++- tests/conftest.py | 31 +++++++ tests/data/processor/test_feedback.py | 1 + tests/data/processor/test_pairwise.py | 1 + tests/data/processor/test_processor_utils.py | 1 + tests/data/processor/test_supervised.py | 4 + tests/data/processor/test_unsupervised.py | 1 + tests/data/test_collator.py | 4 + tests/data/test_converter.py | 4 + tests/data/test_formatter.py | 27 ++++++ tests/data/test_loader.py | 5 ++ tests/data/test_mm_plugin.py | 13 +++ tests/data/test_template.py | 18 ++++ tests/e2e/test_chat.py | 4 + tests/e2e/test_sglang.py | 2 + tests/e2e/test_train.py | 3 +- tests/eval/test_eval_template.py | 5 +- tests/model/model_utils/test_add_tokens.py | 1 + tests/model/model_utils/test_attention.py | 1 + tests/model/model_utils/test_checkpointing.py | 4 + tests/model/model_utils/test_misc.py | 1 + tests/model/model_utils/test_packing.py | 1 + tests/model/model_utils/test_visual.py | 3 + tests/model/test_base.py | 6 +- tests/model/test_freeze.py | 4 + tests/model/test_full.py | 5 +- tests/model/test_lora.py | 9 +- tests/model/test_pissa.py | 3 +- tests/train/test_sft_trainer.py | 1 + tests/utils.py | 18 ++++ 33 files changed, 322 insertions(+), 21 deletions(-) create mode 100644 .github/workflows/tests_npu.yml create mode 100644 tests/utils.py diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c809f6d5..06a5c6c0 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -27,14 +27,18 @@ jobs: strategy: fail-fast: false matrix: - device: - - "cuda" - - "npu" + include: + - device: "cuda" + npu_type: "" + - device: "npu" + npu_type: "a2" + - device: "npu" + npu_type: "a3" runs-on: ubuntu-latest concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.device }} + group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.device }}-${{ matrix.npu_type }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} environment: @@ -76,7 +80,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to Quay - if: ${{ github.event_name != 'pull_request' && matrix.device == 'npu' }} + if: ${{ github.event_name != 'pull_request' && matrix.device == 'npu'}} uses: docker/login-action@v3 with: registry: quay.io @@ -97,8 +101,8 @@ jobs: cache-from: type=gha cache-to: type=gha,mode=max - - name: Build and push Docker image (NPU) - if: ${{ matrix.device == 'npu' }} + - name: Build and push Docker image (NPU-A2) + if: ${{ matrix.device == 'npu' && matrix.npu_type == 'a2' }} uses: docker/build-push-action@v6 with: context: . @@ -110,3 +114,19 @@ jobs: quay.io/ascend/llamafactory:${{ steps.version.outputs.tag }}-npu-a2 cache-from: type=gha cache-to: type=gha,mode=max + + - name: Build and push Docker image (NPU-A3) + if: ${{ matrix.device == 'npu' && matrix.npu_type == 'a3' }} + uses: docker/build-push-action@v6 + with: + context: . 
+ platforms: linux/amd64,linux/arm64 + file: ./docker/docker-npu/Dockerfile + build-args: | + BASE_IMAGE=quay.io/ascend/cann:8.3.rc2-a3-ubuntu22.04-py3.11 + push: ${{ github.event_name != 'pull_request' }} + tags: | + docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}-npu-a3 + quay.io/ascend/llamafactory:${{ steps.version.outputs.tag }}-npu-a3 + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/tests_npu.yml b/.github/workflows/tests_npu.yml new file mode 100644 index 00000000..316ed2f7 --- /dev/null +++ b/.github/workflows/tests_npu.yml @@ -0,0 +1,87 @@ +name: tests_npu + +on: + workflow_dispatch: + push: + branches: + - "main" + paths: + - "**/*.py" + - "requirements.txt" + - "Makefile" + - ".github/workflows/*.yml" + pull_request: + branches: + - "main" + paths: + - "**/*.py" + - "requirements.txt" + - "Makefile" + - ".github/workflows/*.yml" + +jobs: + tests: + strategy: + fail-fast: false + matrix: + python: + - "3.11" + os: + - "linux-aarch64-a2-4" + pytorch_npu: + - "2.7.1" + + runs-on: ${{ matrix.os }} + + concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }} + cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} + + container: + image: ascendai/cann:8.3.rc2-910b-ubuntu22.04-py3.11 + env: + HF_ENDPOINT: https://hf-mirror.com + HF_TOKEN: ${{ secrets.HF_TOKEN }} + OS_NAME: ${{ matrix.os }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install ".[torch-npu,dev]" torch-npu==${{matrix.pytorch_npu}} + + - name: Install node + run: | + apt-get update || true + apt-get install -y curl + curl -fsSL https://deb.nodesource.com/setup_20.x | bash - + apt-get install -y nodejs + + - name: Cache files + id: hf-hub-cache + uses: actions/cache@v4 + with: + path: ${{ runner.temp }}/huggingface + key: huggingface-${{ matrix.os }}-${{ matrix.python }}-${{ hashFiles('tests/version.txt') }} + + - name: Check quality + run: | + make style && make quality + + - name: Check license + run: | + make license + + - name: Check build + run: | + make build + + - name: Test with pytest + run: | + make test + env: + HF_HOME: /root/.cache/huggingface + HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}" \ No newline at end of file diff --git a/docker/docker-npu/Dockerfile b/docker/docker-npu/Dockerfile index 7539008b..825662b2 100644 --- a/docker/docker-npu/Dockerfile +++ b/docker/docker-npu/Dockerfile @@ -1,9 +1,6 @@ # https://hub.docker.com/r/ascendai/cann/tags -# default base image build for A2, if build for A3, using this image: -# ARG BASE_IMAGE=ascendai/cann:8.3.rc1-a3-ubuntu22.04-py3.11 - -ARG BASE_IMAGE=ascendai/cann:8.3.rc1-910b-ubuntu22.04-py3.11 +ARG BASE_IMAGE=quay.io/ascend/cann:8.3.rc2-910b-ubuntu22.04-py3.11 FROM ${BASE_IMAGE} # Installation arguments diff --git a/docker/docker-npu/docker-compose.yml b/docker/docker-npu/docker-compose.yml index 659f8d1b..8530efaf 100644 --- a/docker/docker-npu/docker-compose.yml +++ b/docker/docker-npu/docker-compose.yml @@ -1,12 +1,13 @@ services: - llamafactory: + llamafactory-a2: build: dockerfile: ./docker/docker-npu/Dockerfile context: ../.. 
       args:
         PIP_INDEX: https://pypi.org/simple
         EXTRAS: torch-npu,metrics
-    container_name: llamafactory
+    container_name: llamafactory-a2
+    image: llamafactory:npu-a2
     volumes:
       - /usr/local/dcmi:/usr/local/dcmi
       - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
@@ -26,3 +27,34 @@ services:
       - /dev/devmm_svm
       - /dev/hisi_hdc
     restart: unless-stopped
+
+  llamafactory-a3:
+    profiles: ["a3"]
+    build:
+      dockerfile: ./docker/docker-npu/Dockerfile
+      context: ../..
+      args:
+        BASE_IMAGE: quay.io/ascend/cann:8.3.rc2-a3-ubuntu22.04-py3.11
+        PIP_INDEX: https://pypi.org/simple
+        EXTRAS: torch-npu,metrics
+    container_name: llamafactory-a3
+    image: llamafactory:npu-a3
+    volumes:
+      - /usr/local/dcmi:/usr/local/dcmi
+      - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
+      - /usr/local/Ascend/driver:/usr/local/Ascend/driver
+      - /etc/ascend_install.info:/etc/ascend_install.info
+    ports:
+      - "7861:7860"
+      - "8001:8000"
+    ipc: host
+    tty: true
+    # shm_size: "16gb" # ipc: host is set
+    stdin_open: true
+    command: bash
+    devices:
+      - /dev/davinci0
+      - /dev/davinci_manager
+      - /dev/devmm_svm
+      - /dev/hisi_hdc
+    restart: unless-stopped
diff --git a/tests/conftest.py b/tests/conftest.py
index f5811c9d..ddcaf22f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -40,8 +40,38 @@ def pytest_configure(config):
     config.addinivalue_line(
         "markers", "require_device: test requires specific device, e.g., @pytest.mark.require_device('cuda')"
     )
+    config.addinivalue_line(
+        "markers", "runs_on: test runs only on the listed devices, e.g., @pytest.mark.runs_on(['cpu'])"
+    )


+def _handle_runs_on(items):
+    """Skip tests whose runs_on marker does not include the current device.
+
+    Usage:
+        # run only on the listed devices, skip everywhere else
+        @pytest.mark.runs_on(['cpu'])
+        def test_something():
+            pass
+    """
+    for item in items:
+        runs_on_marker = item.get_closest_marker("runs_on")
+        if runs_on_marker:
+            runs_on_devices = runs_on_marker.args[0]
+
+            # Compatibility handling: allow a single string instead of a list,
+            # e.g., @pytest.mark.runs_on("cpu")
+            if isinstance(runs_on_devices, str):
+                runs_on_devices = [runs_on_devices]
+
+
+            if CURRENT_DEVICE not in runs_on_devices:
+                item.add_marker(
+                    pytest.mark.skip(
+                        reason=f"test requires one of {runs_on_devices} (current: {CURRENT_DEVICE})"
+                    )
+                )
+

 def _handle_slow_tests(items):
     """Skip slow tests unless RUN_SLOW environment variable is set.
@@ -104,6 +134,7 @@ def pytest_collection_modifyitems(config, items): _handle_slow_tests(items) _handle_device_skips(items) _handle_device_requirements(items) + _handle_runs_on(items) @pytest.fixture diff --git a/tests/data/processor/test_feedback.py b/tests/data/processor/test_feedback.py index 355e7fe0..73b06675 100644 --- a/tests/data/processor/test_feedback.py +++ b/tests/data/processor/test_feedback.py @@ -42,6 +42,7 @@ TRAIN_ARGS = { } +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.parametrize("num_samples", [16]) def test_feedback_data(num_samples: int): train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"] diff --git a/tests/data/processor/test_pairwise.py b/tests/data/processor/test_pairwise.py index 1040ba82..d3b8dbce 100644 --- a/tests/data/processor/test_pairwise.py +++ b/tests/data/processor/test_pairwise.py @@ -51,6 +51,7 @@ def _convert_sharegpt_to_openai(messages: list[dict[str, str]]) -> list[dict[str return new_messages +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("num_samples", [16]) def test_pairwise_data(num_samples: int): train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"] diff --git a/tests/data/processor/test_processor_utils.py b/tests/data/processor/test_processor_utils.py index e004cb06..256f5a6e 100644 --- a/tests/data/processor/test_processor_utils.py +++ b/tests/data/processor/test_processor_utils.py @@ -18,6 +18,7 @@ import pytest from llamafactory.data.processor.processor_utils import infer_seqlen +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize( "test_input,test_output", [ diff --git a/tests/data/processor/test_supervised.py b/tests/data/processor/test_supervised.py index 6eaa34d3..903930b8 100644 --- a/tests/data/processor/test_supervised.py +++ b/tests/data/processor/test_supervised.py @@ -42,6 +42,7 @@ TRAIN_ARGS = { } +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("num_samples", [16]) def test_supervised_single_turn(num_samples: int): train_dataset = load_dataset_module(dataset_dir="ONLINE", dataset=TINY_DATA, **TRAIN_ARGS)["train_dataset"] @@ -61,6 +62,7 @@ def test_supervised_single_turn(num_samples: int): assert train_dataset["input_ids"][index] == ref_input_ids +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("num_samples", [8]) def test_supervised_multi_turn(num_samples: int): train_dataset = load_dataset_module(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)[ @@ -74,6 +76,7 @@ def test_supervised_multi_turn(num_samples: int): assert train_dataset["input_ids"][index] == ref_input_ids +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("num_samples", [4]) def test_supervised_train_on_prompt(num_samples: int): train_dataset = load_dataset_module( @@ -88,6 +91,7 @@ def test_supervised_train_on_prompt(num_samples: int): assert train_dataset["labels"][index] == ref_ids +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("num_samples", [4]) def test_supervised_mask_history(num_samples: int): train_dataset = load_dataset_module( diff --git a/tests/data/processor/test_unsupervised.py b/tests/data/processor/test_unsupervised.py index 947b2e39..d9a9c9c4 100644 --- a/tests/data/processor/test_unsupervised.py +++ b/tests/data/processor/test_unsupervised.py @@ -45,6 +45,7 @@ TRAIN_ARGS = { } +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("num_samples", [16]) def test_unsupervised_data(num_samples: int): train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"] diff --git a/tests/data/test_collator.py b/tests/data/test_collator.py index 
657f280d..047354ab 100644 --- a/tests/data/test_collator.py +++ b/tests/data/test_collator.py @@ -14,6 +14,7 @@ import os +import pytest import torch from PIL import Image from transformers import AutoConfig, AutoModelForVision2Seq @@ -28,6 +29,7 @@ from llamafactory.model import load_tokenizer TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") +@pytest.mark.runs_on(["cpu"]) def test_base_collator(): model_args, data_args, *_ = get_infer_args({"model_name_or_path": TINY_LLAMA3, "template": "default"}) tokenizer_module = load_tokenizer(model_args) @@ -71,6 +73,7 @@ def test_base_collator(): assert batch_input[k].eq(torch.tensor(expected_input[k])).all() +@pytest.mark.runs_on(["cpu"]) def test_multimodal_collator(): model_args, data_args, *_ = get_infer_args( {"model_name_or_path": "Qwen/Qwen2-VL-2B-Instruct", "template": "qwen2_vl"} @@ -126,6 +129,7 @@ def test_multimodal_collator(): assert batch_input[k].eq(torch.tensor(expected_input[k])).all() +@pytest.mark.runs_on(["cpu"]) def test_4d_attention_mask(): o = 0.0 x = torch.finfo(torch.float16).min diff --git a/tests/data/test_converter.py b/tests/data/test_converter.py index 6997f75f..23929c24 100644 --- a/tests/data/test_converter.py +++ b/tests/data/test_converter.py @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pytest + from llamafactory.data import Role from llamafactory.data.converter import get_dataset_converter from llamafactory.data.parser import DatasetAttr from llamafactory.hparams import DataArguments +@pytest.mark.runs_on(["cpu"]) def test_alpaca_converter(): dataset_attr = DatasetAttr("hf_hub", "llamafactory/tiny-supervised-dataset") data_args = DataArguments() @@ -38,6 +41,7 @@ def test_alpaca_converter(): } +@pytest.mark.runs_on(["cpu"]) def test_sharegpt_converter(): dataset_attr = DatasetAttr("hf_hub", "llamafactory/tiny-supervised-dataset") data_args = DataArguments() diff --git a/tests/data/test_formatter.py b/tests/data/test_formatter.py index 3ccb8bb3..c2da6dbf 100644 --- a/tests/data/test_formatter.py +++ b/tests/data/test_formatter.py @@ -15,6 +15,8 @@ import json from datetime import datetime +import pytest + from llamafactory.data.formatter import EmptyFormatter, FunctionFormatter, StringFormatter, ToolFormatter @@ -36,16 +38,19 @@ TOOLS = [ ] +@pytest.mark.runs_on(["cpu"]) def test_empty_formatter(): formatter = EmptyFormatter(slots=["\n"]) assert formatter.apply() == ["\n"] +@pytest.mark.runs_on(["cpu"]) def test_string_formatter(): formatter = StringFormatter(slots=["", "Human: {{content}}\nAssistant:"]) assert formatter.apply(content="Hi") == ["", "Human: Hi\nAssistant:"] +@pytest.mark.runs_on(["cpu"]) def test_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}", ""], tool_format="default") tool_calls = json.dumps(FUNCTION) @@ -55,6 +60,7 @@ def test_function_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_multi_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}", ""], tool_format="default") tool_calls = json.dumps([FUNCTION] * 2) @@ -65,6 +71,7 @@ def test_multi_function_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_default_tool_formatter(): formatter = ToolFormatter(tool_format="default") assert formatter.apply(content=json.dumps(TOOLS)) == [ @@ -83,12 +90,14 @@ def test_default_tool_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_default_tool_extractor(): formatter = ToolFormatter(tool_format="default") result = """Action: 
test_tool\nAction Input: {"foo": "bar", "size": 10}""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] +@pytest.mark.runs_on(["cpu"]) def test_default_multi_tool_extractor(): formatter = ToolFormatter(tool_format="default") result = ( @@ -101,12 +110,14 @@ def test_default_multi_tool_extractor(): ] +@pytest.mark.runs_on(["cpu"]) def test_glm4_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}"], tool_format="glm4") tool_calls = json.dumps(FUNCTION) assert formatter.apply(content=tool_calls) == ["""tool_name\n{"foo": "bar", "size": 10}"""] +@pytest.mark.runs_on(["cpu"]) def test_glm4_tool_formatter(): formatter = ToolFormatter(tool_format="glm4") assert formatter.apply(content=json.dumps(TOOLS)) == [ @@ -117,12 +128,14 @@ def test_glm4_tool_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_glm4_tool_extractor(): formatter = ToolFormatter(tool_format="glm4") result = """test_tool\n{"foo": "bar", "size": 10}\n""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] +@pytest.mark.runs_on(["cpu"]) def test_llama3_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3") tool_calls = json.dumps(FUNCTION) @@ -131,6 +144,7 @@ def test_llama3_function_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_llama3_multi_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3") tool_calls = json.dumps([FUNCTION] * 2) @@ -141,6 +155,7 @@ def test_llama3_multi_function_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_llama3_tool_formatter(): formatter = ToolFormatter(tool_format="llama3") date = datetime.now().strftime("%d %b %Y") @@ -154,12 +169,14 @@ def test_llama3_tool_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_llama3_tool_extractor(): formatter = ToolFormatter(tool_format="llama3") result = """{"name": "test_tool", "parameters": {"foo": "bar", "size": 10}}\n""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] +@pytest.mark.runs_on(["cpu"]) def test_llama3_multi_tool_extractor(): formatter = ToolFormatter(tool_format="llama3") result = ( @@ -172,6 +189,7 @@ def test_llama3_multi_tool_extractor(): ] +@pytest.mark.runs_on(["cpu"]) def test_mistral_function_formatter(): formatter = FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", ""], tool_format="mistral") tool_calls = json.dumps(FUNCTION) @@ -181,6 +199,7 @@ def test_mistral_function_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_mistral_multi_function_formatter(): formatter = FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", ""], tool_format="mistral") tool_calls = json.dumps([FUNCTION] * 2) @@ -192,6 +211,7 @@ def test_mistral_multi_function_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_mistral_tool_formatter(): formatter = ToolFormatter(tool_format="mistral") wrapped_tool = {"type": "function", "function": TOOLS[0]} @@ -200,12 +220,14 @@ def test_mistral_tool_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_mistral_tool_extractor(): formatter = ToolFormatter(tool_format="mistral") result = """{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] +@pytest.mark.runs_on(["cpu"]) def test_mistral_multi_tool_extractor(): formatter = ToolFormatter(tool_format="mistral") result = ( @@ -218,6 +240,7 @@ def test_mistral_multi_tool_extractor(): ] +@pytest.mark.runs_on(["cpu"]) def 
test_qwen_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen") tool_calls = json.dumps(FUNCTION) @@ -226,6 +249,7 @@ def test_qwen_function_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_qwen_multi_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen") tool_calls = json.dumps([FUNCTION] * 2) @@ -236,6 +260,7 @@ def test_qwen_multi_function_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_qwen_tool_formatter(): formatter = ToolFormatter(tool_format="qwen") wrapped_tool = {"type": "function", "function": TOOLS[0]} @@ -249,12 +274,14 @@ def test_qwen_tool_formatter(): ] +@pytest.mark.runs_on(["cpu"]) def test_qwen_tool_extractor(): formatter = ToolFormatter(tool_format="qwen") result = """\n{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}\n""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] +@pytest.mark.runs_on(["cpu"]) def test_qwen_multi_tool_extractor(): formatter = ToolFormatter(tool_format="qwen") result = ( diff --git a/tests/data/test_loader.py b/tests/data/test_loader.py index b45bdaad..9546cf4f 100644 --- a/tests/data/test_loader.py +++ b/tests/data/test_loader.py @@ -14,6 +14,8 @@ import os +import pytest + from llamafactory.train.test_utils import load_dataset_module @@ -38,18 +40,21 @@ TRAIN_ARGS = { } +@pytest.mark.runs_on(["cpu"]) def test_load_train_only(): dataset_module = load_dataset_module(**TRAIN_ARGS) assert dataset_module.get("train_dataset") is not None assert dataset_module.get("eval_dataset") is None +@pytest.mark.runs_on(["cpu"]) def test_load_val_size(): dataset_module = load_dataset_module(val_size=0.1, **TRAIN_ARGS) assert dataset_module.get("train_dataset") is not None assert dataset_module.get("eval_dataset") is not None +@pytest.mark.runs_on(["cpu"]) def test_load_eval_data(): dataset_module = load_dataset_module(eval_dataset=TINY_DATA, **TRAIN_ARGS) assert dataset_module.get("train_dataset") is not None diff --git a/tests/data/test_mm_plugin.py b/tests/data/test_mm_plugin.py index 4b702e83..6efc9e43 100644 --- a/tests/data/test_mm_plugin.py +++ b/tests/data/test_mm_plugin.py @@ -179,6 +179,7 @@ def _check_plugin( ) +@pytest.mark.runs_on(["cpu"]) def test_base_plugin(): tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA3) base_plugin = get_mm_plugin(name="base") @@ -186,6 +187,7 @@ def test_base_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") @pytest.mark.skipif(not is_transformers_version_greater_than("4.50.0"), reason="Requires transformers>=4.50.0") def test_gemma3_plugin(): @@ -208,6 +210,7 @@ def test_gemma3_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0") def test_internvl_plugin(): image_seqlen = 256 @@ -226,6 +229,7 @@ def test_internvl_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.51.0"), reason="Requires transformers>=4.51.0") def test_llama4_plugin(): tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA4) @@ -247,6 +251,7 @@ def test_llama4_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) def test_llava_plugin(): image_seqlen = 576 tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-1.5-7b-hf") @@ 
-260,6 +265,7 @@ def test_llava_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) def test_llava_next_plugin(): image_seqlen = 1176 tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-v1.6-vicuna-7b-hf") @@ -273,6 +279,7 @@ def test_llava_next_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) def test_llava_next_video_plugin(): image_seqlen = 1176 tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/LLaVA-NeXT-Video-7B-hf") @@ -286,6 +293,7 @@ def test_llava_next_video_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") def test_paligemma_plugin(): image_seqlen = 256 @@ -305,6 +313,7 @@ def test_paligemma_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.50.0"), reason="Requires transformers>=4.50.0") def test_pixtral_plugin(): image_slice_height, image_slice_width = 2, 2 @@ -327,6 +336,7 @@ def test_pixtral_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0") def test_qwen2_omni_plugin(): image_seqlen, audio_seqlen = 4, 2 @@ -357,6 +367,7 @@ def test_qwen2_omni_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) def test_qwen2_vl_plugin(): image_seqlen = 4 tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen2-VL-7B-Instruct") @@ -373,6 +384,7 @@ def test_qwen2_vl_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.57.0"), reason="Requires transformers>=4.57.0") def test_qwen3_vl_plugin(): frame_seqlen = 1 @@ -394,6 +406,7 @@ def test_qwen3_vl_plugin(): _check_plugin(**check_inputs) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.47.0"), reason="Requires transformers>=4.47.0") def test_video_llava_plugin(): image_seqlen = 256 diff --git a/tests/data/test_template.py b/tests/data/test_template.py index dd2deca8..dc510172 100644 --- a/tests/data/test_template.py +++ b/tests/data/test_template.py @@ -89,6 +89,7 @@ def _check_template( _check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str)) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("use_fast", [True, False]) def test_encode_oneturn(use_fast: bool): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast) @@ -104,6 +105,7 @@ def test_encode_oneturn(use_fast: bool): _check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str)) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("use_fast", [True, False]) def test_encode_multiturn(use_fast: bool): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast) @@ -125,6 +127,7 @@ def test_encode_multiturn(use_fast: bool): ) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("use_fast", [True, False]) @pytest.mark.parametrize("cot_messages", [True, False]) @pytest.mark.parametrize("enable_thinking", [True, False, None]) @@ -151,6 +154,7 @@ def test_reasoning_encode_oneturn(use_fast: bool, cot_messages: bool, enable_thi _check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str)) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("use_fast", [True, False]) @pytest.mark.parametrize("cot_messages", [True, False]) 
@pytest.mark.parametrize("enable_thinking", [True, False, None]) @@ -180,6 +184,7 @@ def test_reasoning_encode_multiturn(use_fast: bool, cot_messages: bool, enable_t ) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("use_fast", [True, False]) def test_jinja_template(use_fast: bool): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast) @@ -190,6 +195,7 @@ def test_jinja_template(use_fast: bool): assert tokenizer.apply_chat_template(MESSAGES) == ref_tokenizer.apply_chat_template(MESSAGES) +@pytest.mark.runs_on(["cpu"]) def test_ollama_modelfile(): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3")) @@ -207,12 +213,14 @@ def test_ollama_modelfile(): ) +@pytest.mark.runs_on(["cpu"]) def test_get_stop_token_ids(): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3")) assert set(template.get_stop_token_ids(tokenizer)) == {128008, 128009} +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") @pytest.mark.parametrize("use_fast", [True, False]) def test_gemma_template(use_fast: bool): @@ -226,6 +234,7 @@ def test_gemma_template(use_fast: bool): _check_template("google/gemma-3-4b-it", "gemma", prompt_str, answer_str, use_fast) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") @pytest.mark.parametrize("use_fast", [True, False]) def test_gemma2_template(use_fast: bool): @@ -239,6 +248,7 @@ def test_gemma2_template(use_fast: bool): _check_template("google/gemma-2-2b-it", "gemma2", prompt_str, answer_str, use_fast) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") @pytest.mark.parametrize("use_fast", [True, False]) def test_llama3_template(use_fast: bool): @@ -252,6 +262,7 @@ def test_llama3_template(use_fast: bool): _check_template("meta-llama/Meta-Llama-3-8B-Instruct", "llama3", prompt_str, answer_str, use_fast) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize( "use_fast", [True, pytest.param(False, marks=pytest.mark.xfail(reason="Llama 4 has no slow tokenizer."))] ) @@ -273,6 +284,8 @@ def test_llama4_template(use_fast: bool): pytest.param(False, marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken.")), ], ) + +@pytest.mark.runs_on(["cpu"]) def test_phi4_template(use_fast: bool): prompt_str = ( f"<|im_start|>user<|im_sep|>{MESSAGES[0]['content']}<|im_end|>" @@ -284,6 +297,7 @@ def test_phi4_template(use_fast: bool): _check_template("microsoft/phi-4", "phi4", prompt_str, answer_str, use_fast) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.xfail(not HF_TOKEN, reason="Authorization.") @pytest.mark.parametrize("use_fast", [True, False]) def test_qwen2_5_template(use_fast: bool): @@ -298,6 +312,7 @@ def test_qwen2_5_template(use_fast: bool): _check_template("Qwen/Qwen2.5-7B-Instruct", "qwen", prompt_str, answer_str, use_fast) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize("use_fast", [True, False]) @pytest.mark.parametrize("cot_messages", [True, False]) def test_qwen3_template(use_fast: bool, cot_messages: bool): @@ -317,6 +332,7 @@ def test_qwen3_template(use_fast: bool, cot_messages: bool): _check_template("Qwen/Qwen3-8B", "qwen3", prompt_str, answer_str, use_fast, messages=messages) +@pytest.mark.runs_on(["cpu"]) def test_parse_llama3_template(): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, token=HF_TOKEN) template = parse_template(tokenizer) @@ -330,6 +346,7 
@@ def test_parse_llama3_template(): assert template.default_system == "" +@pytest.mark.runs_on(["cpu"]) @pytest.mark.xfail(not HF_TOKEN, reason="Authorization.") def test_parse_qwen_template(): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN) @@ -342,6 +359,7 @@ def test_parse_qwen_template(): assert template.default_system == "You are Qwen, created by Alibaba Cloud. You are a helpful assistant." +@pytest.mark.runs_on(["cpu"]) @pytest.mark.xfail(not HF_TOKEN, reason="Authorization.") def test_parse_qwen3_template(): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", token=HF_TOKEN) diff --git a/tests/e2e/test_chat.py b/tests/e2e/test_chat.py index 3221b6de..b05c7962 100644 --- a/tests/e2e/test_chat.py +++ b/tests/e2e/test_chat.py @@ -14,6 +14,8 @@ import os +import pytest + from llamafactory.chat import ChatModel @@ -35,11 +37,13 @@ MESSAGES = [ EXPECTED_RESPONSE = "_rho" +@pytest.mark.runs_on(["cpu"]) def test_chat(): chat_model = ChatModel(INFER_ARGS) assert chat_model.chat(MESSAGES)[0].response_text == EXPECTED_RESPONSE +@pytest.mark.runs_on(["cpu"]) def test_stream_chat(): chat_model = ChatModel(INFER_ARGS) response = "" diff --git a/tests/e2e/test_sglang.py b/tests/e2e/test_sglang.py index de9a5c1c..8db1703a 100644 --- a/tests/e2e/test_sglang.py +++ b/tests/e2e/test_sglang.py @@ -39,6 +39,7 @@ MESSAGES = [ ] +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed") def test_chat(): r"""Test the SGLang engine's basic chat functionality.""" @@ -48,6 +49,7 @@ def test_chat(): print(response.response_text) +@pytest.mark.runs_on(["cpu"]) @pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed") def test_stream_chat(): r"""Test the SGLang engine's streaming chat functionality.""" diff --git a/tests/e2e/test_train.py b/tests/e2e/test_train.py index 48f6f79d..0a2a1cba 100644 --- a/tests/e2e/test_train.py +++ b/tests/e2e/test_train.py @@ -48,7 +48,7 @@ INFER_ARGS = { OS_NAME = os.getenv("OS_NAME", "") - +@pytest.mark.runs_on(["cpu"]) @pytest.mark.parametrize( "stage,dataset", [ @@ -65,6 +65,7 @@ def test_run_exp(stage: str, dataset: str): assert os.path.exists(output_dir) +@pytest.mark.runs_on(["cpu"]) def test_export(): export_dir = os.path.join("output", "llama3_export") export_model({"export_dir": export_dir, **INFER_ARGS}) diff --git a/tests/eval/test_eval_template.py b/tests/eval/test_eval_template.py index eddc1640..6f61cc2d 100644 --- a/tests/eval/test_eval_template.py +++ b/tests/eval/test_eval_template.py @@ -12,9 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import pytest + from llamafactory.eval.template import get_eval_template +@pytest.mark.runs_on(["cpu"]) def test_eval_template_en(): support_set = [ { @@ -52,7 +55,7 @@ def test_eval_template_en(): {"role": "assistant", "content": "C"}, ] - +@pytest.mark.runs_on(["cpu"]) def test_eval_template_zh(): support_set = [ { diff --git a/tests/model/model_utils/test_add_tokens.py b/tests/model/model_utils/test_add_tokens.py index cb1c414a..4710819a 100644 --- a/tests/model/model_utils/test_add_tokens.py +++ b/tests/model/model_utils/test_add_tokens.py @@ -25,6 +25,7 @@ TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") UNUSED_TOKEN = "<|UNUSED_TOKEN|>" +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.parametrize("special_tokens", [False, True]) def test_add_tokens(special_tokens: bool): if special_tokens: diff --git a/tests/model/model_utils/test_attention.py b/tests/model/model_utils/test_attention.py index 0063630a..446d8063 100644 --- a/tests/model/model_utils/test_attention.py +++ b/tests/model/model_utils/test_attention.py @@ -29,6 +29,7 @@ INFER_ARGS = { } +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.") def test_attention(): attention_available = ["disabled"] diff --git a/tests/model/model_utils/test_checkpointing.py b/tests/model/model_utils/test_checkpointing.py index 2402e6fb..63df0730 100644 --- a/tests/model/model_utils/test_checkpointing.py +++ b/tests/model/model_utils/test_checkpointing.py @@ -39,6 +39,7 @@ TRAIN_ARGS = { } +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.parametrize("disable_gradient_checkpointing", [False, True]) def test_vanilla_checkpointing(disable_gradient_checkpointing: bool): model = load_train_model(disable_gradient_checkpointing=disable_gradient_checkpointing, **TRAIN_ARGS) @@ -46,12 +47,14 @@ def test_vanilla_checkpointing(disable_gradient_checkpointing: bool): assert getattr(module, "gradient_checkpointing") != disable_gradient_checkpointing +@pytest.mark.runs_on(["cpu","npu"]) def test_unsloth_gradient_checkpointing(): model = load_train_model(use_unsloth_gc=True, **TRAIN_ARGS) for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()): assert module._gradient_checkpointing_func.__self__.__name__ == "UnslothGradientCheckpointing" +@pytest.mark.runs_on(["cpu","npu"]) def test_upcast_layernorm(): model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS) for name, param in model.named_parameters(): @@ -59,6 +62,7 @@ def test_upcast_layernorm(): assert param.dtype == torch.float32 +@pytest.mark.runs_on(["cpu","npu"]) def test_upcast_lmhead_output(): model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS) inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device()) diff --git a/tests/model/model_utils/test_misc.py b/tests/model/model_utils/test_misc.py index b2c8b3bf..537ae4f1 100644 --- a/tests/model/model_utils/test_misc.py +++ b/tests/model/model_utils/test_misc.py @@ -24,6 +24,7 @@ from llamafactory.model.model_utils.misc import find_expanded_modules HF_TOKEN = os.getenv("HF_TOKEN") +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") def test_expanded_modules(): config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct") diff --git a/tests/model/model_utils/test_packing.py b/tests/model/model_utils/test_packing.py index 81e0d66a..6dde0751 100644 --- a/tests/model/model_utils/test_packing.py +++ 
b/tests/model/model_utils/test_packing.py @@ -18,6 +18,7 @@ import torch from llamafactory.model.model_utils.packing import get_seqlens_in_batch, get_unpad_data +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.parametrize( "attention_mask,golden_seq_lens", [ diff --git a/tests/model/model_utils/test_visual.py b/tests/model/model_utils/test_visual.py index fc53b69c..703bbb7f 100644 --- a/tests/model/model_utils/test_visual.py +++ b/tests/model/model_utils/test_visual.py @@ -23,6 +23,7 @@ from llamafactory.hparams import FinetuningArguments, ModelArguments from llamafactory.model.adapter import init_adapter +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.parametrize("freeze_vision_tower", (False, True)) @pytest.mark.parametrize("freeze_multi_modal_projector", (False, True)) @pytest.mark.parametrize("freeze_language_model", (False, True)) @@ -48,6 +49,7 @@ def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bo assert param.requires_grad != freeze_language_model +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False))) def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool): model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct") @@ -80,6 +82,7 @@ def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool): assert (merger_param_name in trainable_params) is False +@pytest.mark.runs_on(["cpu","npu"]) def test_visual_model_save_load(): # check VLM's state dict: https://github.com/huggingface/transformers/pull/38385 model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct") diff --git a/tests/model/test_base.py b/tests/model/test_base.py index 14afff63..382bea2f 100644 --- a/tests/model/test_base.py +++ b/tests/model/test_base.py @@ -29,13 +29,15 @@ INFER_ARGS = { "infer_dtype": "float16", } - +@pytest.mark.runs_on(["cpu","npu"]) +@pytest.mark.skip_on_devices("npu") def test_base(): model = load_infer_model(**INFER_ARGS) ref_model = load_reference_model(TINY_LLAMA3) compare_model(model, ref_model) - +@pytest.mark.runs_on(["cpu","npu"]) +@pytest.mark.skip_on_devices("npu") @pytest.mark.usefixtures("fix_valuehead_cpu_loading") def test_valuehead(): model = load_infer_model(add_valuehead=True, **INFER_ARGS) diff --git a/tests/model/test_freeze.py b/tests/model/test_freeze.py index b82ec88d..9d39ded1 100644 --- a/tests/model/test_freeze.py +++ b/tests/model/test_freeze.py @@ -14,6 +14,7 @@ import os +import pytest import torch from llamafactory.train.test_utils import load_infer_model, load_train_model @@ -43,6 +44,7 @@ INFER_ARGS = { } +@pytest.mark.runs_on(["cpu","npu"]) def test_freeze_train_all_modules(): model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS) for name, param in model.named_parameters(): @@ -54,6 +56,7 @@ def test_freeze_train_all_modules(): assert param.dtype == torch.float16 +@pytest.mark.runs_on(["cpu","npu"]) def test_freeze_train_extra_modules(): model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS) for name, param in model.named_parameters(): @@ -65,6 +68,7 @@ def test_freeze_train_extra_modules(): assert param.dtype == torch.float16 +@pytest.mark.runs_on(["cpu","npu"]) def test_freeze_inference(): model = load_infer_model(**INFER_ARGS) for param in model.parameters(): diff --git a/tests/model/test_full.py b/tests/model/test_full.py index 9058b6ac..3f55f1a0 100644 --- a/tests/model/test_full.py +++ 
b/tests/model/test_full.py @@ -14,6 +14,7 @@ import os +import pytest import torch from llamafactory.train.test_utils import load_infer_model, load_train_model @@ -42,14 +43,14 @@ INFER_ARGS = { "infer_dtype": "float16", } - +@pytest.mark.runs_on(["cpu","npu"]) def test_full_train(): model = load_train_model(**TRAIN_ARGS) for param in model.parameters(): assert param.requires_grad is True assert param.dtype == torch.float32 - +@pytest.mark.runs_on(["cpu","npu"]) def test_full_inference(): model = load_infer_model(**INFER_ARGS) for param in model.parameters(): diff --git a/tests/model/test_lora.py b/tests/model/test_lora.py index 38b6b505..3a855391 100644 --- a/tests/model/test_lora.py +++ b/tests/model/test_lora.py @@ -55,30 +55,35 @@ INFER_ARGS = { } +@pytest.mark.runs_on(["cpu","npu"]) def test_lora_train_qv_modules(): model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS) linear_modules, _ = check_lora_model(model) assert linear_modules == {"q_proj", "v_proj"} +@pytest.mark.runs_on(["cpu","npu"]) def test_lora_train_all_modules(): model = load_train_model(lora_target="all", **TRAIN_ARGS) linear_modules, _ = check_lora_model(model) assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"} +@pytest.mark.runs_on(["cpu","npu"]) def test_lora_train_extra_modules(): model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS) _, extra_modules = check_lora_model(model) assert extra_modules == {"embed_tokens", "lm_head"} +@pytest.mark.runs_on(["cpu","npu"]) def test_lora_train_old_adapters(): model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=False, **TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True) compare_model(model, ref_model) +@pytest.mark.runs_on(["cpu","npu"]) def test_lora_train_new_adapters(): model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=True, **TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True) @@ -87,6 +92,7 @@ def test_lora_train_new_adapters(): ) +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.usefixtures("fix_valuehead_cpu_loading") def test_lora_train_valuehead(): model = load_train_model(add_valuehead=True, **TRAIN_ARGS) @@ -96,7 +102,8 @@ def test_lora_train_valuehead(): assert torch.allclose(state_dict["v_head.summary.weight"], ref_state_dict["v_head.summary.weight"]) assert torch.allclose(state_dict["v_head.summary.bias"], ref_state_dict["v_head.summary.bias"]) - +@pytest.mark.runs_on(["cpu","npu"]) +@pytest.mark.skip_on_devices("npu") def test_lora_inference(): model = load_infer_model(**INFER_ARGS) ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True).merge_and_unload() diff --git a/tests/model/test_pissa.py b/tests/model/test_pissa.py index 3b6101f8..6b830290 100644 --- a/tests/model/test_pissa.py +++ b/tests/model/test_pissa.py @@ -49,13 +49,14 @@ INFER_ARGS = { } +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.xfail(reason="PiSSA initialization is not stable in different platform.") def test_pissa_train(): model = load_train_model(**TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=True) compare_model(model, ref_model) - +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.xfail(reason="Known connection error.") def test_pissa_inference(): model = load_infer_model(**INFER_ARGS) diff --git 
a/tests/train/test_sft_trainer.py b/tests/train/test_sft_trainer.py index 9f6ebe41..1dd2c0e6 100644 --- a/tests/train/test_sft_trainer.py +++ b/tests/train/test_sft_trainer.py @@ -59,6 +59,7 @@ class DataCollatorWithVerbose(DataCollatorWithPadding): return {k: v[:, :1] for k, v in batch.items()} # truncate input length +@pytest.mark.runs_on(["cpu","npu"]) @pytest.mark.parametrize("disable_shuffling", [False, True]) def test_shuffle(disable_shuffling: bool): model_args, data_args, training_args, finetuning_args, _ = get_train_args( diff --git a/tests/utils.py b/tests/utils.py new file mode 100644 index 00000000..6ff92420 --- /dev/null +++ b/tests/utils.py @@ -0,0 +1,18 @@ +# Copyright 2025 the LlamaFactory team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + + +runs_on = pytest.mark.runs_on
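
Note (illustrative sketch, not part of the diff above): how the new `runs_on` marker
registered in tests/conftest.py is intended to be used in a test module. The test
names and bodies below are hypothetical examples, assuming CURRENT_DEVICE resolves
to one of "cpu", "cuda", or "npu" as in conftest.py.

    import pytest

    # Collected everywhere, but skipped unless the current device is "cpu" or "npu".
    @pytest.mark.runs_on(["cpu", "npu"])
    def test_runs_on_cpu_or_npu():
        assert 1 + 1 == 2

    # A bare string is also accepted thanks to the compatibility handling in conftest.
    @pytest.mark.runs_on("cpu")
    def test_runs_on_cpu_only():
        assert True

The `runs_on` name exported from tests/utils.py is a thin alias for
`pytest.mark.runs_on`, so test files may use either form.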