Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-15 03:10:35 +08:00)

[test] add npu test yaml and add ascend a3 docker file (#9547)

Co-authored-by: jiaqiw09 <jiaqiw960714@gmail.com>
.github/workflows/docker.yml (vendored): 34 changes

@@ -27,14 +27,18 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        device:
-          - "cuda"
-          - "npu"
+        include:
+          - device: "cuda"
+            npu_type: ""
+          - device: "npu"
+            npu_type: "a2"
+          - device: "npu"
+            npu_type: "a3"
 
     runs-on: ubuntu-latest
 
     concurrency:
-      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.device }}
+      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.device }}-${{ matrix.npu_type }}
       cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
 
     environment:

@@ -76,7 +80,7 @@ jobs:
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Login to Quay
-        if: ${{ github.event_name != 'pull_request' && matrix.device == 'npu' }}
+        if: ${{ github.event_name != 'pull_request' && matrix.device == 'npu'}}
         uses: docker/login-action@v3
         with:
           registry: quay.io

@@ -97,8 +101,8 @@ jobs:
           cache-from: type=gha
           cache-to: type=gha,mode=max
 
-      - name: Build and push Docker image (NPU)
-        if: ${{ matrix.device == 'npu' }}
+      - name: Build and push Docker image (NPU-A2)
+        if: ${{ matrix.device == 'npu' && matrix.npu_type == 'a2' }}
         uses: docker/build-push-action@v6
         with:
           context: .

@@ -110,3 +114,19 @@ jobs:
             quay.io/ascend/llamafactory:${{ steps.version.outputs.tag }}-npu-a2
           cache-from: type=gha
           cache-to: type=gha,mode=max
+
+      - name: Build and push Docker image (NPU-A3)
+        if: ${{ matrix.device == 'npu' && matrix.npu_type == 'a3' }}
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          platforms: linux/amd64,linux/arm64
+          file: ./docker/docker-npu/Dockerfile
+          build-args: |
+            BASE_IMAGE=quay.io/ascend/cann:8.3.rc2-a3-ubuntu22.04-py3.11
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: |
+            docker.io/hiyouga/llamafactory:${{ steps.version.outputs.tag }}-npu-a3
+            quay.io/ascend/llamafactory:${{ steps.version.outputs.tag }}-npu-a3
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
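The new NPU-A3 step differs from the A2 step only in the BASE_IMAGE build argument, so the same image can be reproduced locally with plain docker build; a minimal sketch (the -t tag is illustrative, not part of the commit):

    docker build -f ./docker/docker-npu/Dockerfile \
      --build-arg BASE_IMAGE=quay.io/ascend/cann:8.3.rc2-a3-ubuntu22.04-py3.11 \
      -t llamafactory:npu-a3 .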
.github/workflows/tests_npu.yml (vendored, new file): 87 lines

@@ -0,0 +1,87 @@
+name: tests_npu
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - "main"
+    paths:
+      - "**/*.py"
+      - "requirements.txt"
+      - "Makefile"
+      - ".github/workflows/*.yml"
+  pull_request:
+    branches:
+      - "main"
+    paths:
+      - "**/*.py"
+      - "requirements.txt"
+      - "Makefile"
+      - ".github/workflows/*.yml"
+
+jobs:
+  tests:
+    strategy:
+      fail-fast: false
+      matrix:
+        python:
+          - "3.11"
+        os:
+          - "linux-aarch64-a2-4"
+        pytorch_npu:
+          - "2.7.1"
+
+    runs-on: ${{ matrix.os }}
+
+    concurrency:
+      group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.os }}-${{ matrix.python }}
+      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+
+    container:
+      image: ascendai/cann:8.3.rc2-910b-ubuntu22.04-py3.11
+      env:
+        HF_ENDPOINT: https://hf-mirror.com
+        HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        OS_NAME: ${{ matrix.os }}
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          python -m pip install ".[torch-npu,dev]" torch-npu==${{matrix.pytorch_npu}}
+
+      - name: Install node
+        run: |
+          apt-get update || true
+          apt-get install -y curl
+          curl -fsSL https://deb.nodesource.com/setup_20.x | bash -
+          apt-get install -y nodejs
+
+      - name: Cache files
+        id: hf-hub-cache
+        uses: actions/cache@v4
+        with:
+          path: ${{ runner.temp }}/huggingface
+          key: huggingface-${{ matrix.os }}-${{ matrix.python }}-${{ hashFiles('tests/version.txt') }}
+
+      - name: Check quality
+        run: |
+          make style && make quality
+
+      - name: Check license
+        run: |
+          make license
+
+      - name: Check build
+        run: |
+          make build
+
+      - name: Test with pytest
+        run: |
+          make test
+        env:
+          HF_HOME: /root/.cache/huggingface
+          HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}"
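The check and test steps above are thin wrappers around Makefile targets, so they can be reproduced outside CI; this is a sketch that assumes a machine or container with CANN and torch-npu available, mirroring the workflow steps:

    python -m pip install --upgrade pip
    python -m pip install ".[torch-npu,dev]" torch-npu==2.7.1
    make style && make quality
    make license
    make build
    make test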
@@ -1,9 +1,6 @@
 # https://hub.docker.com/r/ascendai/cann/tags
-
-# default base image build for A2, if build for A3, using this image:
-# ARG BASE_IMAGE=ascendai/cann:8.3.rc1-a3-ubuntu22.04-py3.11
-ARG BASE_IMAGE=ascendai/cann:8.3.rc1-910b-ubuntu22.04-py3.11
+ARG BASE_IMAGE=quay.io/ascend/cann:8.3.rc2-910b-ubuntu22.04-py3.11
 
 FROM ${BASE_IMAGE}
 
 # Installation arguments
@@ -1,12 +1,13 @@
 services:
-  llamafactory:
+  llamafactory-a2:
     build:
       dockerfile: ./docker/docker-npu/Dockerfile
       context: ../..
       args:
         PIP_INDEX: https://pypi.org/simple
         EXTRAS: torch-npu,metrics
-    container_name: llamafactory
+    container_name: llamafactory-a2
+    image: llamafactory:npu-a2
     volumes:
       - /usr/local/dcmi:/usr/local/dcmi
       - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi

@@ -26,3 +27,34 @@ services:
       - /dev/devmm_svm
       - /dev/hisi_hdc
     restart: unless-stopped
+
+  llamafactory-a3:
+    profiles: ["a3"]
+    build:
+      dockerfile: ./docker/docker-npu/Dockerfile
+      context: ../..
+      args:
+        BASE_IMAGE: quay.io/ascend/cann:8.3.rc2-a3-ubuntu22.04-py3.11
+        PIP_INDEX: https://pypi.org/simple
+        EXTRAS: torch-npu,metrics
+    container_name: llamafactory-a3
+    image: llamafactory:npu-a3
+    volumes:
+      - /usr/local/dcmi:/usr/local/dcmi
+      - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
+      - /usr/local/Ascend/driver:/usr/local/Ascend/driver
+      - /etc/ascend_install.info:/etc/ascend_install.info
+    ports:
+      - "7861:7860"
+      - "8001:8000"
+    ipc: host
+    tty: true
+    # shm_size: "16gb"  # ipc: host is set
+    stdin_open: true
+    command: bash
+    devices:
+      - /dev/davinci0
+      - /dev/davinci_manager
+      - /dev/devmm_svm
+      - /dev/hisi_hdc
+    restart: unless-stopped
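The new A3 service is gated behind a compose profile, so it is not started by default; a minimal usage sketch, assuming the compose file lives in docker/docker-npu/ as its build context suggests:

    cd docker/docker-npu
    docker compose --profile a3 up -d llamafactory-a3
    docker compose exec llamafactory-a3 bash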
@@ -40,8 +40,38 @@ def pytest_configure(config):
     config.addinivalue_line(
         "markers", "require_device: test requires specific device, e.g., @pytest.mark.require_device('cuda')"
     )
+    config.addinivalue_line(
+        "markers", "runs_on: test requires specific device, e.g., @pytest.mark.runs_on(['cpu'])"
+    )
+
+
+def _handle_runs_on(items):
+    """Skip tests on specified devices based on runs_on marker.
+
+    Usage:
+        # Skip tests on specified devices
+        @pytest.mark.runs_on(['cpu'])
+        def test_something():
+            pass
+    """
+    for item in items:
+        runs_on_marker = item.get_closest_marker("runs_on")
+        if runs_on_marker:
+            runs_on_devices = runs_on_marker.args[0]
+
+            # Compatibility handling: Allow a single string instead of a list
+            # Example: @pytest.mark.("cpu")
+            if isinstance(runs_on_devices, str):
+                runs_on_devices = [runs_on_devices]
+
+            if CURRENT_DEVICE not in runs_on_devices:
+                item.add_marker(
+                    pytest.mark.skip(
+                        reason=f"test requires one of {runs_on_devices} (current: {CURRENT_DEVICE})"
+                    )
+                )
 
 
 def _handle_slow_tests(items):
     """Skip slow tests unless RUN_SLOW environment variable is set.
 

@@ -104,6 +134,7 @@ def pytest_collection_modifyitems(config, items):
     _handle_slow_tests(items)
     _handle_device_skips(items)
     _handle_device_requirements(items)
+    _handle_runs_on(items)
 
 
 @pytest.fixture
@@ -42,6 +42,7 @@ TRAIN_ARGS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu","npu"])
|
||||||
@pytest.mark.parametrize("num_samples", [16])
|
@pytest.mark.parametrize("num_samples", [16])
|
||||||
def test_feedback_data(num_samples: int):
|
def test_feedback_data(num_samples: int):
|
||||||
train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
|
train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
|
||||||
|
|||||||
@@ -51,6 +51,7 @@ def _convert_sharegpt_to_openai(messages: list[dict[str, str]]) -> list[dict[str
|
|||||||
return new_messages
|
return new_messages
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("num_samples", [16])
|
@pytest.mark.parametrize("num_samples", [16])
|
||||||
def test_pairwise_data(num_samples: int):
|
def test_pairwise_data(num_samples: int):
|
||||||
train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
|
train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ import pytest
|
|||||||
from llamafactory.data.processor.processor_utils import infer_seqlen
|
from llamafactory.data.processor.processor_utils import infer_seqlen
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
"test_input,test_output",
|
"test_input,test_output",
|
||||||
[
|
[
|
||||||
|
|||||||
@@ -42,6 +42,7 @@ TRAIN_ARGS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("num_samples", [16])
|
@pytest.mark.parametrize("num_samples", [16])
|
||||||
def test_supervised_single_turn(num_samples: int):
|
def test_supervised_single_turn(num_samples: int):
|
||||||
train_dataset = load_dataset_module(dataset_dir="ONLINE", dataset=TINY_DATA, **TRAIN_ARGS)["train_dataset"]
|
train_dataset = load_dataset_module(dataset_dir="ONLINE", dataset=TINY_DATA, **TRAIN_ARGS)["train_dataset"]
|
||||||
@@ -61,6 +62,7 @@ def test_supervised_single_turn(num_samples: int):
|
|||||||
assert train_dataset["input_ids"][index] == ref_input_ids
|
assert train_dataset["input_ids"][index] == ref_input_ids
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("num_samples", [8])
|
@pytest.mark.parametrize("num_samples", [8])
|
||||||
def test_supervised_multi_turn(num_samples: int):
|
def test_supervised_multi_turn(num_samples: int):
|
||||||
train_dataset = load_dataset_module(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)[
|
train_dataset = load_dataset_module(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)[
|
||||||
@@ -74,6 +76,7 @@ def test_supervised_multi_turn(num_samples: int):
|
|||||||
assert train_dataset["input_ids"][index] == ref_input_ids
|
assert train_dataset["input_ids"][index] == ref_input_ids
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("num_samples", [4])
|
@pytest.mark.parametrize("num_samples", [4])
|
||||||
def test_supervised_train_on_prompt(num_samples: int):
|
def test_supervised_train_on_prompt(num_samples: int):
|
||||||
train_dataset = load_dataset_module(
|
train_dataset = load_dataset_module(
|
||||||
@@ -88,6 +91,7 @@ def test_supervised_train_on_prompt(num_samples: int):
|
|||||||
assert train_dataset["labels"][index] == ref_ids
|
assert train_dataset["labels"][index] == ref_ids
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("num_samples", [4])
|
@pytest.mark.parametrize("num_samples", [4])
|
||||||
def test_supervised_mask_history(num_samples: int):
|
def test_supervised_mask_history(num_samples: int):
|
||||||
train_dataset = load_dataset_module(
|
train_dataset = load_dataset_module(
|
||||||
|
|||||||
@@ -45,6 +45,7 @@ TRAIN_ARGS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("num_samples", [16])
|
@pytest.mark.parametrize("num_samples", [16])
|
||||||
def test_unsupervised_data(num_samples: int):
|
def test_unsupervised_data(num_samples: int):
|
||||||
train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
|
train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
|
||||||
|
|||||||
@@ -14,6 +14,7 @@
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
import pytest
|
||||||
import torch
|
import torch
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
from transformers import AutoConfig, AutoModelForVision2Seq
|
from transformers import AutoConfig, AutoModelForVision2Seq
|
||||||
@@ -28,6 +29,7 @@ from llamafactory.model import load_tokenizer
|
|||||||
TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
|
TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_base_collator():
|
def test_base_collator():
|
||||||
model_args, data_args, *_ = get_infer_args({"model_name_or_path": TINY_LLAMA3, "template": "default"})
|
model_args, data_args, *_ = get_infer_args({"model_name_or_path": TINY_LLAMA3, "template": "default"})
|
||||||
tokenizer_module = load_tokenizer(model_args)
|
tokenizer_module = load_tokenizer(model_args)
|
||||||
@@ -71,6 +73,7 @@ def test_base_collator():
|
|||||||
assert batch_input[k].eq(torch.tensor(expected_input[k])).all()
|
assert batch_input[k].eq(torch.tensor(expected_input[k])).all()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_multimodal_collator():
|
def test_multimodal_collator():
|
||||||
model_args, data_args, *_ = get_infer_args(
|
model_args, data_args, *_ = get_infer_args(
|
||||||
{"model_name_or_path": "Qwen/Qwen2-VL-2B-Instruct", "template": "qwen2_vl"}
|
{"model_name_or_path": "Qwen/Qwen2-VL-2B-Instruct", "template": "qwen2_vl"}
|
||||||
@@ -126,6 +129,7 @@ def test_multimodal_collator():
|
|||||||
assert batch_input[k].eq(torch.tensor(expected_input[k])).all()
|
assert batch_input[k].eq(torch.tensor(expected_input[k])).all()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_4d_attention_mask():
|
def test_4d_attention_mask():
|
||||||
o = 0.0
|
o = 0.0
|
||||||
x = torch.finfo(torch.float16).min
|
x = torch.finfo(torch.float16).min
|
||||||
|
|||||||
@@ -12,12 +12,15 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
from llamafactory.data import Role
|
from llamafactory.data import Role
|
||||||
from llamafactory.data.converter import get_dataset_converter
|
from llamafactory.data.converter import get_dataset_converter
|
||||||
from llamafactory.data.parser import DatasetAttr
|
from llamafactory.data.parser import DatasetAttr
|
||||||
from llamafactory.hparams import DataArguments
|
from llamafactory.hparams import DataArguments
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_alpaca_converter():
|
def test_alpaca_converter():
|
||||||
dataset_attr = DatasetAttr("hf_hub", "llamafactory/tiny-supervised-dataset")
|
dataset_attr = DatasetAttr("hf_hub", "llamafactory/tiny-supervised-dataset")
|
||||||
data_args = DataArguments()
|
data_args = DataArguments()
|
||||||
@@ -38,6 +41,7 @@ def test_alpaca_converter():
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_sharegpt_converter():
|
def test_sharegpt_converter():
|
||||||
dataset_attr = DatasetAttr("hf_hub", "llamafactory/tiny-supervised-dataset")
|
dataset_attr = DatasetAttr("hf_hub", "llamafactory/tiny-supervised-dataset")
|
||||||
data_args = DataArguments()
|
data_args = DataArguments()
|
||||||
|
|||||||
@@ -15,6 +15,8 @@
|
|||||||
import json
|
import json
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
from llamafactory.data.formatter import EmptyFormatter, FunctionFormatter, StringFormatter, ToolFormatter
|
from llamafactory.data.formatter import EmptyFormatter, FunctionFormatter, StringFormatter, ToolFormatter
|
||||||
|
|
||||||
|
|
||||||
@@ -36,16 +38,19 @@ TOOLS = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_empty_formatter():
|
def test_empty_formatter():
|
||||||
formatter = EmptyFormatter(slots=["\n"])
|
formatter = EmptyFormatter(slots=["\n"])
|
||||||
assert formatter.apply() == ["\n"]
|
assert formatter.apply() == ["\n"]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_string_formatter():
|
def test_string_formatter():
|
||||||
formatter = StringFormatter(slots=["<s>", "Human: {{content}}\nAssistant:"])
|
formatter = StringFormatter(slots=["<s>", "Human: {{content}}\nAssistant:"])
|
||||||
assert formatter.apply(content="Hi") == ["<s>", "Human: Hi\nAssistant:"]
|
assert formatter.apply(content="Hi") == ["<s>", "Human: Hi\nAssistant:"]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_function_formatter():
|
def test_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["{{content}}", "</s>"], tool_format="default")
|
formatter = FunctionFormatter(slots=["{{content}}", "</s>"], tool_format="default")
|
||||||
tool_calls = json.dumps(FUNCTION)
|
tool_calls = json.dumps(FUNCTION)
|
||||||
@@ -55,6 +60,7 @@ def test_function_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_multi_function_formatter():
|
def test_multi_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["{{content}}", "</s>"], tool_format="default")
|
formatter = FunctionFormatter(slots=["{{content}}", "</s>"], tool_format="default")
|
||||||
tool_calls = json.dumps([FUNCTION] * 2)
|
tool_calls = json.dumps([FUNCTION] * 2)
|
||||||
@@ -65,6 +71,7 @@ def test_multi_function_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_default_tool_formatter():
|
def test_default_tool_formatter():
|
||||||
formatter = ToolFormatter(tool_format="default")
|
formatter = ToolFormatter(tool_format="default")
|
||||||
assert formatter.apply(content=json.dumps(TOOLS)) == [
|
assert formatter.apply(content=json.dumps(TOOLS)) == [
|
||||||
@@ -83,12 +90,14 @@ def test_default_tool_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_default_tool_extractor():
|
def test_default_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="default")
|
formatter = ToolFormatter(tool_format="default")
|
||||||
result = """Action: test_tool\nAction Input: {"foo": "bar", "size": 10}"""
|
result = """Action: test_tool\nAction Input: {"foo": "bar", "size": 10}"""
|
||||||
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_default_multi_tool_extractor():
|
def test_default_multi_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="default")
|
formatter = ToolFormatter(tool_format="default")
|
||||||
result = (
|
result = (
|
||||||
@@ -101,12 +110,14 @@ def test_default_multi_tool_extractor():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_glm4_function_formatter():
|
def test_glm4_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["{{content}}"], tool_format="glm4")
|
formatter = FunctionFormatter(slots=["{{content}}"], tool_format="glm4")
|
||||||
tool_calls = json.dumps(FUNCTION)
|
tool_calls = json.dumps(FUNCTION)
|
||||||
assert formatter.apply(content=tool_calls) == ["""tool_name\n{"foo": "bar", "size": 10}"""]
|
assert formatter.apply(content=tool_calls) == ["""tool_name\n{"foo": "bar", "size": 10}"""]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_glm4_tool_formatter():
|
def test_glm4_tool_formatter():
|
||||||
formatter = ToolFormatter(tool_format="glm4")
|
formatter = ToolFormatter(tool_format="glm4")
|
||||||
assert formatter.apply(content=json.dumps(TOOLS)) == [
|
assert formatter.apply(content=json.dumps(TOOLS)) == [
|
||||||
@@ -117,12 +128,14 @@ def test_glm4_tool_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_glm4_tool_extractor():
|
def test_glm4_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="glm4")
|
formatter = ToolFormatter(tool_format="glm4")
|
||||||
result = """test_tool\n{"foo": "bar", "size": 10}\n"""
|
result = """test_tool\n{"foo": "bar", "size": 10}\n"""
|
||||||
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_llama3_function_formatter():
|
def test_llama3_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3")
|
formatter = FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3")
|
||||||
tool_calls = json.dumps(FUNCTION)
|
tool_calls = json.dumps(FUNCTION)
|
||||||
@@ -131,6 +144,7 @@ def test_llama3_function_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_llama3_multi_function_formatter():
|
def test_llama3_multi_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3")
|
formatter = FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3")
|
||||||
tool_calls = json.dumps([FUNCTION] * 2)
|
tool_calls = json.dumps([FUNCTION] * 2)
|
||||||
@@ -141,6 +155,7 @@ def test_llama3_multi_function_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_llama3_tool_formatter():
|
def test_llama3_tool_formatter():
|
||||||
formatter = ToolFormatter(tool_format="llama3")
|
formatter = ToolFormatter(tool_format="llama3")
|
||||||
date = datetime.now().strftime("%d %b %Y")
|
date = datetime.now().strftime("%d %b %Y")
|
||||||
@@ -154,12 +169,14 @@ def test_llama3_tool_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_llama3_tool_extractor():
|
def test_llama3_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="llama3")
|
formatter = ToolFormatter(tool_format="llama3")
|
||||||
result = """{"name": "test_tool", "parameters": {"foo": "bar", "size": 10}}\n"""
|
result = """{"name": "test_tool", "parameters": {"foo": "bar", "size": 10}}\n"""
|
||||||
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_llama3_multi_tool_extractor():
|
def test_llama3_multi_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="llama3")
|
formatter = ToolFormatter(tool_format="llama3")
|
||||||
result = (
|
result = (
|
||||||
@@ -172,6 +189,7 @@ def test_llama3_multi_tool_extractor():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_mistral_function_formatter():
|
def test_mistral_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", "</s>"], tool_format="mistral")
|
formatter = FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", "</s>"], tool_format="mistral")
|
||||||
tool_calls = json.dumps(FUNCTION)
|
tool_calls = json.dumps(FUNCTION)
|
||||||
@@ -181,6 +199,7 @@ def test_mistral_function_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_mistral_multi_function_formatter():
|
def test_mistral_multi_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", "</s>"], tool_format="mistral")
|
formatter = FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", "</s>"], tool_format="mistral")
|
||||||
tool_calls = json.dumps([FUNCTION] * 2)
|
tool_calls = json.dumps([FUNCTION] * 2)
|
||||||
@@ -192,6 +211,7 @@ def test_mistral_multi_function_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_mistral_tool_formatter():
|
def test_mistral_tool_formatter():
|
||||||
formatter = ToolFormatter(tool_format="mistral")
|
formatter = ToolFormatter(tool_format="mistral")
|
||||||
wrapped_tool = {"type": "function", "function": TOOLS[0]}
|
wrapped_tool = {"type": "function", "function": TOOLS[0]}
|
||||||
@@ -200,12 +220,14 @@ def test_mistral_tool_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_mistral_tool_extractor():
|
def test_mistral_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="mistral")
|
formatter = ToolFormatter(tool_format="mistral")
|
||||||
result = """{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}"""
|
result = """{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}"""
|
||||||
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_mistral_multi_tool_extractor():
|
def test_mistral_multi_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="mistral")
|
formatter = ToolFormatter(tool_format="mistral")
|
||||||
result = (
|
result = (
|
||||||
@@ -218,6 +240,7 @@ def test_mistral_multi_tool_extractor():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_qwen_function_formatter():
|
def test_qwen_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen")
|
formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen")
|
||||||
tool_calls = json.dumps(FUNCTION)
|
tool_calls = json.dumps(FUNCTION)
|
||||||
@@ -226,6 +249,7 @@ def test_qwen_function_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_qwen_multi_function_formatter():
|
def test_qwen_multi_function_formatter():
|
||||||
formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen")
|
formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen")
|
||||||
tool_calls = json.dumps([FUNCTION] * 2)
|
tool_calls = json.dumps([FUNCTION] * 2)
|
||||||
@@ -236,6 +260,7 @@ def test_qwen_multi_function_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_qwen_tool_formatter():
|
def test_qwen_tool_formatter():
|
||||||
formatter = ToolFormatter(tool_format="qwen")
|
formatter = ToolFormatter(tool_format="qwen")
|
||||||
wrapped_tool = {"type": "function", "function": TOOLS[0]}
|
wrapped_tool = {"type": "function", "function": TOOLS[0]}
|
||||||
@@ -249,12 +274,14 @@ def test_qwen_tool_formatter():
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_qwen_tool_extractor():
|
def test_qwen_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="qwen")
|
formatter = ToolFormatter(tool_format="qwen")
|
||||||
result = """<tool_call>\n{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}\n</tool_call>"""
|
result = """<tool_call>\n{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}\n</tool_call>"""
|
||||||
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_qwen_multi_tool_extractor():
|
def test_qwen_multi_tool_extractor():
|
||||||
formatter = ToolFormatter(tool_format="qwen")
|
formatter = ToolFormatter(tool_format="qwen")
|
||||||
result = (
|
result = (
|
||||||
|
|||||||
@@ -14,6 +14,8 @@
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
from llamafactory.train.test_utils import load_dataset_module
|
from llamafactory.train.test_utils import load_dataset_module
|
||||||
|
|
||||||
|
|
||||||
@@ -38,18 +40,21 @@ TRAIN_ARGS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_load_train_only():
|
def test_load_train_only():
|
||||||
dataset_module = load_dataset_module(**TRAIN_ARGS)
|
dataset_module = load_dataset_module(**TRAIN_ARGS)
|
||||||
assert dataset_module.get("train_dataset") is not None
|
assert dataset_module.get("train_dataset") is not None
|
||||||
assert dataset_module.get("eval_dataset") is None
|
assert dataset_module.get("eval_dataset") is None
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_load_val_size():
|
def test_load_val_size():
|
||||||
dataset_module = load_dataset_module(val_size=0.1, **TRAIN_ARGS)
|
dataset_module = load_dataset_module(val_size=0.1, **TRAIN_ARGS)
|
||||||
assert dataset_module.get("train_dataset") is not None
|
assert dataset_module.get("train_dataset") is not None
|
||||||
assert dataset_module.get("eval_dataset") is not None
|
assert dataset_module.get("eval_dataset") is not None
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_load_eval_data():
|
def test_load_eval_data():
|
||||||
dataset_module = load_dataset_module(eval_dataset=TINY_DATA, **TRAIN_ARGS)
|
dataset_module = load_dataset_module(eval_dataset=TINY_DATA, **TRAIN_ARGS)
|
||||||
assert dataset_module.get("train_dataset") is not None
|
assert dataset_module.get("train_dataset") is not None
|
||||||
|
|||||||
@@ -179,6 +179,7 @@ def _check_plugin(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_base_plugin():
|
def test_base_plugin():
|
||||||
tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA3)
|
tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA3)
|
||||||
base_plugin = get_mm_plugin(name="base")
|
base_plugin = get_mm_plugin(name="base")
|
||||||
@@ -186,6 +187,7 @@ def test_base_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
||||||
@pytest.mark.skipif(not is_transformers_version_greater_than("4.50.0"), reason="Requires transformers>=4.50.0")
|
@pytest.mark.skipif(not is_transformers_version_greater_than("4.50.0"), reason="Requires transformers>=4.50.0")
|
||||||
def test_gemma3_plugin():
|
def test_gemma3_plugin():
|
||||||
@@ -208,6 +210,7 @@ def test_gemma3_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0")
|
@pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0")
|
||||||
def test_internvl_plugin():
|
def test_internvl_plugin():
|
||||||
image_seqlen = 256
|
image_seqlen = 256
|
||||||
@@ -226,6 +229,7 @@ def test_internvl_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not is_transformers_version_greater_than("4.51.0"), reason="Requires transformers>=4.51.0")
|
@pytest.mark.skipif(not is_transformers_version_greater_than("4.51.0"), reason="Requires transformers>=4.51.0")
|
||||||
def test_llama4_plugin():
|
def test_llama4_plugin():
|
||||||
tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA4)
|
tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA4)
|
||||||
@@ -247,6 +251,7 @@ def test_llama4_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_llava_plugin():
|
def test_llava_plugin():
|
||||||
image_seqlen = 576
|
image_seqlen = 576
|
||||||
tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-1.5-7b-hf")
|
tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-1.5-7b-hf")
|
||||||
@@ -260,6 +265,7 @@ def test_llava_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_llava_next_plugin():
|
def test_llava_next_plugin():
|
||||||
image_seqlen = 1176
|
image_seqlen = 1176
|
||||||
tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-v1.6-vicuna-7b-hf")
|
tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-v1.6-vicuna-7b-hf")
|
||||||
@@ -273,6 +279,7 @@ def test_llava_next_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_llava_next_video_plugin():
|
def test_llava_next_video_plugin():
|
||||||
image_seqlen = 1176
|
image_seqlen = 1176
|
||||||
tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/LLaVA-NeXT-Video-7B-hf")
|
tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/LLaVA-NeXT-Video-7B-hf")
|
||||||
@@ -286,6 +293,7 @@ def test_llava_next_video_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
||||||
def test_paligemma_plugin():
|
def test_paligemma_plugin():
|
||||||
image_seqlen = 256
|
image_seqlen = 256
|
||||||
@@ -305,6 +313,7 @@ def test_paligemma_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not is_transformers_version_greater_than("4.50.0"), reason="Requires transformers>=4.50.0")
|
@pytest.mark.skipif(not is_transformers_version_greater_than("4.50.0"), reason="Requires transformers>=4.50.0")
|
||||||
def test_pixtral_plugin():
|
def test_pixtral_plugin():
|
||||||
image_slice_height, image_slice_width = 2, 2
|
image_slice_height, image_slice_width = 2, 2
|
||||||
@@ -327,6 +336,7 @@ def test_pixtral_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0")
|
@pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0")
|
||||||
def test_qwen2_omni_plugin():
|
def test_qwen2_omni_plugin():
|
||||||
image_seqlen, audio_seqlen = 4, 2
|
image_seqlen, audio_seqlen = 4, 2
|
||||||
@@ -357,6 +367,7 @@ def test_qwen2_omni_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_qwen2_vl_plugin():
|
def test_qwen2_vl_plugin():
|
||||||
image_seqlen = 4
|
image_seqlen = 4
|
||||||
tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen2-VL-7B-Instruct")
|
tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen2-VL-7B-Instruct")
|
||||||
@@ -373,6 +384,7 @@ def test_qwen2_vl_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not is_transformers_version_greater_than("4.57.0"), reason="Requires transformers>=4.57.0")
|
@pytest.mark.skipif(not is_transformers_version_greater_than("4.57.0"), reason="Requires transformers>=4.57.0")
|
||||||
def test_qwen3_vl_plugin():
|
def test_qwen3_vl_plugin():
|
||||||
frame_seqlen = 1
|
frame_seqlen = 1
|
||||||
@@ -394,6 +406,7 @@ def test_qwen3_vl_plugin():
|
|||||||
_check_plugin(**check_inputs)
|
_check_plugin(**check_inputs)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not is_transformers_version_greater_than("4.47.0"), reason="Requires transformers>=4.47.0")
|
@pytest.mark.skipif(not is_transformers_version_greater_than("4.47.0"), reason="Requires transformers>=4.47.0")
|
||||||
def test_video_llava_plugin():
|
def test_video_llava_plugin():
|
||||||
image_seqlen = 256
|
image_seqlen = 256
|
||||||
|
|||||||
@@ -89,6 +89,7 @@ def _check_template(
|
|||||||
_check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str))
|
_check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str))
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
def test_encode_oneturn(use_fast: bool):
|
def test_encode_oneturn(use_fast: bool):
|
||||||
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast)
|
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast)
|
||||||
@@ -104,6 +105,7 @@ def test_encode_oneturn(use_fast: bool):
|
|||||||
_check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str))
|
_check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str))
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
def test_encode_multiturn(use_fast: bool):
|
def test_encode_multiturn(use_fast: bool):
|
||||||
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast)
|
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast)
|
||||||
@@ -125,6 +127,7 @@ def test_encode_multiturn(use_fast: bool):
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
@pytest.mark.parametrize("cot_messages", [True, False])
|
@pytest.mark.parametrize("cot_messages", [True, False])
|
||||||
@pytest.mark.parametrize("enable_thinking", [True, False, None])
|
@pytest.mark.parametrize("enable_thinking", [True, False, None])
|
||||||
@@ -151,6 +154,7 @@ def test_reasoning_encode_oneturn(use_fast: bool, cot_messages: bool, enable_thi
|
|||||||
_check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str))
|
_check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str))
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
@pytest.mark.parametrize("cot_messages", [True, False])
|
@pytest.mark.parametrize("cot_messages", [True, False])
|
||||||
@pytest.mark.parametrize("enable_thinking", [True, False, None])
|
@pytest.mark.parametrize("enable_thinking", [True, False, None])
|
||||||
@@ -180,6 +184,7 @@ def test_reasoning_encode_multiturn(use_fast: bool, cot_messages: bool, enable_t
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
def test_jinja_template(use_fast: bool):
|
def test_jinja_template(use_fast: bool):
|
||||||
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast)
|
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast)
|
||||||
@@ -190,6 +195,7 @@ def test_jinja_template(use_fast: bool):
|
|||||||
assert tokenizer.apply_chat_template(MESSAGES) == ref_tokenizer.apply_chat_template(MESSAGES)
|
assert tokenizer.apply_chat_template(MESSAGES) == ref_tokenizer.apply_chat_template(MESSAGES)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_ollama_modelfile():
|
def test_ollama_modelfile():
|
||||||
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
|
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
|
||||||
template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3"))
|
template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3"))
|
||||||
@@ -207,12 +213,14 @@ def test_ollama_modelfile():
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_get_stop_token_ids():
|
def test_get_stop_token_ids():
|
||||||
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
|
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
|
||||||
template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3"))
|
template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3"))
|
||||||
assert set(template.get_stop_token_ids(tokenizer)) == {128008, 128009}
|
assert set(template.get_stop_token_ids(tokenizer)) == {128008, 128009}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
def test_gemma_template(use_fast: bool):
|
def test_gemma_template(use_fast: bool):
|
||||||
@@ -226,6 +234,7 @@ def test_gemma_template(use_fast: bool):
|
|||||||
_check_template("google/gemma-3-4b-it", "gemma", prompt_str, answer_str, use_fast)
|
_check_template("google/gemma-3-4b-it", "gemma", prompt_str, answer_str, use_fast)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
def test_gemma2_template(use_fast: bool):
|
def test_gemma2_template(use_fast: bool):
|
||||||
@@ -239,6 +248,7 @@ def test_gemma2_template(use_fast: bool):
|
|||||||
_check_template("google/gemma-2-2b-it", "gemma2", prompt_str, answer_str, use_fast)
|
_check_template("google/gemma-2-2b-it", "gemma2", prompt_str, answer_str, use_fast)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
def test_llama3_template(use_fast: bool):
|
def test_llama3_template(use_fast: bool):
|
||||||
@@ -252,6 +262,7 @@ def test_llama3_template(use_fast: bool):
|
|||||||
_check_template("meta-llama/Meta-Llama-3-8B-Instruct", "llama3", prompt_str, answer_str, use_fast)
|
_check_template("meta-llama/Meta-Llama-3-8B-Instruct", "llama3", prompt_str, answer_str, use_fast)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
"use_fast", [True, pytest.param(False, marks=pytest.mark.xfail(reason="Llama 4 has no slow tokenizer."))]
|
"use_fast", [True, pytest.param(False, marks=pytest.mark.xfail(reason="Llama 4 has no slow tokenizer."))]
|
||||||
)
|
)
|
||||||
@@ -273,6 +284,8 @@ def test_llama4_template(use_fast: bool):
|
|||||||
pytest.param(False, marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken.")),
|
pytest.param(False, marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken.")),
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_phi4_template(use_fast: bool):
|
def test_phi4_template(use_fast: bool):
|
||||||
prompt_str = (
|
prompt_str = (
|
||||||
f"<|im_start|>user<|im_sep|>{MESSAGES[0]['content']}<|im_end|>"
|
f"<|im_start|>user<|im_sep|>{MESSAGES[0]['content']}<|im_end|>"
|
||||||
@@ -284,6 +297,7 @@ def test_phi4_template(use_fast: bool):
|
|||||||
_check_template("microsoft/phi-4", "phi4", prompt_str, answer_str, use_fast)
|
_check_template("microsoft/phi-4", "phi4", prompt_str, answer_str, use_fast)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
|
@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
def test_qwen2_5_template(use_fast: bool):
|
def test_qwen2_5_template(use_fast: bool):
|
||||||
@@ -298,6 +312,7 @@ def test_qwen2_5_template(use_fast: bool):
|
|||||||
_check_template("Qwen/Qwen2.5-7B-Instruct", "qwen", prompt_str, answer_str, use_fast)
|
_check_template("Qwen/Qwen2.5-7B-Instruct", "qwen", prompt_str, answer_str, use_fast)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize("use_fast", [True, False])
|
@pytest.mark.parametrize("use_fast", [True, False])
|
||||||
@pytest.mark.parametrize("cot_messages", [True, False])
|
@pytest.mark.parametrize("cot_messages", [True, False])
|
||||||
def test_qwen3_template(use_fast: bool, cot_messages: bool):
|
def test_qwen3_template(use_fast: bool, cot_messages: bool):
|
||||||
@@ -317,6 +332,7 @@ def test_qwen3_template(use_fast: bool, cot_messages: bool):
|
|||||||
_check_template("Qwen/Qwen3-8B", "qwen3", prompt_str, answer_str, use_fast, messages=messages)
|
_check_template("Qwen/Qwen3-8B", "qwen3", prompt_str, answer_str, use_fast, messages=messages)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_parse_llama3_template():
|
def test_parse_llama3_template():
|
||||||
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, token=HF_TOKEN)
|
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, token=HF_TOKEN)
|
||||||
template = parse_template(tokenizer)
|
template = parse_template(tokenizer)
|
||||||
@@ -330,6 +346,7 @@ def test_parse_llama3_template():
|
|||||||
assert template.default_system == ""
|
assert template.default_system == ""
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
|
@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
|
||||||
def test_parse_qwen_template():
|
def test_parse_qwen_template():
|
||||||
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
|
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN)
|
||||||
@@ -342,6 +359,7 @@ def test_parse_qwen_template():
|
|||||||
assert template.default_system == "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."
|
assert template.default_system == "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
|
@pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")
|
||||||
def test_parse_qwen3_template():
|
def test_parse_qwen3_template():
|
||||||
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", token=HF_TOKEN)
|
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", token=HF_TOKEN)
|
||||||
|
|||||||
@@ -14,6 +14,8 @@
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
from llamafactory.chat import ChatModel
|
from llamafactory.chat import ChatModel
|
||||||
|
|
||||||
|
|
||||||
@@ -35,11 +37,13 @@ MESSAGES = [
|
|||||||
EXPECTED_RESPONSE = "_rho"
|
EXPECTED_RESPONSE = "_rho"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_chat():
|
def test_chat():
|
||||||
chat_model = ChatModel(INFER_ARGS)
|
chat_model = ChatModel(INFER_ARGS)
|
||||||
assert chat_model.chat(MESSAGES)[0].response_text == EXPECTED_RESPONSE
|
assert chat_model.chat(MESSAGES)[0].response_text == EXPECTED_RESPONSE
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_stream_chat():
|
def test_stream_chat():
|
||||||
chat_model = ChatModel(INFER_ARGS)
|
chat_model = ChatModel(INFER_ARGS)
|
||||||
response = ""
|
response = ""
|
||||||
|
|||||||
@@ -39,6 +39,7 @@ MESSAGES = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed")
|
@pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed")
|
||||||
def test_chat():
|
def test_chat():
|
||||||
r"""Test the SGLang engine's basic chat functionality."""
|
r"""Test the SGLang engine's basic chat functionality."""
|
||||||
@@ -48,6 +49,7 @@ def test_chat():
|
|||||||
print(response.response_text)
|
print(response.response_text)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed")
|
@pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed")
|
||||||
def test_stream_chat():
|
def test_stream_chat():
|
||||||
r"""Test the SGLang engine's streaming chat functionality."""
|
r"""Test the SGLang engine's streaming chat functionality."""
|
||||||
|
|||||||
@@ -48,7 +48,7 @@ INFER_ARGS = {
|
|||||||
|
|
||||||
OS_NAME = os.getenv("OS_NAME", "")
|
OS_NAME = os.getenv("OS_NAME", "")
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
"stage,dataset",
|
"stage,dataset",
|
||||||
[
|
[
|
||||||
@@ -65,6 +65,7 @@ def test_run_exp(stage: str, dataset: str):
|
|||||||
assert os.path.exists(output_dir)
|
assert os.path.exists(output_dir)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_export():
|
def test_export():
|
||||||
export_dir = os.path.join("output", "llama3_export")
|
export_dir = os.path.join("output", "llama3_export")
|
||||||
export_model({"export_dir": export_dir, **INFER_ARGS})
|
export_model({"export_dir": export_dir, **INFER_ARGS})
|
||||||
|
|||||||
@@ -12,9 +12,12 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
from llamafactory.eval.template import get_eval_template
|
from llamafactory.eval.template import get_eval_template
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_eval_template_en():
|
def test_eval_template_en():
|
||||||
support_set = [
|
support_set = [
|
||||||
{
|
{
|
||||||
@@ -52,7 +55,7 @@ def test_eval_template_en():
|
|||||||
{"role": "assistant", "content": "C"},
|
{"role": "assistant", "content": "C"},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu"])
|
||||||
def test_eval_template_zh():
|
def test_eval_template_zh():
|
||||||
support_set = [
|
support_set = [
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -25,6 +25,7 @@ TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
|
|||||||
UNUSED_TOKEN = "<|UNUSED_TOKEN|>"
|
UNUSED_TOKEN = "<|UNUSED_TOKEN|>"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu","npu"])
|
||||||
@pytest.mark.parametrize("special_tokens", [False, True])
|
@pytest.mark.parametrize("special_tokens", [False, True])
|
||||||
def test_add_tokens(special_tokens: bool):
|
def test_add_tokens(special_tokens: bool):
|
||||||
if special_tokens:
|
if special_tokens:
|
||||||
|
|||||||
@@ -29,6 +29,7 @@ INFER_ARGS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.runs_on(["cpu","npu"])
|
||||||
@pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
|
@pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
|
||||||
def test_attention():
|
def test_attention():
|
||||||
attention_available = ["disabled"]
|
attention_available = ["disabled"]
|
||||||
|
|||||||
@@ -39,6 +39,7 @@ TRAIN_ARGS = {
}


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.parametrize("disable_gradient_checkpointing", [False, True])
def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
    model = load_train_model(disable_gradient_checkpointing=disable_gradient_checkpointing, **TRAIN_ARGS)
@@ -46,12 +47,14 @@ def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
        assert getattr(module, "gradient_checkpointing") != disable_gradient_checkpointing


@pytest.mark.runs_on(["cpu","npu"])
def test_unsloth_gradient_checkpointing():
    model = load_train_model(use_unsloth_gc=True, **TRAIN_ARGS)
    for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()):
        assert module._gradient_checkpointing_func.__self__.__name__ == "UnslothGradientCheckpointing"


@pytest.mark.runs_on(["cpu","npu"])
def test_upcast_layernorm():
    model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS)
    for name, param in model.named_parameters():
@@ -59,6 +62,7 @@ def test_upcast_layernorm():
            assert param.dtype == torch.float32


@pytest.mark.runs_on(["cpu","npu"])
def test_upcast_lmhead_output():
    model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS)
    inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device())
@@ -24,6 +24,7 @@ from llamafactory.model.model_utils.misc import find_expanded_modules
HF_TOKEN = os.getenv("HF_TOKEN")


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
def test_expanded_modules():
    config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
@@ -18,6 +18,7 @@ import torch
from llamafactory.model.model_utils.packing import get_seqlens_in_batch, get_unpad_data


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.parametrize(
    "attention_mask,golden_seq_lens",
    [
@@ -23,6 +23,7 @@ from llamafactory.hparams import FinetuningArguments, ModelArguments
from llamafactory.model.adapter import init_adapter


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.parametrize("freeze_vision_tower", (False, True))
@pytest.mark.parametrize("freeze_multi_modal_projector", (False, True))
@pytest.mark.parametrize("freeze_language_model", (False, True))
@@ -48,6 +49,7 @@ def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bo
            assert param.requires_grad != freeze_language_model


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False)))
def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
    model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
@@ -80,6 +82,7 @@ def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
        assert (merger_param_name in trainable_params) is False


@pytest.mark.runs_on(["cpu","npu"])
def test_visual_model_save_load():
    # check VLM's state dict: https://github.com/huggingface/transformers/pull/38385
    model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
@@ -29,13 +29,15 @@ INFER_ARGS = {
    "infer_dtype": "float16",
}


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.skip_on_devices("npu")
def test_base():
    model = load_infer_model(**INFER_ARGS)
    ref_model = load_reference_model(TINY_LLAMA3)
    compare_model(model, ref_model)


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.skip_on_devices("npu")
@pytest.mark.usefixtures("fix_valuehead_cpu_loading")
def test_valuehead():
    model = load_infer_model(add_valuehead=True, **INFER_ARGS)
@@ -14,6 +14,7 @@

import os

import pytest
import torch

from llamafactory.train.test_utils import load_infer_model, load_train_model
@@ -43,6 +44,7 @@ INFER_ARGS = {
}


@pytest.mark.runs_on(["cpu","npu"])
def test_freeze_train_all_modules():
    model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS)
    for name, param in model.named_parameters():
@@ -54,6 +56,7 @@ def test_freeze_train_all_modules():
            assert param.dtype == torch.float16


@pytest.mark.runs_on(["cpu","npu"])
def test_freeze_train_extra_modules():
    model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS)
    for name, param in model.named_parameters():
@@ -65,6 +68,7 @@ def test_freeze_train_extra_modules():
            assert param.dtype == torch.float16


@pytest.mark.runs_on(["cpu","npu"])
def test_freeze_inference():
    model = load_infer_model(**INFER_ARGS)
    for param in model.parameters():
@@ -14,6 +14,7 @@

import os

import pytest
import torch

from llamafactory.train.test_utils import load_infer_model, load_train_model
@@ -42,14 +43,14 @@ INFER_ARGS = {
    "infer_dtype": "float16",
}


@pytest.mark.runs_on(["cpu","npu"])
def test_full_train():
    model = load_train_model(**TRAIN_ARGS)
    for param in model.parameters():
        assert param.requires_grad is True
        assert param.dtype == torch.float32


@pytest.mark.runs_on(["cpu","npu"])
def test_full_inference():
    model = load_infer_model(**INFER_ARGS)
    for param in model.parameters():
@@ -55,30 +55,35 @@ INFER_ARGS = {
}


@pytest.mark.runs_on(["cpu","npu"])
def test_lora_train_qv_modules():
    model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS)
    linear_modules, _ = check_lora_model(model)
    assert linear_modules == {"q_proj", "v_proj"}


@pytest.mark.runs_on(["cpu","npu"])
def test_lora_train_all_modules():
    model = load_train_model(lora_target="all", **TRAIN_ARGS)
    linear_modules, _ = check_lora_model(model)
    assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"}


@pytest.mark.runs_on(["cpu","npu"])
def test_lora_train_extra_modules():
    model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS)
    _, extra_modules = check_lora_model(model)
    assert extra_modules == {"embed_tokens", "lm_head"}


@pytest.mark.runs_on(["cpu","npu"])
def test_lora_train_old_adapters():
    model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=False, **TRAIN_ARGS)
    ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
    compare_model(model, ref_model)


@pytest.mark.runs_on(["cpu","npu"])
def test_lora_train_new_adapters():
    model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=True, **TRAIN_ARGS)
    ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
@@ -87,6 +92,7 @@ def test_lora_train_new_adapters():
    )


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.usefixtures("fix_valuehead_cpu_loading")
def test_lora_train_valuehead():
    model = load_train_model(add_valuehead=True, **TRAIN_ARGS)
@@ -96,7 +102,8 @@ def test_lora_train_valuehead():
    assert torch.allclose(state_dict["v_head.summary.weight"], ref_state_dict["v_head.summary.weight"])
    assert torch.allclose(state_dict["v_head.summary.bias"], ref_state_dict["v_head.summary.bias"])


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.skip_on_devices("npu")
def test_lora_inference():
    model = load_infer_model(**INFER_ARGS)
    ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True).merge_and_unload()
@@ -49,13 +49,14 @@ INFER_ARGS = {
}


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.xfail(reason="PiSSA initialization is not stable in different platform.")
def test_pissa_train():
    model = load_train_model(**TRAIN_ARGS)
    ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=True)
    compare_model(model, ref_model)


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.xfail(reason="Known connection error.")
def test_pissa_inference():
    model = load_infer_model(**INFER_ARGS)
@@ -59,6 +59,7 @@ class DataCollatorWithVerbose(DataCollatorWithPadding):
        return {k: v[:, :1] for k, v in batch.items()}  # truncate input length


@pytest.mark.runs_on(["cpu","npu"])
@pytest.mark.parametrize("disable_shuffling", [False, True])
def test_shuffle(disable_shuffling: bool):
    model_args, data_args, training_args, finetuning_args, _ = get_train_args(
18
tests/utils.py
Normal file
@@ -0,0 +1,18 @@
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest


runs_on = pytest.mark.runs_on
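
Note that the new tests/utils.py only aliases the marker (runs_on = pytest.mark.runs_on); the logic that actually interprets runs_on and skip_on_devices presumably lives in the suite's shared conftest, which is not part of this excerpt. Below is a minimal sketch of how such markers are commonly resolved at collection time. The _current_device() helper, the device names it returns, and the hook bodies are illustrative assumptions, not the repository's implementation.

# conftest.py -- illustrative sketch only; helper names and device mapping are assumed.
import pytest
import torch
from transformers.utils import is_torch_npu_available


def _current_device() -> str:
    # Assumption: classify the host accelerator with the device names used by the markers.
    if is_torch_npu_available():
        return "npu"
    if torch.cuda.is_available():
        return "cuda"
    return "cpu"


def pytest_configure(config):
    # Register the custom markers so pytest does not warn about unknown marks.
    config.addinivalue_line("markers", "runs_on(devices): run the test only on the listed devices")
    config.addinivalue_line("markers", "skip_on_devices(*devices): skip the test on the listed devices")


def pytest_collection_modifyitems(config, items):
    device = _current_device()
    for item in items:
        runs_on_marker = item.get_closest_marker("runs_on")
        if runs_on_marker is not None and device not in runs_on_marker.args[0]:
            item.add_marker(pytest.mark.skip(reason=f"requires one of {runs_on_marker.args[0]}"))

        skip_marker = item.get_closest_marker("skip_on_devices")
        if skip_marker is not None and device in skip_marker.args:
            item.add_marker(pytest.mark.skip(reason=f"skipped on {device}"))

Under that reading, a test module can import the alias from tests/utils.py (exact import path presumed) and write @runs_on(["cpu", "npu"]) instead of spelling out pytest.mark, while the float16 weight-comparison tests in this diff (test_base, test_valuehead, test_lora_inference) remain excluded on Ascend runners through skip_on_devices("npu").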