Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-16 20:00:36 +08:00
[test] add npu test yaml and add ascend a3 docker file (#9547)
Co-authored-by: jiaqiw09 <jiaqiw960714@gmail.com>
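The diff below is mechanical: every test picks up a @pytest.mark.runs_on(["cpu","npu"]) decorator so the suite can be filtered by the device backend available on the CI runner (CPU-only hosts versus Ascend NPU hosts). The mark itself is purely declarative; a conftest.py hook has to turn it into skips. Here is a minimal sketch of such a hook, assuming a hypothetical DEVICE environment variable; the repository's actual conftest may differ:

    # conftest.py sketch (hypothetical; the real LLaMA-Factory hook may differ)
    import os

    import pytest


    def pytest_configure(config):
        # register the custom mark so pytest does not emit unknown-mark warnings
        config.addinivalue_line("markers", "runs_on(devices): run only on the listed device types")


    def pytest_collection_modifyitems(config, items):
        current = os.getenv("DEVICE", "cpu")  # hypothetical device selector
        for item in items:
            marker = item.get_closest_marker("runs_on")
            if marker is not None and current not in marker.args[0]:
                item.add_marker(pytest.mark.skip(reason=f"test does not run on {current}"))

Under that assumption, DEVICE=npu pytest tests/ would run exactly the tests marked for NPU, while unmarked tests keep running everywhere.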
@@ -25,6 +25,7 @@ TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
 UNUSED_TOKEN = "<|UNUSED_TOKEN|>"
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 @pytest.mark.parametrize("special_tokens", [False, True])
 def test_add_tokens(special_tokens: bool):
     if special_tokens:

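For context, test_add_tokens exercises vocabulary growth with and without special-token semantics; the if special_tokens: branch mirrors the two standard Transformers calls. A sketch of the two paths using the tiny test model named above (not the test's exact body):

    # sketch: the two tokenizer-growth paths the parametrized test covers
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("llamafactory/tiny-random-Llama-3")
    special_tokens = True  # the test parametrizes over both branches
    if special_tokens:
        # special tokens are excluded from normal text processing
        tok.add_special_tokens({"additional_special_tokens": ["<|UNUSED_TOKEN|>"]})
    else:
        # a plain added token behaves like ordinary vocabulary
        tok.add_tokens(["<|UNUSED_TOKEN|>"])

Either path enlarges the vocabulary, which is why callers typically follow up with model.resize_token_embeddings(len(tok)).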
@@ -29,6 +29,7 @@ INFER_ARGS = {
 }
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 @pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
 def test_attention():
     attention_available = ["disabled"]

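test_attention enumerates which attention backends the checkpoint can be loaded with, and the xfail records that the attention refactor in transformers 4.48 changed how implementations are discovered. For reference, selecting a backend explicitly is a single argument in recent transformers versions:

    # illustration: choosing an attention backend at load time (recent transformers)
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "llamafactory/tiny-random-Llama-3", attn_implementation="sdpa"
    )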
@@ -39,6 +39,7 @@ TRAIN_ARGS = {
 }
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 @pytest.mark.parametrize("disable_gradient_checkpointing", [False, True])
 def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
     model = load_train_model(disable_gradient_checkpointing=disable_gradient_checkpointing, **TRAIN_ARGS)

@@ -46,12 +47,14 @@ def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
         assert getattr(module, "gradient_checkpointing") != disable_gradient_checkpointing
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 def test_unsloth_gradient_checkpointing():
     model = load_train_model(use_unsloth_gc=True, **TRAIN_ARGS)
     for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()):
         assert module._gradient_checkpointing_func.__self__.__name__ == "UnslothGradientCheckpointing"
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 def test_upcast_layernorm():
     model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS)
     for name, param in model.named_parameters():

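The flag these checkpointing tests inspect is the one the standard Transformers API sets on every supporting submodule. A minimal illustration of that mechanism, not the test body itself:

    # illustration: gradient_checkpointing_enable() sets the flag the test loop checks
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("llamafactory/tiny-random-Llama-3")
    model.gradient_checkpointing_enable()
    assert any(getattr(m, "gradient_checkpointing", False) for m in model.modules())

The Unsloth variant swaps in a different checkpointing function, which is why test_unsloth_gradient_checkpointing asserts on the implementing class name rather than on the boolean flag.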
@@ -59,6 +62,7 @@ def test_upcast_layernorm():
             assert param.dtype == torch.float32
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 def test_upcast_lmhead_output():
     model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS)
     inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device())

@@ -24,6 +24,7 @@ from llamafactory.model.model_utils.misc import find_expanded_modules
 HF_TOKEN = os.getenv("HF_TOKEN")
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
 def test_expanded_modules():
     config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

@@ -18,6 +18,7 @@ import torch
 from llamafactory.model.model_utils.packing import get_seqlens_in_batch, get_unpad_data
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 @pytest.mark.parametrize(
     "attention_mask,golden_seq_lens",
     [

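The packing test pairs an attention_mask with "golden" per-sequence lengths. In packed batches, each sequence packed into a row is labeled with an incrementing index (1, 2, 3, ...) rather than plain ones, so lengths can be recovered by counting each label. An inferred example of one parametrized case, reconstructed from the parameter names rather than copied from the file:

    # inferred case: labels 1 and 2 mark two packed sequences, 0 is padding
    import torch

    from llamafactory.model.model_utils.packing import get_seqlens_in_batch

    attention_mask = torch.tensor([[1, 1, 2, 2, 2, 0]])
    # under that labeling convention this should yield lengths [2, 3]
    seqlens = get_seqlens_in_batch(attention_mask)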
@@ -23,6 +23,7 @@ from llamafactory.hparams import FinetuningArguments, ModelArguments
 from llamafactory.model.adapter import init_adapter
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 @pytest.mark.parametrize("freeze_vision_tower", (False, True))
 @pytest.mark.parametrize("freeze_multi_modal_projector", (False, True))
 @pytest.mark.parametrize("freeze_language_model", (False, True))

@@ -48,6 +49,7 @@ def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bo
         assert param.requires_grad != freeze_language_model
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 @pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False)))
 def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
     model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")

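The visual-adapter tests sweep the freeze flags and assert that requires_grad flips for the matching parameter group (vision tower, multi-modal projector, language model). The mechanism under test is ordinary parameter freezing; a self-contained sketch of the idea, independent of init_adapter's internals:

    # generic sketch: freezing one named component of a composite model
    import torch.nn as nn

    model = nn.ModuleDict({"vision_tower": nn.Linear(4, 4), "language_model": nn.Linear(4, 4)})
    freeze_vision_tower = True
    for name, param in model.named_parameters():
        if name.startswith("vision_tower"):
            param.requires_grad = not freeze_vision_tower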
@@ -80,6 +82,7 @@ def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
         assert (merger_param_name in trainable_params) is False
 
 
+@pytest.mark.runs_on(["cpu","npu"])
 def test_visual_model_save_load():
     # check VLM's state dict: https://github.com/huggingface/transformers/pull/38385
     model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")