mirror of https://github.com/hiyouga/LLaMA-Factory.git
synced 2025-12-15 19:30:36 +08:00
[misc] lint (#9593)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

.github/workflows/tests_npu.yml (vendored)
@@ -84,4 +84,4 @@ jobs:
           make test
         env:
           HF_HOME: /root/.cache/huggingface
           HF_HUB_OFFLINE: "${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}"
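A note on the HF_HUB_OFFLINE line above: the Actions expression `${{ steps.hf-hub-cache.outputs.cache-hit == 'true' && '1' || '0' }}` is the usual ternary idiom, so the NPU test job runs in offline mode only when the Hugging Face cache step reported a hit. A rough Python equivalent of that expression (the function and variable names below are illustrative, not part of the workflow):

    def hf_hub_offline_flag(cache_hit_output: str) -> str:
        # Mirrors the `cache-hit == 'true' && '1' || '0'` workflow expression:
        # offline mode is enabled only when the HF hub cache was restored.
        return "1" if cache_hit_output == "true" else "0"

    assert hf_hub_offline_flag("true") == "1"
    assert hf_hub_offline_flag("false") == "0"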
@@ -19,4 +19,4 @@ same_network: true
 tpu_env: []
 tpu_use_cluster: false
 tpu_use_sudo: false
 use_cpu: false
@@ -39,4 +39,4 @@ warmup_ratio: 0.1
 bf16: true
 ddp_timeout: 180000000
 resume_from_checkpoint: null
 seed: 1234
@@ -18,8 +18,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
 import inspect
+import os
 from functools import WRAPPER_ASSIGNMENTS, partial, wraps
 from types import MethodType
 from typing import TYPE_CHECKING, Any, Callable, Optional, Union
@@ -156,11 +156,9 @@ def prepare_model_for_training(model: "PreTrainedModel", model_args: "ModelArgum
     if (
         os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true"
         and int(os.environ.get("FSDP_VERSION", "1")) == 2
     ):
         model_args.use_reentrant_gc = False
-        logger.warning_rank0(
-            "You are using fsdp2, `use_reentrant_gc` has been set to False. "
-        )
+        logger.warning_rank0("You are using fsdp2, `use_reentrant_gc` has been set to False.")

     if not model_args.disable_gradient_checkpointing:
         if not getattr(model, "supports_gradient_checkpointing", False):
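Behavior in the hunk above is unchanged: when Accelerate launches FSDP (ACCELERATE_USE_FSDP=true) and FSDP_VERSION is 2, `use_reentrant_gc` is forced to False and a warning is logged, so gradient checkpointing falls back to the non-reentrant path; the lint only collapses the warning call onto one line and drops a trailing space inside the message. A self-contained sketch of the same environment-variable gate (the helper name is invented for illustration):

    import os

    def fsdp2_forces_non_reentrant_gc() -> bool:
        # True exactly in the case where the hunk sets use_reentrant_gc = False:
        # Accelerate is driving FSDP and FSDP_VERSION selects version 2.
        use_fsdp = os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true"
        return use_fsdp and int(os.environ.get("FSDP_VERSION", "1")) == 2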
@@ -28,6 +28,7 @@ from ..trainer_utils import create_modelcard_and_push
 from .metric import ComputeAccuracy, ComputeSimilarity, eval_logit_processor
 from .trainer import CustomSeq2SeqTrainer

+
 if TYPE_CHECKING:
     from transformers import Seq2SeqTrainingArguments, TrainerCallback

@@ -144,4 +145,4 @@ def run_sft(
         trainer.save_predictions(dataset_module["eval_dataset"], predict_results, generating_args.skip_special_tokens)

     # Create model card
     create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args)
@@ -1 +0,0 @@
-
@@ -40,9 +40,7 @@ def pytest_configure(config):
     config.addinivalue_line(
         "markers", "require_device: test requires specific device, e.g., @pytest.mark.require_device('cuda')"
     )
-    config.addinivalue_line(
-        "markers", "runs_on: test requires specific device, e.g., @pytest.mark.runs_on(['cpu'])"
-    )
+    config.addinivalue_line("markers", "runs_on: test requires specific device, e.g., @pytest.mark.runs_on(['cpu'])")


 def _handle_runs_on(items):
@@ -64,14 +62,12 @@ def _handle_runs_on(items):
         if isinstance(runs_on_devices, str):
             runs_on_devices = [runs_on_devices]

         if CURRENT_DEVICE not in runs_on_devices:
             item.add_marker(
-                pytest.mark.skip(
-                    reason=f"test requires one of {runs_on_devices} (current: {CURRENT_DEVICE})"
-                )
+                pytest.mark.skip(reason=f"test requires one of {runs_on_devices} (current: {CURRENT_DEVICE})")
             )


 def _handle_slow_tests(items):
     """Skip slow tests unless RUN_SLOW environment variable is set.

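Both conftest hunks concern the project's custom `runs_on` marker: `pytest_configure` registers it, and `_handle_runs_on` attaches a skip marker to any collected test whose device list excludes the current device. A minimal, self-contained sketch of how such a helper plugs into pytest collection (the hook below and the hard-coded CURRENT_DEVICE are illustrative; the diff only shows the helper itself):

    import pytest

    CURRENT_DEVICE = "cpu"  # illustrative; the real value is detected at runtime

    def pytest_collection_modifyitems(config, items):
        for item in items:
            marker = item.get_closest_marker("runs_on")
            if marker is None:
                continue

            runs_on_devices = marker.args[0]
            if isinstance(runs_on_devices, str):
                runs_on_devices = [runs_on_devices]

            if CURRENT_DEVICE not in runs_on_devices:
                item.add_marker(
                    pytest.mark.skip(reason=f"test requires one of {runs_on_devices} (current: {CURRENT_DEVICE})")
                )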
@@ -42,7 +42,7 @@ TRAIN_ARGS = {
 }


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("num_samples", [16])
 def test_feedback_data(num_samples: int):
     train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
@@ -284,7 +284,6 @@ def test_llama4_template(use_fast: bool):
         pytest.param(False, marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken.")),
     ],
 )
-
 @pytest.mark.runs_on(["cpu"])
 def test_phi4_template(use_fast: bool):
     prompt_str = (
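The only change in this hunk is the removal of the blank line between the parametrize block and the `@pytest.mark.runs_on` decorator. The surrounding pattern, marking a single parametrized case with `pytest.param(..., marks=pytest.mark.xfail(...))`, is standard pytest; a small standalone example of the idiom (the test body below is a placeholder, not the project's test):

    import pytest

    @pytest.mark.parametrize(
        "use_fast",
        [
            True,
            pytest.param(False, marks=pytest.mark.xfail(reason="slow tokenizer known to be broken")),
        ],
    )
    def test_tokenizer_variant(use_fast: bool):
        # The fast-tokenizer case must pass; the slow case is allowed to fail.
        assert isinstance(use_fast, bool)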
@@ -48,6 +48,7 @@ INFER_ARGS = {

 OS_NAME = os.getenv("OS_NAME", "")

+
 @pytest.mark.runs_on(["cpu"])
 @pytest.mark.parametrize(
     "stage,dataset",
@@ -55,6 +55,7 @@ def test_eval_template_en():
         {"role": "assistant", "content": "C"},
     ]

+
 @pytest.mark.runs_on(["cpu"])
 def test_eval_template_zh():
     support_set = [
@@ -25,7 +25,7 @@ TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")
 UNUSED_TOKEN = "<|UNUSED_TOKEN|>"


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("special_tokens", [False, True])
 def test_add_tokens(special_tokens: bool):
     if special_tokens:
@@ -17,13 +17,16 @@ import os
 import pytest
 from transformers.utils import is_flash_attn_2_available

+
 # Compatible with Transformers v4 and Transformers v5
 try:
     from transformers.utils import is_torch_sdpa_available
 except ImportError:
+
     def is_torch_sdpa_available():
         return True

+
 from llamafactory.extras.packages import is_transformers_version_greater_than
 from llamafactory.train.test_utils import load_infer_model

@@ -36,7 +39,7 @@ INFER_ARGS = {
 }


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
 def test_attention():
     attention_available = ["disabled"]
@@ -39,7 +39,7 @@ TRAIN_ARGS = {
 }


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("disable_gradient_checkpointing", [False, True])
 def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
     model = load_train_model(disable_gradient_checkpointing=disable_gradient_checkpointing, **TRAIN_ARGS)
@@ -47,14 +47,14 @@ def test_vanilla_checkpointing(disable_gradient_checkpointing: bool):
         assert getattr(module, "gradient_checkpointing") != disable_gradient_checkpointing


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_unsloth_gradient_checkpointing():
     model = load_train_model(use_unsloth_gc=True, **TRAIN_ARGS)
     for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()):
         assert module._gradient_checkpointing_func.__self__.__name__ == "UnslothGradientCheckpointing"


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_upcast_layernorm():
     model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS)
     for name, param in model.named_parameters():
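These checkpointing tests go through LLaMA-Factory's `load_train_model` helper, but the flag they inspect is the standard Transformers one. A rough sketch of the underlying mechanism they exercise, using only public Transformers APIs (the model id is the tiny fixture referenced elsewhere in this diff):

    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("llamafactory/tiny-random-Llama-3")
    # Enabling gradient checkpointing flips the `gradient_checkpointing` attribute on
    # supporting modules, which is the attribute test_vanilla_checkpointing asserts on.
    model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
    assert any(getattr(module, "gradient_checkpointing", False) for module in model.modules())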
@@ -62,7 +62,7 @@ def test_upcast_layernorm():
             assert param.dtype == torch.float32


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_upcast_lmhead_output():
     model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS)
     inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device())
@@ -24,7 +24,7 @@ from llamafactory.model.model_utils.misc import find_expanded_modules
 HF_TOKEN = os.getenv("HF_TOKEN")


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
 def test_expanded_modules():
     config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
@@ -18,7 +18,7 @@ import torch
 from llamafactory.model.model_utils.packing import get_seqlens_in_batch, get_unpad_data


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize(
     "attention_mask,golden_seq_lens",
     [
@@ -23,7 +23,7 @@ from llamafactory.hparams import FinetuningArguments, ModelArguments
 from llamafactory.model.adapter import init_adapter


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("freeze_vision_tower", (False, True))
 @pytest.mark.parametrize("freeze_multi_modal_projector", (False, True))
 @pytest.mark.parametrize("freeze_language_model", (False, True))
@@ -49,7 +49,7 @@ def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bo
             assert param.requires_grad != freeze_language_model


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False)))
 def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
     model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
@@ -82,7 +82,7 @@ def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool):
         assert (merger_param_name in trainable_params) is False


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_visual_model_save_load():
     # check VLM's state dict: https://github.com/huggingface/transformers/pull/38385
     model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct")
@@ -29,14 +29,16 @@ INFER_ARGS = {
     "infer_dtype": "float16",
 }

-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.skip_on_devices("npu")
 def test_base():
     model = load_infer_model(**INFER_ARGS)
     ref_model = load_reference_model(TINY_LLAMA3)
     compare_model(model, ref_model)

-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.skip_on_devices("npu")
 @pytest.mark.usefixtures("fix_valuehead_cpu_loading")
 def test_valuehead():
@@ -44,7 +44,7 @@ INFER_ARGS = {
 }


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_freeze_train_all_modules():
     model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS)
     for name, param in model.named_parameters():
@@ -56,7 +56,7 @@ def test_freeze_train_all_modules():
             assert param.dtype == torch.float16


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_freeze_train_extra_modules():
     model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS)
     for name, param in model.named_parameters():
@@ -68,7 +68,7 @@ def test_freeze_train_extra_modules():
             assert param.dtype == torch.float16


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_freeze_inference():
     model = load_infer_model(**INFER_ARGS)
     for param in model.parameters():
@@ -43,14 +43,16 @@ INFER_ARGS = {
     "infer_dtype": "float16",
 }

-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_full_train():
     model = load_train_model(**TRAIN_ARGS)
     for param in model.parameters():
         assert param.requires_grad is True
         assert param.dtype == torch.float32

-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_full_inference():
     model = load_infer_model(**INFER_ARGS)
     for param in model.parameters():
@@ -55,35 +55,35 @@ INFER_ARGS = {
 }


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_lora_train_qv_modules():
     model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS)
     linear_modules, _ = check_lora_model(model)
     assert linear_modules == {"q_proj", "v_proj"}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_lora_train_all_modules():
     model = load_train_model(lora_target="all", **TRAIN_ARGS)
     linear_modules, _ = check_lora_model(model)
     assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_lora_train_extra_modules():
     model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS)
     _, extra_modules = check_lora_model(model)
     assert extra_modules == {"embed_tokens", "lm_head"}


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_lora_train_old_adapters():
     model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=False, **TRAIN_ARGS)
     ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
     compare_model(model, ref_model)


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_lora_train_new_adapters():
     model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=True, **TRAIN_ARGS)
     ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True)
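The LoRA tests assert which linear layers end up wrapped for a given `lora_target`. LLaMA-Factory builds the adapter configuration internally, but the effect matches the plain PEFT API; a minimal sketch of what `lora_target="q_proj,v_proj"` amounts to (the model id is the tiny fixture used by these tests, and the rank value is arbitrary):

    from peft import LoraConfig, get_peft_model
    from transformers import AutoModelForCausalLM

    base_model = AutoModelForCausalLM.from_pretrained("llamafactory/tiny-random-Llama-3")
    lora_config = LoraConfig(r=8, target_modules=["q_proj", "v_proj"])
    peft_model = get_peft_model(base_model, lora_config)
    # Only the injected LoRA matrices on q_proj/v_proj are trainable, mirroring
    # the `linear_modules == {"q_proj", "v_proj"}` assertion above.
    peft_model.print_trainable_parameters()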
@@ -92,7 +92,7 @@ def test_lora_train_new_adapters():
     )


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.usefixtures("fix_valuehead_cpu_loading")
 def test_lora_train_valuehead():
     model = load_train_model(add_valuehead=True, **TRAIN_ARGS)
@@ -102,7 +102,8 @@ def test_lora_train_valuehead():
     assert torch.allclose(state_dict["v_head.summary.weight"], ref_state_dict["v_head.summary.weight"])
     assert torch.allclose(state_dict["v_head.summary.bias"], ref_state_dict["v_head.summary.bias"])

-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.skip_on_devices("npu")
 def test_lora_inference():
     model = load_infer_model(**INFER_ARGS)
@@ -49,14 +49,15 @@ INFER_ARGS = {
 }


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.xfail(reason="PiSSA initialization is not stable in different platform.")
 def test_pissa_train():
     model = load_train_model(**TRAIN_ARGS)
     ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=True)
     compare_model(model, ref_model)

-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.xfail(reason="Known connection error.")
 def test_pissa_inference():
     model = load_infer_model(**INFER_ARGS)
@@ -59,7 +59,7 @@ class DataCollatorWithVerbose(DataCollatorWithPadding):
         return {k: v[:, :1] for k, v in batch.items()}  # truncate input length


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("disable_shuffling", [False, True])
 def test_shuffle(disable_shuffling: bool):
     model_args, data_args, training_args, finetuning_args, _ = get_train_args(