Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-27 09:10:35 +08:00
[misc] fix accelerator (#9661)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
@@ -14,7 +14,6 @@
import os

import pytest
import torch

from llamafactory.train.test_utils import load_infer_model, load_train_model
@@ -44,7 +43,6 @@ INFER_ARGS = {
}


@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_freeze_train_all_modules():
    model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS)
    for name, param in model.named_parameters():
@@ -56,7 +54,6 @@ def test_freeze_train_all_modules():
        assert param.dtype == torch.float16


@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_freeze_train_extra_modules():
    model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS)
    for name, param in model.named_parameters():
@@ -68,7 +65,6 @@ def test_freeze_train_extra_modules():
        assert param.dtype == torch.float16


@pytest.mark.runs_on(["cpu", "npu", "cuda"])
def test_freeze_inference():
    model = load_infer_model(**INFER_ARGS)
    for param in model.parameters():
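The assertion loops are truncated at the hunk boundaries above. For orientation, a freeze-tuning test of this shape typically asserts that only the layers selected by freeze_trainable_layers remain trainable (and are upcast to fp32) while every other parameter stays frozen in fp16. The sketch below is illustrative only; the helper name check_freeze_state and the layer-name prefix "model.layers.1." are assumptions, not code from the repository.

    import torch

    def check_freeze_state(model, trainable_prefix: str = "model.layers.1."):
        # Hypothetical helper: with freeze_trainable_layers=1 on a tiny
        # two-layer test model, only the last decoder layer should train.
        for name, param in model.named_parameters():
            if name.startswith(trainable_prefix):
                assert param.requires_grad is True
                assert param.dtype == torch.float32  # trainable params are upcast
            else:
                assert param.requires_grad is False
                assert param.dtype == torch.float16  # frozen params stay in half precision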
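runs_on is a custom pytest marker rather than a pytest built-in, so it has to be registered and enforced by the project's test configuration. Below is a minimal conftest.py sketch of how such a device-gating marker can work; it is an assumed implementation for illustration, not LLaMA-Factory's actual conftest.

    import pytest
    import torch

    def pytest_configure(config):
        # Register the marker so pytest does not warn about an unknown mark.
        config.addinivalue_line("markers", "runs_on(devices): run only on the listed accelerators")

    def current_device() -> str:
        # Detect the available accelerator; torch.npu only exists when
        # the Ascend torch_npu plugin is installed.
        if torch.cuda.is_available():
            return "cuda"
        if getattr(torch, "npu", None) is not None and torch.npu.is_available():
            return "npu"
        return "cpu"

    def pytest_runtest_setup(item):
        marker = item.get_closest_marker("runs_on")
        if marker is not None and current_device() not in marker.args[0]:
            pytest.skip(f"requires one of {marker.args[0]}, current device is {current_device()}")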