Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-16 20:00:36 +08:00
[misc] lint (#9593)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
@@ -43,14 +43,16 @@ INFER_ARGS = {
     "infer_dtype": "float16",
 }
 
-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_full_train():
     model = load_train_model(**TRAIN_ARGS)
     for param in model.parameters():
         assert param.requires_grad is True
         assert param.dtype == torch.float32
 
-@pytest.mark.runs_on(["cpu","npu"])
+
+@pytest.mark.runs_on(["cpu", "npu"])
 def test_full_inference():
     model = load_infer_model(**INFER_ARGS)
     for param in model.parameters():
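Both tests are gated by a custom runs_on marker. As a minimal sketch of how such a marker could be registered in a conftest.py and turned into a device-based skip (the registration and the device-detection logic below are illustrative assumptions, not the repository's actual implementation):

# Hypothetical conftest.py sketch: register a "runs_on" marker and skip tests
# whose declared device list does not include the device available locally.
import pytest
import torch


def pytest_configure(config):
    # Register the marker so pytest does not warn about an unknown mark.
    config.addinivalue_line("markers", "runs_on(devices): devices the test supports")


def pytest_runtest_setup(item):
    marker = item.get_closest_marker("runs_on")
    if marker is None:
        return
    devices = marker.args[0]  # e.g. ["cpu", "npu"]
    # Assumed detection: treat the machine as "cuda" when a GPU is visible, else "cpu".
    current = "cuda" if torch.cuda.is_available() else "cpu"
    if current not in devices:
        pytest.skip(f"test only runs on {devices}, current device is {current}")

With a hook like this, @pytest.mark.runs_on(["cpu", "npu"]) would let the two tests run on CPU or NPU hosts and be skipped elsewhere.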