Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-18 04:40:35 +08:00
[misc] lint (#9593)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
@@ -42,7 +42,7 @@ TRAIN_ARGS = {
 }


-@pytest.mark.runs_on(["cpu","npu"])
+@pytest.mark.runs_on(["cpu", "npu"])
 @pytest.mark.parametrize("num_samples", [16])
 def test_feedback_data(num_samples: int):
     train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"]
@@ -284,7 +284,6 @@ def test_llama4_template(use_fast: bool):
         pytest.param(False, marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken.")),
     ],
 )
-
 @pytest.mark.runs_on(["cpu"])
 def test_phi4_template(use_fast: bool):
     prompt_str = (