mirror of
https://github.com/hiyouga/LLaMA-Factory.git
synced 2025-12-16 20:00:36 +08:00
[test] add npu test yaml and add ascend a3 docker file (#9547)
Co-authored-by: jiaqiw09 <jiaqiw960714@gmail.com>
This commit is contained in:
@@ -14,6 +14,8 @@
|
||||
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from llamafactory.chat import ChatModel
|
||||
|
||||
|
||||
@@ -35,11 +37,13 @@ MESSAGES = [
|
||||
EXPECTED_RESPONSE = "_rho"
|
||||
|
||||
|
||||
@pytest.mark.runs_on(["cpu"])
def test_chat():
    """Run a single non-streaming chat turn and verify the generated text.

    Builds a ChatModel from the shared INFER_ARGS, sends the canned MESSAGES,
    and asserts the first returned completion equals EXPECTED_RESPONSE.
    """
    engine = ChatModel(INFER_ARGS)
    responses = engine.chat(MESSAGES)
    assert responses[0].response_text == EXPECTED_RESPONSE
|
||||
|
||||
|
||||
@pytest.mark.runs_on(["cpu"])
|
||||
def test_stream_chat():
|
||||
chat_model = ChatModel(INFER_ARGS)
|
||||
response = ""
|
||||
|
||||
@@ -39,6 +39,7 @@ MESSAGES = [
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.runs_on(["cpu"])
|
||||
@pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed")
|
||||
def test_chat():
|
||||
r"""Test the SGLang engine's basic chat functionality."""
|
||||
@@ -48,6 +49,7 @@ def test_chat():
|
||||
print(response.response_text)
|
||||
|
||||
|
||||
@pytest.mark.runs_on(["cpu"])
|
||||
@pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed")
|
||||
def test_stream_chat():
|
||||
r"""Test the SGLang engine's streaming chat functionality."""
|
||||
|
||||
@@ -48,7 +48,7 @@ INFER_ARGS = {
|
||||
|
||||
OS_NAME = os.getenv("OS_NAME", "")
|
||||
|
||||
|
||||
@pytest.mark.runs_on(["cpu"])
|
||||
@pytest.mark.parametrize(
|
||||
"stage,dataset",
|
||||
[
|
||||
@@ -65,6 +65,7 @@ def test_run_exp(stage: str, dataset: str):
|
||||
assert os.path.exists(output_dir)
|
||||
|
||||
|
||||
@pytest.mark.runs_on(["cpu"])
|
||||
def test_export():
|
||||
export_dir = os.path.join("output", "llama3_export")
|
||||
export_model({"export_dir": export_dir, **INFER_ARGS})
|
||||
|
||||
Reference in New Issue
Block a user