[model] support LiquidAI's LFM2.5-VL vision-language model (#9729)

This commit is contained in:
Vo Van Phuc
2026-01-07 16:20:29 +07:00
committed by GitHub
parent b4e051bea4
commit 958fb523a2
5 changed files with 118 additions and 0 deletions

View File

@@ -419,3 +419,15 @@ def test_video_llava_plugin():
]
check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"])
_check_plugin(**check_inputs)
@pytest.mark.runs_on(["cpu", "mps"])
def test_lfm2_vl_plugin():
    """Verify the LFM2.5-VL multimodal plugin registers and exposes the expected tokens."""
    # Look up the plugin by its registry name, configuring only the image modality.
    plugin = get_mm_plugin(name="lfm2_vl", image_token="<image>")

    # The registry must hand back a real object carrying the image token we passed in;
    # video/audio were never configured, so those tokens stay unset.
    assert plugin is not None
    assert plugin.image_token == "<image>"
    assert plugin.video_token is None
    assert plugin.audio_token is None
    # Confirm the registry dispatched to the dedicated LFM-VL plugin class.
    assert type(plugin).__name__ == "LFMVLPlugin"