[refactor] rename lfm template to lfm2 and add LFM 2.5 to README (#9731)

Author: Vo Van Phuc
Date: 2026-01-07 18:25:04 +07:00 (committed by GitHub)
Parent commit: 4c1eb922e2
This commit: 5cfd804b59
6 changed files with 33 additions and 31 deletions

View File

@@ -298,6 +298,7 @@ Read technical notes:
| [InternLM/Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 | | [InternLM/Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 |
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl | | [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
| [Ling 2.0 (mini/flash)](https://huggingface.co/inclusionAI) | 16B/100B | bailing_v2 | | [Ling 2.0 (mini/flash)](https://huggingface.co/inclusionAI) | 16B/100B | bailing_v2 |
| [LFM 2.5 (VL)](https://huggingface.co/LiquidAI) | 1.2B/1.6B | lfm2/lfm2_vl |
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - | | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 | | [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 | | [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |

View File

@@ -300,6 +300,7 @@ https://github.com/user-attachments/assets/43b700c6-a178-41db-b1f8-8190a5d3fcfc
| [InternLM/Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 | | [InternLM/Intern-S1-mini](https://huggingface.co/internlm/) | 8B | intern_s1 |
| [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl | | [Kimi-VL](https://huggingface.co/moonshotai) | 16B | kimi_vl |
| [Ling 2.0 (mini/flash)](https://huggingface.co/inclusionAI) | 16B/100B | bailing_v2 | | [Ling 2.0 (mini/flash)](https://huggingface.co/inclusionAI) | 16B/100B | bailing_v2 |
| [LFM 2.5 (VL)](https://huggingface.co/LiquidAI) | 1.2B/1.6B | lfm2/lfm2_vl |
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - | | [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 | | [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
| [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 | | [Llama 3-3.3](https://huggingface.co/meta-llama) | 1B/3B/8B/70B | llama3 |

View File

@@ -1331,18 +1331,18 @@ register_template(
register_template( register_template(
name="lfm", name="lfm2",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]), format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm"), format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm2"),
format_observation=StringFormatter( format_observation=StringFormatter(
slots=[ slots=[
"<|im_start|>tool\n<|tool_response_start|>{{content}}<|tool_response_end|><|im_end|>\n" "<|im_start|>tool\n<|tool_response_start|>{{content}}<|tool_response_end|><|im_end|>\n"
"<|im_start|>assistant\n" "<|im_start|>assistant\n"
] ]
), ),
format_tools=ToolFormatter(tool_format="lfm"), format_tools=ToolFormatter(tool_format="lfm2"),
default_system="You are a helpful AI assistant.", default_system="You are a helpful AI assistant.",
stop_words=["<|im_end|>"], stop_words=["<|im_end|>"],
tool_call_words=("<|tool_call_start|>", "<|tool_call_end|>"), tool_call_words=("<|tool_call_start|>", "<|tool_call_end|>"),
@@ -1355,14 +1355,14 @@ register_template(
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]), format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]), format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]), format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm"), format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm2"),
format_observation=StringFormatter( format_observation=StringFormatter(
slots=[ slots=[
"<|im_start|>tool\n<|tool_response_start|>{{content}}<|tool_response_end|><|im_end|>\n" "<|im_start|>tool\n<|tool_response_start|>{{content}}<|tool_response_end|><|im_end|>\n"
"<|im_start|>assistant\n" "<|im_start|>assistant\n"
] ]
), ),
format_tools=ToolFormatter(tool_format="lfm"), format_tools=ToolFormatter(tool_format="lfm2"),
default_system="You are a helpful multimodal assistant by Liquid AI.", default_system="You are a helpful multimodal assistant by Liquid AI.",
stop_words=["<|im_end|>"], stop_words=["<|im_end|>"],
tool_call_words=("<|tool_call_start|>", "<|tool_call_end|>"), tool_call_words=("<|tool_call_start|>", "<|tool_call_end|>"),

View File

@@ -102,7 +102,7 @@ LING_TOOL_PROMPT = (
""""arguments": <args-json-object>}}\n</tool_call>""" """"arguments": <args-json-object>}}\n</tool_call>"""
) )
LFM_TOOL_PROMPT = "List of tools: <|tool_list_start|>{tool_text}<|tool_list_end|>" LFM2_TOOL_PROMPT = "List of tools: <|tool_list_start|>{tool_text}<|tool_list_end|>"
@dataclass @dataclass
@@ -549,8 +549,8 @@ class LingToolUtils(QwenToolUtils):
return LING_TOOL_PROMPT.format(tool_text=tool_text) + "\n" + "detailed thinking off" return LING_TOOL_PROMPT.format(tool_text=tool_text) + "\n" + "detailed thinking off"
class LFMToolUtils(ToolUtils): class LFM2ToolUtils(ToolUtils):
r"""LFM 2.5 tool using template with Pythonic function call syntax.""" r"""LFM2.5 tool using template with Pythonic function call syntax."""
@override @override
@staticmethod @staticmethod
@@ -560,7 +560,7 @@ class LFMToolUtils(ToolUtils):
tool = tool.get("function", tool) if tool.get("type") == "function" else tool tool = tool.get("function", tool) if tool.get("type") == "function" else tool
tool_list.append(tool) tool_list.append(tool)
return LFM_TOOL_PROMPT.format(tool_text=json.dumps(tool_list, ensure_ascii=False)) return LFM2_TOOL_PROMPT.format(tool_text=json.dumps(tool_list, ensure_ascii=False))
@override @override
@staticmethod @staticmethod
@@ -643,7 +643,7 @@ class LFMToolUtils(ToolUtils):
for keyword in node.keywords: for keyword in node.keywords:
key = keyword.arg key = keyword.arg
try: try:
value = LFMToolUtils._ast_to_value(keyword.value) value = LFM2ToolUtils._ast_to_value(keyword.value)
except (ValueError, SyntaxError): except (ValueError, SyntaxError):
return content return content
args_dict[key] = value args_dict[key] = value
@@ -657,7 +657,7 @@ TOOLS = {
"default": DefaultToolUtils(), "default": DefaultToolUtils(),
"glm4": GLM4ToolUtils(), "glm4": GLM4ToolUtils(),
"llama3": Llama3ToolUtils(), "llama3": Llama3ToolUtils(),
"lfm": LFMToolUtils(), "lfm2": LFM2ToolUtils(),
"minimax1": MiniMaxM1ToolUtils(), "minimax1": MiniMaxM1ToolUtils(),
"minimax2": MiniMaxM2ToolUtils(), "minimax2": MiniMaxM2ToolUtils(),
"mistral": MistralToolUtils(), "mistral": MistralToolUtils(),

View File

@@ -1502,7 +1502,7 @@ register_model_group(
DownloadSource.DEFAULT: "LiquidAI/LFM2.5-1.2B-Instruct", DownloadSource.DEFAULT: "LiquidAI/LFM2.5-1.2B-Instruct",
}, },
}, },
template="lfm", template="lfm2",
) )

View File

@@ -295,8 +295,8 @@ def test_qwen_multi_tool_extractor():
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_function_formatter(): def test_lfm2_function_formatter():
formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm") formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm2")
tool_calls = json.dumps(FUNCTION) tool_calls = json.dumps(FUNCTION)
assert formatter.apply(content=tool_calls) == [ assert formatter.apply(content=tool_calls) == [
"""<|tool_call_start|>[tool_name(foo="bar", size=10)]<|tool_call_end|><|im_end|>\n""" """<|tool_call_start|>[tool_name(foo="bar", size=10)]<|tool_call_end|><|im_end|>\n"""
@@ -304,8 +304,8 @@ def test_lfm_function_formatter():
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_multi_function_formatter(): def test_lfm2_multi_function_formatter():
formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm") formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm2")
tool_calls = json.dumps([FUNCTION] * 2) tool_calls = json.dumps([FUNCTION] * 2)
assert formatter.apply(content=tool_calls) == [ assert formatter.apply(content=tool_calls) == [
"""<|tool_call_start|>[tool_name(foo="bar", size=10), tool_name(foo="bar", size=10)]<|tool_call_end|>""" """<|tool_call_start|>[tool_name(foo="bar", size=10), tool_name(foo="bar", size=10)]<|tool_call_end|>"""
@@ -314,23 +314,23 @@ def test_lfm_multi_function_formatter():
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_tool_formatter(): def test_lfm2_tool_formatter():
formatter = ToolFormatter(tool_format="lfm") formatter = ToolFormatter(tool_format="lfm2")
assert formatter.apply(content=json.dumps(TOOLS)) == [ assert formatter.apply(content=json.dumps(TOOLS)) == [
"List of tools: <|tool_list_start|>" + json.dumps(TOOLS, ensure_ascii=False) + "<|tool_list_end|>" "List of tools: <|tool_list_start|>" + json.dumps(TOOLS, ensure_ascii=False) + "<|tool_list_end|>"
] ]
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_tool_extractor(): def test_lfm2_tool_extractor():
formatter = ToolFormatter(tool_format="lfm") formatter = ToolFormatter(tool_format="lfm2")
result = """<|tool_call_start|>[test_tool(foo="bar", size=10)]<|tool_call_end|>""" result = """<|tool_call_start|>[test_tool(foo="bar", size=10)]<|tool_call_end|>"""
assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")]
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_multi_tool_extractor(): def test_lfm2_multi_tool_extractor():
formatter = ToolFormatter(tool_format="lfm") formatter = ToolFormatter(tool_format="lfm2")
result = """<|tool_call_start|>[test_tool(foo="bar", size=10), another_tool(foo="job", size=2)]<|tool_call_end|>""" result = """<|tool_call_start|>[test_tool(foo="bar", size=10), another_tool(foo="job", size=2)]<|tool_call_end|>"""
assert formatter.extract(result) == [ assert formatter.extract(result) == [
("test_tool", """{"foo": "bar", "size": 10}"""), ("test_tool", """{"foo": "bar", "size": 10}"""),
@@ -339,8 +339,8 @@ def test_lfm_multi_tool_extractor():
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_tool_extractor_with_nested_dict(): def test_lfm2_tool_extractor_with_nested_dict():
formatter = ToolFormatter(tool_format="lfm") formatter = ToolFormatter(tool_format="lfm2")
result = """<|tool_call_start|>[search(query="test", options={"limit": 10, "offset": 0})]<|tool_call_end|>""" result = """<|tool_call_start|>[search(query="test", options={"limit": 10, "offset": 0})]<|tool_call_end|>"""
extracted = formatter.extract(result) extracted = formatter.extract(result)
assert len(extracted) == 1 assert len(extracted) == 1
@@ -351,8 +351,8 @@ def test_lfm_tool_extractor_with_nested_dict():
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_tool_extractor_with_list_arg(): def test_lfm2_tool_extractor_with_list_arg():
formatter = ToolFormatter(tool_format="lfm") formatter = ToolFormatter(tool_format="lfm2")
result = """<|tool_call_start|>[batch_process(items=[1, 2, 3], enabled=True)]<|tool_call_end|>""" result = """<|tool_call_start|>[batch_process(items=[1, 2, 3], enabled=True)]<|tool_call_end|>"""
extracted = formatter.extract(result) extracted = formatter.extract(result)
assert len(extracted) == 1 assert len(extracted) == 1
@@ -363,17 +363,17 @@ def test_lfm_tool_extractor_with_list_arg():
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_tool_extractor_no_match(): def test_lfm2_tool_extractor_no_match():
formatter = ToolFormatter(tool_format="lfm") formatter = ToolFormatter(tool_format="lfm2")
result = "This is a regular response without tool calls." result = "This is a regular response without tool calls."
extracted = formatter.extract(result) extracted = formatter.extract(result)
assert extracted == result assert extracted == result
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.runs_on(["cpu", "mps"])
def test_lfm_tool_round_trip(): def test_lfm2_tool_round_trip():
formatter = FunctionFormatter(slots=["{{content}}"], tool_format="lfm") formatter = FunctionFormatter(slots=["{{content}}"], tool_format="lfm2")
tool_formatter = ToolFormatter(tool_format="lfm") tool_formatter = ToolFormatter(tool_format="lfm2")
original = {"name": "my_func", "arguments": {"arg1": "hello", "arg2": 42, "arg3": True}} original = {"name": "my_func", "arguments": {"arg1": "hello", "arg2": 42, "arg3": True}}
formatted = formatter.apply(content=json.dumps(original)) formatted = formatter.apply(content=json.dumps(original))
extracted = tool_formatter.extract(formatted[0]) extracted = tool_formatter.extract(formatted[0])