diff --git a/README.md b/README.md
index b79c039c..2c059e7d 100644
--- a/README.md
+++ b/README.md
@@ -346,6 +346,7 @@ You also can add a custom chat template to [template.py](src/llamafactory/data/t
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
+- [Open Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
diff --git a/README_zh.md b/README_zh.md
index ae6a68dc..294f8ab5 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -348,6 +348,7 @@ https://github.com/user-attachments/assets/e6ce34b0-52d5-4f3e-a830-592106c4c272
- [Magpie-ultra-v0.1 (en)](https://huggingface.co/datasets/argilla/magpie-ultra-v0.1)
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
- [OpenO1-SFT (en&zh)](https://huggingface.co/datasets/O1-OPEN/OpenO1-SFT)
+- [Open Thoughts (en)](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k)
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
- [Pokemon-gpt4o-captions (en&zh)](https://huggingface.co/datasets/jugg1024/pokemon-gpt4o-captions)
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
diff --git a/src/llamafactory/data/template.py b/src/llamafactory/data/template.py
index c1701a84..2a1d1cfd 100644
--- a/src/llamafactory/data/template.py
+++ b/src/llamafactory/data/template.py
@@ -47,6 +47,7 @@ class Template:
format_prefix: "Formatter"
default_system: str
stop_words: List[str]
+ thought_words: Tuple[str, str]
efficient_eos: bool
replace_eos: bool
replace_jinja_template: bool
@@ -216,6 +217,7 @@ def _register_template(
format_prefix: Optional["Formatter"] = None,
default_system: str = "",
stop_words: Optional[Sequence[str]] = None,
+ thought_words: Optional[Tuple[str, str]] = None,
efficient_eos: bool = False,
replace_eos: bool = False,
replace_jinja_template: bool = False,
@@ -260,6 +262,7 @@ def _register_template(
format_prefix=format_prefix or default_prefix_formatter,
default_system=default_system,
stop_words=stop_words or [],
+ thought_words=thought_words or ("", ""),
efficient_eos=efficient_eos,
replace_eos=replace_eos,
replace_jinja_template=replace_jinja_template,
diff --git a/src/llamafactory/webui/chatter.py b/src/llamafactory/webui/chatter.py
index 7863dee3..354b80ea 100644
--- a/src/llamafactory/webui/chatter.py
+++ b/src/llamafactory/webui/chatter.py
@@ -36,17 +36,17 @@ if is_gradio_available():
import gradio as gr
-def _format_response(text: str, lang: str) -> str:
+def _format_response(text: str, lang: str, thought_words: Tuple[str, str] = ("", "")) -> str:
r"""
Post-processes the response text.
Based on: https://huggingface.co/spaces/Lyte/DeepSeek-R1-Distill-Qwen-1.5B-Demo-GGUF/blob/main/app.py
"""
- if "<think>" not in text:
+ if thought_words[0] not in text:
return text
- text = text.replace("<think>", "")
- result = text.split("</think>", maxsplit=1)
+ text = text.replace(thought_words[0], "")
+ result = text.split(thought_words[1], maxsplit=1)
if len(result) == 1:
summary = ALERTS["info_thinking"][lang]
thought, answer = text, ""
@@ -209,7 +209,7 @@ class WebChatModel(ChatModel):
bot_text = "```json\n" + tool_calls + "\n```"
else:
output_messages = messages + [{"role": Role.ASSISTANT.value, "content": result}]
- bot_text = _format_response(result, lang)
+ bot_text = _format_response(result, lang, self.engine.template.thought_words)
chatbot[-1] = {"role": "assistant", "content": bot_text}
yield chatbot, output_messages