diff --git a/README.md b/README.md
index 405e9e37..10ad04ba 100644
--- a/README.md
+++ b/README.md
@@ -60,9 +60,10 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846
| [Baichuan](https://github.com/baichuan-inc/Baichuan-13B) | 7B/13B | W_pack | baichuan |
| [Baichuan2](https://github.com/baichuan-inc/Baichuan2) | 7B/13B | W_pack | baichuan2 |
| [InternLM](https://github.com/InternLM/InternLM) | 7B/20B | q_proj,v_proj | intern |
-| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B/14B | c_attn | chatml |
+| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B/14B | c_attn | qwen |
| [ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6B | query_key_value | chatglm3 |
| [Phi-1.5](https://huggingface.co/microsoft/phi-1_5) | 1.3B | Wqkv | - |
+| [Mistral](https://huggingface.co/mistralai) | 7B | q_proj,v_proj | mistral |
> [!NOTE]
> **Default module** is used for the `--lora_target` argument, you can use `--lora_target all` to specify all the available modules.
diff --git a/README_zh.md b/README_zh.md
index b4a7174f..b58c43f2 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -60,9 +60,10 @@ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846
| [Baichuan](https://github.com/baichuan-inc/Baichuan-13B) | 7B/13B | W_pack | baichuan |
| [Baichuan2](https://github.com/baichuan-inc/Baichuan2) | 7B/13B | W_pack | baichuan2 |
| [InternLM](https://github.com/InternLM/InternLM) | 7B/20B | q_proj,v_proj | intern |
-| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B/14B | c_attn | chatml |
+| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B/14B | c_attn | qwen |
| [ChatGLM3](https://github.com/THUDM/ChatGLM3) | 6B | query_key_value | chatglm3 |
| [Phi-1.5](https://huggingface.co/microsoft/phi-1_5) | 1.3B | Wqkv | - |
+| [Mistral](https://huggingface.co/mistralai) | 7B | q_proj,v_proj | mistral |
> [!NOTE]
> **默认模块**应作为 `--lora_target` 参数的默认值,可使用 `--lora_target all` 参数指定全部模块。
diff --git a/src/llmtuner/extras/template.py b/src/llmtuner/extras/template.py
index e8fa9bc6..401750ce 100644
--- a/src/llmtuner/extras/template.py
+++ b/src/llmtuner/extras/template.py
@@ -225,86 +225,6 @@ def get_template_and_fix_tokenizer(
return template
-r"""
-Supports language model inference without histories.
-"""
-register_template(
- name="vanilla",
- prefix=[],
- prompt=[
- "{{query}}"
- ],
- system="",
- sep=[],
- use_history=False
-)
-
-
-r"""
-Default template.
-"""
-register_template(
- name="default",
- prefix=[
- "{{system}}"
- ],
- prompt=[
- "Human: {{query}}\nAssistant:"
- ],
- system=(
- "A chat between a curious user and an artificial intelligence assistant. "
- "The assistant gives helpful, detailed, and polite answers to the user's questions."
- ),
- sep=[
- "\n"
- ]
-)
-
-
-r"""
-Supports: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
- https://huggingface.co/meta-llama/Llama-2-13b-chat-hf
- https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
-"""
-register_template(
- name="llama2",
- prefix=[
- "<<SYS>>\n{{system}}\n<</SYS>>\n\n"
- ],
- prompt=[
- "[INST] {{query}} [/INST]"
- ],
- system=(
- "You are a helpful, respectful and honest assistant. "
- "Always answer as helpfully as possible, while being safe. "
- "Your answers should not include any harmful, unethical, "
- "racist, sexist, toxic, dangerous, or illegal content. "
- "Please ensure that your responses are socially unbiased and positive in nature.\n\n"
- "If a question does not make any sense, or is not factually coherent, "
- "explain why instead of answering something not correct. "
- "If you don't know the answer to a question, please don't share false information."
- ),
- sep=[]
-)
-
-
-r"""
-Supports: https://huggingface.co/ziqingyang/chinese-alpaca-2-7b
- https://huggingface.co/ziqingyang/chinese-alpaca-2-13b
-"""
-register_template(
- name="llama2_zh",
- prefix=[
- "<<SYS>>\n{{system}}\n<</SYS>>\n\n"
- ],
- prompt=[
- "[INST] {{query}} [/INST]"
- ],
- system="You are a helpful assistant. 你是一个乐于助人的助手。",
- sep=[]
-)
-
-
r"""
Supports: https://huggingface.co/tatsu-lab/alpaca-7b-wdiff
"""
@@ -326,67 +246,6 @@ register_template(
)
-r"""
-Supports: https://huggingface.co/lmsys/vicuna-7b-v1.5
- https://huggingface.co/lmsys/vicuna-13b-v1.5
-"""
-register_template(
- name="vicuna",
- prefix=[
- "{{system}}"
- ],
- prompt=[
- "USER: {{query}} ASSISTANT:"
- ],
- system=(
- "A chat between a curious user and an artificial intelligence assistant. "
- "The assistant gives helpful, detailed, and polite answers to the user's questions."
- ),
- sep=[]
-)
-
-
-r"""
-Supports: https://huggingface.co/BelleGroup/BELLE-LLaMA-EXT-13B
-"""
-register_template(
- name="belle",
- prefix=[
- "{{system}}"
- ],
- prompt=[
- "Human: {{query}}\n\nBelle: "
- ],
- system="",
- sep=[
- "\n\n"
- ]
-)
-
-
-r"""
-Supports: https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1
- https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1.1
- https://huggingface.co/IDEA-CCNL/Ziya2-13B-Chat
-"""
-register_template(
- name="ziya",
- prefix=[
- "{{system}}"
- ],
- prompt=[
- {"token": "<human>"},
- ":{{query}}\n",
- {"token": "<bot>"},
- ":"
- ],
- system="",
- sep=[
- "\n"
- ]
-)
-
-
r"""
Supports: https://huggingface.co/BAAI/AquilaChat-7B
https://huggingface.co/BAAI/AquilaChat2-7B
@@ -414,32 +273,6 @@ register_template(
)
-r"""
-Supports: https://huggingface.co/internlm/internlm-chat-7b
- https://huggingface.co/internlm/internlm-chat-20b
-"""
-register_template(
- name="intern",
- prefix=[
- "{{system}}"
- ],
- prompt=[
- "<|User|>:{{query}}",
- {"token": "<eoh>"},
- "\n<|Bot|>:"
- ],
- system="",
- sep=[
- {"token": "<eoa>"},
- "\n"
- ],
- stop_words=[
- "<eoa>"
- ],
- efficient_eos=True
-)
-
-
r"""
Supports: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat
"""
@@ -480,61 +313,38 @@ register_template(
r"""
-Supports: https://huggingface.co/HuggingFaceH4/starchat-alpha
- https://huggingface.co/HuggingFaceH4/starchat-beta
+Supports: https://huggingface.co/BelleGroup/BELLE-LLaMA-EXT-13B
"""
register_template(
- name="starchat",
+ name="belle",
prefix=[
- {"token": "<|system|>"},
- "\n{{system}}",
+ "{{system}}"
],
prompt=[
- {"token": "<|user|>"},
- "\n{{query}}",
- {"token": "<|end|>"},
- "\n",
- {"token": "<|assistant|>"}
+ "Human: {{query}}\n\nBelle: "
],
system="",
sep=[
- {"token": "<|end|>"},
- "\n"
- ],
- stop_words=[
- "<|end|>"
- ],
- efficient_eos=True
+ "\n\n"
+ ]
)
r"""
-Supports: https://huggingface.co/Qwen/Qwen-7B-Chat
- https://huggingface.co/Qwen/Qwen-14B-Chat
+Supports: https://huggingface.co/vivo-ai/BlueLM-7B-Chat
"""
register_template(
- name="chatml",
+ name="bluelm",
prefix=[
- {"token": "<|im_start|>"},
- "system\n{{system}}"
+ "{{system}}"
],
prompt=[
- {"token": "<|im_start|>"},
- "user\n{{query}}",
- {"token": "<|im_end|>"},
- "\n",
- {"token": "<|im_start|>"},
- "assistant\n"
+ {"token": "[|Human|]:"},
+ "{{query}}",
+ {"token": "[|AI|]:"}
],
- system="You are a helpful assistant.",
- sep=[
- {"token": "<|im_end|>"},
- "\n"
- ],
- stop_words=[
- "<|im_end|>"
- ],
- efficient_eos=True
+ system="",
+ sep=[]
)
@@ -585,45 +395,6 @@ register_template(
)
-r"""
-Supports: https://huggingface.co/openchat/openchat_v3.2_super
-"""
-register_template(
- name="openchat",
- prefix=[
- "{{system}}"
- ],
- prompt=[
- "GPT4 User: {{query}}",
- {"token": "<|end_of_turn|>"},
- "GPT4 Assistant:"
- ],
- system="",
- sep=[
- {"token": "<|end_of_turn|>"}
- ],
- efficient_eos=True
-)
-
-
-r"""
-Supports: https://huggingface.co/xverse/XVERSE-7B-Chat
- https://huggingface.co/xverse/XVERSE-13B-Chat
-"""
-register_template(
- name="xverse",
- prefix=[
- "{{system}}"
- ],
- prompt=[
- "Human: {{query}}\n\nAssistant: "
- ],
- system="",
- sep=[]
-)
-
-
-
r"""
Supports: https://huggingface.co/deepseek-ai/deepseek-coder-1.3b-instruct
https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct
@@ -653,3 +424,290 @@ register_template(
],
efficient_eos=True
)
+
+
+r"""
+Default template.
+"""
+register_template(
+ name="default",
+ prefix=[
+ "{{system}}"
+ ],
+ prompt=[
+ "Human: {{query}}\nAssistant:"
+ ],
+ system=(
+ "A chat between a curious user and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the user's questions."
+ ),
+ sep=[
+ "\n"
+ ]
+)
+
+
+r"""
+Supports: https://huggingface.co/internlm/internlm-chat-7b
+ https://huggingface.co/internlm/internlm-chat-20b
+"""
+register_template(
+ name="intern",
+ prefix=[
+ "{{system}}"
+ ],
+ prompt=[
+ "<|User|>:{{query}}",
+ {"token": "<eoh>"},
+ "\n<|Bot|>:"
+ ],
+ system="",
+ sep=[
+ {"token": "<eoa>"},
+ "\n"
+ ],
+ stop_words=[
+ "<eoa>"
+ ],
+ efficient_eos=True
+)
+
+
+r"""
+Supports: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
+ https://huggingface.co/meta-llama/Llama-2-13b-chat-hf
+ https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
+"""
+register_template(
+ name="llama2",
+ prefix=[
+ "<<SYS>>\n{{system}}\n<</SYS>>\n\n"
+ ],
+ prompt=[
+ "[INST] {{query}} [/INST]"
+ ],
+ system=(
+ "You are a helpful, respectful and honest assistant. "
+ "Always answer as helpfully as possible, while being safe. "
+ "Your answers should not include any harmful, unethical, "
+ "racist, sexist, toxic, dangerous, or illegal content. "
+ "Please ensure that your responses are socially unbiased and positive in nature.\n\n"
+ "If a question does not make any sense, or is not factually coherent, "
+ "explain why instead of answering something not correct. "
+ "If you don't know the answer to a question, please don't share false information."
+ ),
+ sep=[]
+)
+
+
+r"""
+Supports: https://huggingface.co/ziqingyang/chinese-alpaca-2-7b
+ https://huggingface.co/ziqingyang/chinese-alpaca-2-13b
+"""
+register_template(
+ name="llama2_zh",
+ prefix=[
+ "<<SYS>>\n{{system}}\n<</SYS>>\n\n"
+ ],
+ prompt=[
+ "[INST] {{query}} [/INST]"
+ ],
+ system="You are a helpful assistant. 你是一个乐于助人的助手。",
+ sep=[]
+)
+
+
+r"""
+Supports: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
+"""
+register_template(
+ name="mistral",
+ prefix=[
+ "{{system}}"
+ ],
+ prompt=[
+ "[INST] {{query}} [/INST]"
+ ],
+ system="",
+ sep=[]
+)
+
+
+r"""
+Supports: https://huggingface.co/openchat/openchat_3.5
+"""
+register_template(
+ name="openchat",
+ prefix=[
+ "{{system}}"
+ ],
+ prompt=[
+ "GPT4 Correct User: {{query}}",
+ {"token": "<|end_of_turn|>"},
+ "GPT4 Correct Assistant:"
+ ],
+ system="You are a helpful assistant.",
+ sep=[
+ {"token": "<|end_of_turn|>"}
+ ],
+ stop_words=[
+ "<|end_of_turn|>"
+ ],
+ efficient_eos=True
+)
+
+
+r"""
+Supports: https://huggingface.co/Qwen/Qwen-7B-Chat
+ https://huggingface.co/Qwen/Qwen-14B-Chat
+"""
+register_template(
+ name="qwen",
+ prefix=[
+ {"token": "<|im_start|>"},
+ "system\n{{system}}"
+ ],
+ prompt=[
+ {"token": "<|im_start|>"},
+ "user\n{{query}}",
+ {"token": "<|im_end|>"},
+ "\n",
+ {"token": "<|im_start|>"},
+ "assistant\n"
+ ],
+ system="You are a helpful assistant.",
+ sep=[
+ {"token": "<|im_end|>"},
+ "\n"
+ ],
+ stop_words=[
+ "<|im_end|>"
+ ],
+ efficient_eos=True
+)
+
+
+r"""
+Supports: https://huggingface.co/HuggingFaceH4/starchat-alpha
+ https://huggingface.co/HuggingFaceH4/starchat-beta
+"""
+register_template(
+ name="starchat",
+ prefix=[
+ {"token": "<|system|>"},
+ "\n{{system}}",
+ ],
+ prompt=[
+ {"token": "<|user|>"},
+ "\n{{query}}",
+ {"token": "<|end|>"},
+ "\n",
+ {"token": "<|assistant|>"}
+ ],
+ system="",
+ sep=[
+ {"token": "<|end|>"},
+ "\n"
+ ],
+ stop_words=[
+ "<|end|>"
+ ],
+ efficient_eos=True
+)
+
+
+r"""
+Supports language model inference without histories.
+"""
+register_template(
+ name="vanilla",
+ prefix=[],
+ prompt=[
+ "{{query}}"
+ ],
+ system="",
+ sep=[],
+ use_history=False
+)
+
+
+r"""
+Supports: https://huggingface.co/lmsys/vicuna-7b-v1.5
+ https://huggingface.co/lmsys/vicuna-13b-v1.5
+"""
+register_template(
+ name="vicuna",
+ prefix=[
+ "{{system}}"
+ ],
+ prompt=[
+ "USER: {{query}} ASSISTANT:"
+ ],
+ system=(
+ "A chat between a curious user and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the user's questions."
+ ),
+ sep=[]
+)
+
+
+r"""
+Supports: https://huggingface.co/xverse/XVERSE-7B-Chat
+ https://huggingface.co/xverse/XVERSE-13B-Chat
+"""
+register_template(
+ name="xverse",
+ prefix=[
+ "{{system}}"
+ ],
+ prompt=[
+ "Human: {{query}}\n\nAssistant: "
+ ],
+ system="",
+ sep=[]
+)
+
+
+r"""
+Supports: https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha
+ https://huggingface.co/HuggingFaceH4/zephyr-7b-beta
+"""
+register_template(
+ name="zephyr",
+ prefix=[
+ {"token": "<|system|>"},
+ "\n{{system}}",
+ {"token": "</s>"}
+ ],
+ prompt=[
+ {"token": "<|user|>"},
+ "\n{{query}}",
+ {"token": "</s>"},
+ {"token": "<|assistant|>"}
+ ],
+ system="You are a friendly chatbot who always responds in the style of a pirate",
+ sep=[]
+)
+
+
+r"""
+Supports: https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1
+ https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1.1
+ https://huggingface.co/IDEA-CCNL/Ziya2-13B-Chat
+"""
+register_template(
+ name="ziya",
+ prefix=[
+ "{{system}}"
+ ],
+ prompt=[
+ {"token": "<human>"},
+ ":{{query}}\n",
+ {"token": "<bot>"},
+ ":"
+ ],
+ system="",
+ sep=[
+ "\n"
+ ]
+)