Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-09-18 11:02:49 +08:00)
[assets] update wechat (#9129)
Commit 5d89af9e58 (parent cf48406d07)
Binary file not shown (size before: 167 KiB, after: 163 KiB).
Binary file not shown (size before: 168 KiB, after: 170 KiB).
@@ -257,8 +257,8 @@ class OpenAIDatasetConverter(DatasetConverter):
             content = message[self.dataset_attr.content_tag]
 
             if role in [self.dataset_attr.assistant_tag, self.dataset_attr.function_tag]:
-                if "tool_calls" in message and len(message['tool_calls']) > 0:
-                    tool_calls_list = [tool['function'] for tool in message['tool_calls']]
+                if "tool_calls" in message and len(message["tool_calls"]) > 0:
+                    tool_calls_list = [tool["function"] for tool in message["tool_calls"]]
                     content = json.dumps(tool_calls_list, ensure_ascii=False)
                     role = self.dataset_attr.function_tag
 
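For context, a minimal runnable sketch (not part of the diff) of what this branch does to an OpenAI-format assistant message; the example payload below is an illustrative assumption, not taken from the repository:

import json

# Hypothetical OpenAI-format message carrying a tool call.
message = {
    "role": "assistant",
    "content": "",
    "tool_calls": [
        {"type": "function", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}
    ],
}

# Mirrors the patched branch: keep each call's "function" payload and
# serialize the list so it can stand in as the message content.
if "tool_calls" in message and len(message["tool_calls"]) > 0:
    tool_calls_list = [tool["function"] for tool in message["tool_calls"]]
    content = json.dumps(tool_calls_list, ensure_ascii=False)
    print(content)  # [{"name": "get_weather", "arguments": "{\"city\": \"Paris\"}"}]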
@@ -340,7 +340,7 @@ class OpenAIDatasetConverter(DatasetConverter):
         if isinstance(tools, dict) or isinstance(tools, list):
             tools = json.dumps(tools, ensure_ascii=False)
 
-        short_system_prompt = 'detailed thinking off'
+        short_system_prompt = "detailed thinking off"
         if not system:
            if not tools:
                 system = short_system_prompt
@@ -348,13 +348,12 @@ class OpenAIDatasetConverter(DatasetConverter):
                 pass
         else:
             if not tools:
-                if 'detailed thinking on' in system or 'detailed thinking off' in system:
+                if "detailed thinking on" in system or "detailed thinking off" in system:
                     pass
                 else:
-                    system += '\n' + short_system_prompt
+                    system += "\n" + short_system_prompt
             else:
-                system += '\n'
-
+                system += "\n"
 
         output = {
             "_prompt": prompt,
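These two hunks only normalize quote style; the underlying branching can be read as this standalone sketch (hypothetical helper name, simplified to plain strings):

def resolve_system_prompt(system: str, tools: str) -> str:
    # Inject "detailed thinking off" only when no thinking mode is already
    # set in the system prompt and no tool definitions are attached.
    short_system_prompt = "detailed thinking off"
    if not system:
        return short_system_prompt if not tools else system
    if not tools:
        if "detailed thinking on" in system or "detailed thinking off" in system:
            return system
        return system + "\n" + short_system_prompt
    return system + "\n"

print(resolve_system_prompt("You are helpful.", ""))  # appends the short prompt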
@@ -685,7 +685,9 @@ register_template(
     format_system=StringFormatter(slots=["<role>SYSTEM</role>{{content}}<|role_end|>"]),
     format_assistant=StringFormatter(slots=["{{content}}<|role_end|>"]),
     format_observation=StringFormatter(
-        slots=["<role>OBSERVATION</role>\n<tool_response>\n{{content}}\n</tool_response><|role_end|><role>ASSISTANT</role>"]
+        slots=[
+            "<role>OBSERVATION</role>\n<tool_response>\n{{content}}\n</tool_response><|role_end|><role>ASSISTANT</role>"
+        ]
     ),
     format_function=FunctionFormatter(slots=["{{content}}<|role_end|>"], tool_format="ling"),
     format_tools=ToolFormatter(tool_format="ling"),
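The reflowed slot string is unchanged; a rough illustration of how it wraps a tool response (StringFormatter semantics approximated with a plain replace, and the tool output is a made-up value):

slot = (
    "<role>OBSERVATION</role>\n<tool_response>\n{{content}}\n"
    "</tool_response><|role_end|><role>ASSISTANT</role>"
)
tool_output = '{"temperature": 21}'  # hypothetical observation content
print(slot.replace("{{content}}", tool_output))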
@@ -219,7 +219,7 @@ def patch_valuehead_model(model: "AutoModelForCausalLMWithValueHead") -> None:
 
         if base_model and hasattr(base_model, "get_rope_index"):
             return base_model.get_rope_index
-        elif (base_model and hasattr(base_model, "model") and hasattr(base_model.model, "get_rope_index")):
+        elif base_model and hasattr(base_model, "model") and hasattr(base_model.model, "get_rope_index"):
             return base_model.model.get_rope_index
         else:
             return None
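A toy sketch of the lookup order this simplified elif preserves (stand-in classes, not the transformers API):

class Inner:
    def get_rope_index(self):  # stand-in for the real rope-index method
        return "rope index"


class Wrapper:
    def __init__(self):
        self.model = Inner()  # the method lives one attribute level down


def find_rope_func(base_model):
    # Same precedence as the patched code: direct attribute first,
    # then one level down at base_model.model, else None.
    if base_model and hasattr(base_model, "get_rope_index"):
        return base_model.get_rope_index
    elif base_model and hasattr(base_model, "model") and hasattr(base_model.model, "get_rope_index"):
        return base_model.model.get_rope_index
    return None


print(find_rope_func(Wrapper()))  # bound method Inner.get_rope_index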