[model] support MiMo-V2-Flash model (#9637)

Hertz authored on 2025-12-21 14:38:18 +08:00, committed by GitHub
parent 0894b4f37e
commit 4923f52a28
5 changed files with 39 additions and 1 deletion


@@ -1610,6 +1610,26 @@ register_template(
    template_class=ReasoningTemplate,
)


# copied from qwen template
register_template(
    name="mimo_v2",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
    format_observation=StringFormatter(
        slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
    ),
    format_tools=ToolFormatter(tool_format="qwen"),
    default_system="You are MiMo, a helpful AI assistant engineered by Xiaomi.",
    stop_words=["<|im_end|>"],
    replace_eos=True,
    thought_words=("<think>", "</think>"),
    template_class=ReasoningTemplate,
)


# copied from qwen2vl
register_template(
    name="mimo_vl",

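For reference, the formatter slots above concatenate into a ChatML-style prompt. A minimal rendering sketch of a single system + user turn, re-implementing the slot concatenation by hand (the render_mimo_v2 helper is hypothetical, not LLaMA-Factory's Formatter API):

def render_mimo_v2(system: str, user: str) -> str:
    # Mirrors format_system and format_user from the mimo_v2 template above.
    return (
        f"<|im_start|>system\n{system}<|im_end|>\n"
        f"<|im_start|>user\n{user}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )

prompt = render_mimo_v2(
    "You are MiMo, a helpful AI assistant engineered by Xiaomi.",
    "What is 2 + 2?",
)
# The assistant's completion ends with <|im_end|>, which doubles as the stop
# word (stop_words=["<|im_end|>"] with replace_eos=True); any reasoning span
# is delimited by the <think>...</think> thought_words.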

@@ -1803,6 +1803,21 @@ register_model_group(
)


register_model_group(
    models={
        "MiMo-V2-Flash-Base": {
            DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-V2-Flash-Base",
            DownloadSource.MODELSCOPE: "XiaomiMiMo/MiMo-V2-Flash-Base",
        },
        "MiMo-V2-Flash": {
            DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-V2-Flash",
            DownloadSource.MODELSCOPE: "XiaomiMiMo/MiMo-V2-Flash",
        },
    },
    template="mimo_v2",
)


register_model_group(
    models={
        "MiMo-7B-VL-RL": {
@@ -1827,7 +1842,7 @@ register_model_group(
        },
        "MiMo-VL-7B-SFT-2508": {
            DownloadSource.DEFAULT: "XiaomiMiMo/MiMo-VL-7B-SFT-2508",
            DownloadSource.MODELSCOPE: "XiaomiMiMo/MiMo-VL-7B-SFT-2508",
        },
    },
    template="qwen2_vl",
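The new group above maps both model names to the same repository IDs on the default hub and on ModelScope, and wires them to the mimo_v2 template. A minimal loading sketch, assuming the checkpoints follow the standard transformers layout (trust_remote_code is an assumption, in case the architecture ships custom modeling code):

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "XiaomiMiMo/MiMo-V2-Flash"  # repo ID as registered above
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)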


@@ -113,6 +113,7 @@ def configure_quantization(
        if quant_method == QuantizationMethod.FP8:
            quant_config = FineGrainedFP8Config(dequantize=True)
            init_kwargs["quantization_config"] = quant_config
            init_kwargs["ignore_mismatched_sizes"] = True

        quant_bits = quantization_config.get("bits", "?")
        logger.info_rank0(f"Loading {quant_bits}-bit {quant_method.upper()}-quantized model.")
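The added ignore_mismatched_sizes flag rides along in init_kwargs to the eventual from_pretrained call. A simplified sketch of the resulting call, assuming a transformers version that ships FineGrainedFP8Config; the rationale in the comments is an inference, not taken from the commit:

from transformers import AutoModelForCausalLM, FineGrainedFP8Config

init_kwargs = {
    # Dequantize the FP8 weights to a higher-precision dtype at load time.
    "quantization_config": FineGrainedFP8Config(dequantize=True),
    # Presumably tolerates shape differences between the FP8 checkpoint
    # (weights plus per-block scales) and the dequantized model, instead of
    # raising at load time.
    "ignore_mismatched_sizes": True,
}
model = AutoModelForCausalLM.from_pretrained("XiaomiMiMo/MiMo-V2-Flash", **init_kwargs)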