[model] support MiMo-V2-Flash model (#9637)

Hertz
2025-12-21 14:38:18 +08:00
committed by GitHub
parent 0894b4f37e
commit 4923f52a28
5 changed files with 39 additions and 1 deletion


@@ -113,6 +113,7 @@ def configure_quantization(
     if quant_method == QuantizationMethod.FP8:
         quant_config = FineGrainedFP8Config(dequantize=True)
         init_kwargs["quantization_config"] = quant_config
+        init_kwargs["ignore_mismatched_sizes"] = True
     quant_bits = quantization_config.get("bits", "?")
     logger.info_rank0(f"Loading {quant_bits}-bit {quant_method.upper()}-quantized model.")
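
For context, a minimal sketch of how these `init_kwargs` would flow into the model loader, assuming the standard `transformers` loading path; the checkpoint id `XiaomiMiMo/MiMo-V2-Flash` is a hypothetical placeholder, while `FineGrainedFP8Config(dequantize=True)` is taken verbatim from the diff above:

```python
# Sketch only: shows the effect of the patched init_kwargs on from_pretrained.
from transformers import AutoModelForCausalLM, FineGrainedFP8Config

init_kwargs = {
    # Dequantize FP8 weights to a higher-precision dtype at load time,
    # as configured in the diff above.
    "quantization_config": FineGrainedFP8Config(dequantize=True),
    # Added by this commit: tolerate parameter-shape mismatches that can
    # arise when the FP8 checkpoint is dequantized during loading.
    "ignore_mismatched_sizes": True,
}

# "XiaomiMiMo/MiMo-V2-Flash" is an assumed checkpoint id for illustration.
model = AutoModelForCausalLM.from_pretrained("XiaomiMiMo/MiMo-V2-Flash", **init_kwargs)
```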