From d7d734d54c7c9a1e3c9660e9689e89a4abb65a9d Mon Sep 17 00:00:00 2001
From: Yaowei Zheng
Date: Fri, 9 Jan 2026 16:17:26 +0800
Subject: [PATCH] [misc] fix fp8 (#9742)

---
 src/llamafactory/hparams/parser.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llamafactory/hparams/parser.py b/src/llamafactory/hparams/parser.py
index 6e9541b84..ae8b3e424 100644
--- a/src/llamafactory/hparams/parser.py
+++ b/src/llamafactory/hparams/parser.py
@@ -340,7 +340,7 @@ def get_train_args(args: dict[str, Any] | list[str] | None = None) -> _TRAIN_CLS
     if training_args.deepspeed is not None and (finetuning_args.use_galore or finetuning_args.use_apollo):
         raise ValueError("GaLore and APOLLO are incompatible with DeepSpeed yet.")
 
-    if training_args.fp8 and training_args.quantization_bit is not None:
+    if training_args.fp8 and model_args.quantization_bit is not None:
         raise ValueError("FP8 training is not compatible with quantization. Please disable one of them.")
 
     if model_args.infer_backend != EngineName.HF:
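
Note: the sketch below illustrates the guard this patch corrects. The dataclasses are hypothetical stand-ins for LLaMA-Factory's real argument classes, showing only the relevant fields: quantization_bit belongs to the model arguments, not the training arguments, so the pre-patch check read a field that the training arguments presumably do not define and would fail when fp8 was enabled, instead of raising the intended ValueError.

from dataclasses import dataclass


@dataclass
class TrainingArguments:  # hypothetical stand-in, not the real class
    fp8: bool = False  # enables FP8 mixed-precision training


@dataclass
class ModelArguments:  # hypothetical stand-in, not the real class
    quantization_bit: int | None = None  # e.g. 4 or 8 for quantized fine-tuning


def check_fp8_compat(training_args: TrainingArguments, model_args: ModelArguments) -> None:
    # Post-patch behavior: quantization_bit is read from model_args, where it
    # is actually defined, so enabling both options raises a clear error.
    if training_args.fp8 and model_args.quantization_bit is not None:
        raise ValueError("FP8 training is not compatible with quantization. Please disable one of them.")


# Raises the intended ValueError rather than failing on a missing
# training_args.quantization_bit attribute.
check_fp8_compat(TrainingArguments(fp8=True), ModelArguments(quantization_bit=4))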