From f153ee13be6b3ba6fa6a6911cd759b2971582c67 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Thu, 29 Aug 2024 20:37:47 +0800
Subject: [PATCH] fix #5292

Former-commit-id: aa1afdc75614868172bd2f9c052647b8f226d3f2
---
 src/llamafactory/hparams/parser.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/llamafactory/hparams/parser.py b/src/llamafactory/hparams/parser.py
index bea3d650..ec303655 100644
--- a/src/llamafactory/hparams/parser.py
+++ b/src/llamafactory/hparams/parser.py
@@ -221,6 +221,9 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
     if training_args.predict_with_generate and finetuning_args.compute_accuracy:
         raise ValueError("Cannot use `predict_with_generate` and `compute_accuracy` together.")
 
+    if training_args.predict_with_generate and is_deepspeed_zero3_enabled():
+        raise ValueError("`predict_with_generate` is incompatible with DeepSpeed ZeRO-3.")
+
     if training_args.do_train and model_args.quantization_device_map == "auto":
         raise ValueError("Cannot use device map for quantized models in training.")
 
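Note: the guard above rejects `predict_with_generate` under DeepSpeed ZeRO-3 before training starts rather than failing later at evaluation time. Below is a minimal, self-contained sketch of the same validation pattern using the public transformers helper `is_deepspeed_zero3_enabled`; the `_TrainingArgsStub` dataclass is a hypothetical stand-in for the real Seq2SeqTrainingArguments used in get_train_args, not part of LLaMA-Factory.

    from dataclasses import dataclass

    from transformers.integrations import is_deepspeed_zero3_enabled


    @dataclass
    class _TrainingArgsStub:
        # Hypothetical stand-in for Seq2SeqTrainingArguments; only the flag
        # relevant to this check is modeled.
        predict_with_generate: bool = False


    def check_predict_with_generate(training_args: _TrainingArgsStub) -> None:
        # Same early-exit check as the patch: raise immediately if the
        # unsupported combination of options is requested.
        if training_args.predict_with_generate and is_deepspeed_zero3_enabled():
            raise ValueError("`predict_with_generate` is incompatible with DeepSpeed ZeRO-3.")


    if __name__ == "__main__":
        # With no active ZeRO-3 config, this call passes silently; under a
        # ZeRO-3 DeepSpeed config it raises the ValueError above.
        check_predict_with_generate(_TrainingArgsStub(predict_with_generate=True))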