diff --git a/src/llamafactory/model/adapter.py b/src/llamafactory/model/adapter.py
index bbc0056c..05a3e298 100644
--- a/src/llamafactory/model/adapter.py
+++ b/src/llamafactory/model/adapter.py
@@ -204,7 +204,7 @@ def _setup_lora_tuning(
         if (
             finetuning_args.use_dora
             and getattr(model, "quantization_method", None) is not None
-            and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES
+            and getattr(model, "quantization_method", None) != QuantizationMethod.BNB
         ):
             raise ValueError("DoRA is not compatible with PTQ-quantized models.")

diff --git a/src/llamafactory/webui/control.py b/src/llamafactory/webui/control.py
index 08aed40d..9939456f 100644
--- a/src/llamafactory/webui/control.py
+++ b/src/llamafactory/webui/control.py
@@ -56,7 +56,7 @@ def can_quantize_to(quantization_method: str) -> "gr.Dropdown":
     Inputs: top.quantization_method
     Outputs: top.quantization_bit
     """
-    if quantization_method == QuantizationMethod.BITS_AND_BYTES.value:
+    if quantization_method == QuantizationMethod.BNB.value:
        available_bits = ["none", "8", "4"]
    elif quantization_method == QuantizationMethod.HQQ.value:
        available_bits = ["none", "8", "6", "5", "4", "3", "2", "1"]
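
For context, a minimal sketch of the QuantizationMethod enum these call sites reference, assuming the member was renamed from BITS_AND_BYTES to BNB while keeping the "bitsandbytes" string value (the enum body and its location are assumptions, not shown in this diff):

    from enum import Enum

    class QuantizationMethod(str, Enum):
        # Sketch only: member names and values assumed from the call sites above.
        BNB = "bitsandbytes"  # formerly BITS_AND_BYTES; referenced by adapter.py and control.py
        HQQ = "hqq"           # referenced by can_quantize_to() in control.py

Both hunks only update references to the renamed member; the DoRA compatibility check and the available-bit choices are otherwise unchanged.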