Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-23 14:22:51 +08:00)
[misc] fix bug in constant (#7765)
Co-authored-by: Sachin Beldona <sbeldona@cs.cmu.edu>
This commit is contained in:
parent: a4455e3021
commit: ec7257e70f
@@ -204,7 +204,7 @@ def _setup_lora_tuning(
     if (
         finetuning_args.use_dora
         and getattr(model, "quantization_method", None) is not None
-        and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES
+        and getattr(model, "quantization_method", None) != QuantizationMethod.BNB
     ):
         raise ValueError("DoRA is not compatible with PTQ-quantized models.")

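In effect, this hunk renames the enum member referenced by the DoRA compatibility guard. Below is a minimal, self-contained sketch of that guard, assuming a stand-in QuantizationMethod enum (the member values here are illustrative, not the project's actual constants): DoRA is accepted for unquantized and bitsandbytes-quantized models, and rejected for PTQ backends such as GPTQ.

from enum import Enum
from types import SimpleNamespace

class QuantizationMethod(str, Enum):  # hypothetical stand-in for the project's constant
    BNB = "bitsandbytes"
    GPTQ = "gptq"

def _check_dora(model, use_dora: bool) -> None:
    # Same condition as the patched line: only reject when the model is
    # quantized with something other than bitsandbytes.
    if (
        use_dora
        and getattr(model, "quantization_method", None) is not None
        and getattr(model, "quantization_method", None) != QuantizationMethod.BNB
    ):
        raise ValueError("DoRA is not compatible with PTQ-quantized models.")

_check_dora(SimpleNamespace(quantization_method=QuantizationMethod.BNB), use_dora=True)  # passes
# _check_dora(SimpleNamespace(quantization_method=QuantizationMethod.GPTQ), use_dora=True)  # would raise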
@@ -56,7 +56,7 @@ def can_quantize_to(quantization_method: str) -> "gr.Dropdown":
     Inputs: top.quantization_method
     Outputs: top.quantization_bit
     """
-    if quantization_method == QuantizationMethod.BITS_AND_BYTES.value:
+    if quantization_method == QuantizationMethod.BNB.value:
         available_bits = ["none", "8", "4"]
     elif quantization_method == QuantizationMethod.HQQ.value:
         available_bits = ["none", "8", "6", "5", "4", "3", "2", "1"]
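For context, a sketch of how the surrounding webui helper plausibly completes: each branch selects the bit widths the chosen backend supports, and the result updates a Gradio dropdown. Only the two branches above appear in the diff; the import path, the fallback branch, and the return statement are assumptions.

import gradio as gr

from llamafactory.extras.constants import QuantizationMethod  # assumed import path

def can_quantize_to(quantization_method: str) -> "gr.Dropdown":
    r"""
    Inputs: top.quantization_method
    Outputs: top.quantization_bit
    """
    if quantization_method == QuantizationMethod.BNB.value:
        available_bits = ["none", "8", "4"]  # bitsandbytes supports 8-bit and 4-bit
    elif quantization_method == QuantizationMethod.HQQ.value:
        available_bits = ["none", "8", "6", "5", "4", "3", "2", "1"]  # HQQ supports 1-8 bits
    else:
        available_bits = ["none"]  # assumed fallback for other quantization methods
    return gr.Dropdown(choices=available_bits)  # assumed: refresh the quantization_bit dropdown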