Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-15 03:10:35 +08:00
fix #3365
@@ -139,6 +139,7 @@ def _configure_quantization(
     if quant_method == QuantizationMethod.GPTQ:
         require_version("auto_gptq>=0.5.0", "To fix: pip install auto_gptq>=0.5.0")
+        quantization_config.pop("disable_exllama", None)  # remove deprecated args
         quantization_config["use_exllama"] = False  # disable exllama

     if quant_method == QuantizationMethod.AWQ:
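For context, a minimal standalone sketch of what the patched GPTQ branch does. The helper name (configure_gptq) and the plain-dict config are simplified stand-ins for illustration, not LLaMA-Factory's actual _configure_quantization signature:

    # Sketch of the patched GPTQ handling (assumption: simplified from the diff above).
    from transformers.utils.versions import require_version

    def configure_gptq(quantization_config: dict) -> dict:
        # Same requirement as checked in the diff.
        require_version("auto_gptq>=0.5.0", "To fix: pip install auto_gptq>=0.5.0")
        # The fix from this commit: drop the deprecated key before setting the new
        # flag, so the config does not carry both the old and the new option.
        quantization_config.pop("disable_exllama", None)  # remove deprecated args
        quantization_config["use_exllama"] = False  # disable exllama
        return quantization_config

    # Example: a GPTQ config loaded from an older model's config.json
    cfg = {"quant_method": "gptq", "bits": 4, "disable_exllama": True}
    print(configure_gptq(cfg))
    # -> {'quant_method': 'gptq', 'bits': 4, 'use_exllama': False}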