fix: Repair the issue where quantization failed after merging the adapter.

Former-commit-id: f91a9a250afa0c2e1554b2c81c1860d7968dc816
Liuww 2024-07-24 14:31:29 +08:00 committed by GitHub
parent d4440c07b6
commit ed05486b98


@@ -66,7 +66,7 @@ def save_model(
         error = ALERTS["err_no_dataset"][lang]
     elif export_quantization_bit not in GPTQ_BITS and not checkpoint_path:
         error = ALERTS["err_no_adapter"][lang]
-    elif export_quantization_bit in GPTQ_BITS and isinstance(checkpoint_path, list):
+    elif export_quantization_bit in GPTQ_BITS and checkpoint_path and isinstance(checkpoint_path, list):
         error = ALERTS["err_gptq_lora"][lang]
     if error:
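
A minimal sketch of why the extra truthiness check matters, assuming the WebUI passes checkpoint_path as a (possibly empty) list when adapters are selected and that an empty list means no adapter remains to merge (e.g. the adapter was already merged into the base model). The GPTQ_BITS values and the helper function name below are illustrative assumptions, not the actual LLaMA-Factory code:

    # Hypothetical standalone reproduction of the changed condition.
    GPTQ_BITS = ["8", "4", "3", "2"]  # assumed supported bit widths

    def gptq_lora_conflict(export_quantization_bit: str, checkpoint_path) -> bool:
        """True when GPTQ quantization is requested together with one or
        more LoRA adapters, a combination the export flow rejects."""
        return (
            export_quantization_bit in GPTQ_BITS
            and bool(checkpoint_path)              # empty list = no adapter selected
            and isinstance(checkpoint_path, list)  # adapters arrive as a list
        )

    # Before the fix, isinstance([], list) was still True, so an empty
    # selection (typical after merging the adapter) raised err_gptq_lora
    # and blocked quantization; the fix lets that case proceed.
    assert gptq_lora_conflict("4", []) is False
    assert gptq_lora_conflict("4", ["adapter-a", "adapter-b"]) is True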