fix gptq training

hiyouga
2023-12-02 00:27:15 +08:00
parent a973ce6e89
commit f57445c7a0
3 changed files with 12 additions and 10 deletions


@@ -87,7 +87,7 @@ def init_adapter(
     if is_trainable and checkpoint_to_resume is None:  # create new lora weights while training
         if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all":
-            target_modules = find_all_linear_modules(model, model_args.quantization_bit)
+            target_modules = find_all_linear_modules(model)
         else:
             target_modules = finetuning_args.lora_target
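
Dropping the quantization_bit argument suggests that find_all_linear_modules now infers the linear-layer class from the model itself, which is what makes LoRA target discovery work for GPTQ-quantized checkpoints whose layers are not plain nn.Linear. A minimal sketch of that approach, assuming a quantization_method attribute, a QuantLinear class name, and an lm_head exclusion, none of which are confirmed by this diff:

```python
from typing import List

import torch


def find_all_linear_modules(model: torch.nn.Module) -> List[str]:
    # Sketch only: GPTQ backends typically replace nn.Linear with a
    # QuantLinear wrapper, so we match on the class-name suffix instead
    # of dispatching on a bit-width flag passed in from the caller.
    quantization_method = getattr(model, "quantization_method", None)  # assumed attribute
    if quantization_method == "gptq":
        linear_suffix = "QuantLinear"  # assumed class name for GPTQ layers
    else:
        linear_suffix = "Linear"  # matches torch.nn.Linear and subclasses

    module_names = set()
    for name, module in model.named_modules():
        if "lm_head" in name:  # keep the output head out of the LoRA targets
            continue
        if module.__class__.__name__.endswith(linear_suffix):
            module_names.add(name.split(".")[-1])  # e.g. "q_proj", "v_proj"
    return list(module_names)
```

With this signature the caller no longer needs model_args at all, which matches the one-argument call site introduced above.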