Former-commit-id: 801f7279693a0c785480ea67d663d99f4ca653da
This commit is contained in:
hiyouga 2024-01-16 23:53:50 +08:00
parent 5ab7fd0842
commit d1ec884e75

View File

@ -160,8 +160,6 @@ class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments):
self.lora_alpha = self.lora_alpha or self.lora_rank * 2
self.lora_target = split_arg(self.lora_target)
self.additional_target = split_arg(self.additional_target)
self.ref_model_adapters = split_arg(self.ref_model_adapters)
self.reward_model_adapters = split_arg(self.reward_model_adapters)
assert self.finetuning_type in ["lora", "freeze", "full"], "Invalid fine-tuning method."
assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."