From 4be704823c79dabd199e4caab324b3b5eacd13ef Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Tue, 16 Jan 2024 23:53:50 +0800
Subject: [PATCH] fix #2195

Former-commit-id: a83fb6d3ff1e2a9657e926083a926d48b0f3e1a6
---
 src/llmtuner/hparams/finetuning_args.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/llmtuner/hparams/finetuning_args.py b/src/llmtuner/hparams/finetuning_args.py
index 810a8a63..6ef5b933 100644
--- a/src/llmtuner/hparams/finetuning_args.py
+++ b/src/llmtuner/hparams/finetuning_args.py
@@ -160,8 +160,6 @@ class FinetuningArguments(FreezeArguments, LoraArguments, RLHFArguments):
         self.lora_alpha = self.lora_alpha or self.lora_rank * 2
         self.lora_target = split_arg(self.lora_target)
         self.additional_target = split_arg(self.additional_target)
-        self.ref_model_adapters = split_arg(self.ref_model_adapters)
-        self.reward_model_adapters = split_arg(self.reward_model_adapters)

         assert self.finetuning_type in ["lora", "freeze", "full"], "Invalid fine-tuning method."
         assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
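
For context, the two deleted lines passed the adapter arguments through the split_arg helper used inside FinetuningArguments.__post_init__, which normalizes a comma-separated string into a list. The snippet below is a minimal sketch of that behavior, not the repository's actual code: the body of split_arg and the AdapterArgsSketch dataclass are assumptions introduced only to illustrate what the removed calls were doing before this patch.

    # Minimal sketch (assumption): how a split_arg-style helper turns
    # comma-separated CLI strings into lists during __post_init__.
    from dataclasses import dataclass
    from typing import List, Optional, Union


    def split_arg(arg: Optional[Union[str, List[str]]]) -> Optional[List[str]]:
        """Turn "a,b" into ["a", "b"]; pass lists and None through unchanged."""
        if isinstance(arg, str):
            return [item.strip() for item in arg.split(",")]
        return arg


    @dataclass
    class AdapterArgsSketch:
        # Hypothetical stand-in for the adapter-related fields touched by the patch.
        ref_model_adapters: Optional[str] = None
        reward_model_adapters: Optional[str] = None

        def __post_init__(self):
            # These two normalizations are what the patch removes, so after the
            # fix the adapter arguments keep the raw values the user supplied.
            self.ref_model_adapters = split_arg(self.ref_model_adapters)
            self.reward_model_adapters = split_arg(self.reward_model_adapters)


    if __name__ == "__main__":
        args = AdapterArgsSketch(ref_model_adapters="saves/adapter1, saves/adapter2")
        print(args.ref_model_adapters)  # ['saves/adapter1', 'saves/adapter2']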