From d659907f34a7938c39df107b03d83f41a41063ba Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Wed, 5 Jul 2023 21:18:28 +0800
Subject: [PATCH] fix freeze tuning

Former-commit-id: e32a1db967da02f502559df59ec6d1ab4554febf
---
 src/utils/config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/utils/config.py b/src/utils/config.py
index f0e63d8e..79dd691e 100644
--- a/src/utils/config.py
+++ b/src/utils/config.py
@@ -237,7 +237,7 @@ class FinetuningArguments:
         self.lora_target = [target.strip() for target in self.lora_target.split(",")]
 
         if self.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0
-            trainable_layer_ids = [self.num_hidden_layers - k for k in range(self.num_layer_trainable)]
+            trainable_layer_ids = [self.num_hidden_layers - k - 1 for k in range(self.num_layer_trainable)]
         else: # fine-tuning the first n layers if num_layer_trainable < 0
             trainable_layer_ids = [k for k in range(-self.num_layer_trainable)]
 
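
Note on the change: the patch fixes an off-by-one error when freeze tuning
the last n layers. Layer indices run from 0 to num_hidden_layers - 1, so the
old expression produced a nonexistent layer id equal to num_hidden_layers and
silently dropped one valid layer. Below is a minimal sketch of the before and
after behavior; the layer count of 32 is an assumption for illustration (as in
a LLaMA-7B-sized model), while the variable names are taken from the diff:

    num_hidden_layers = 32   # assumption: a 32-layer model, indices 0..31
    num_layer_trainable = 3  # fine-tune the last 3 layers

    # Before the patch: the first id is 32, which is out of range, so only
    # 2 of the intended 3 layers would actually be unfrozen.
    before = [num_hidden_layers - k for k in range(num_layer_trainable)]
    assert before == [32, 31, 30]  # layer 32 does not exist; layer 29 is never selected

    # After the patch: exactly the last 3 valid layer ids are selected.
    after = [num_hidden_layers - k - 1 for k in range(num_layer_trainable)]
    assert after == [31, 30, 29]

The negative branch (fine-tuning the first n layers) already indexed from 0
and needed no change, which is why the patch touches only one line.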