fix freeze tuning

This commit is contained in:
hiyouga
2023-07-05 21:18:28 +08:00
parent a2ba69183b
commit e6603977f6

View File

@@ -237,7 +237,7 @@ class FinetuningArguments:
         self.lora_target = [target.strip() for target in self.lora_target.split(",")]
         if self.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0
-            trainable_layer_ids = [self.num_hidden_layers - k for k in range(self.num_layer_trainable)]
+            trainable_layer_ids = [self.num_hidden_layers - k - 1 for k in range(self.num_layer_trainable)]
         else: # fine-tuning the first n layers if num_layer_trainable < 0
             trainable_layer_ids = [k for k in range(-self.num_layer_trainable)]