Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-08-23 06:12:50 +08:00
fix freeze tuning
Former-commit-id: e6603977f695b71f6a36640256f6653ff513d783
This commit is contained in:
parent 265dc1b6a0
commit bcd7493212
@@ -237,7 +237,7 @@ class FinetuningArguments:
         self.lora_target = [target.strip() for target in self.lora_target.split(",")]

         if self.num_layer_trainable > 0: # fine-tuning the last n layers if num_layer_trainable > 0
-            trainable_layer_ids = [self.num_hidden_layers - k for k in range(self.num_layer_trainable)]
+            trainable_layer_ids = [self.num_hidden_layers - k - 1 for k in range(self.num_layer_trainable)]
         else: # fine-tuning the first n layers if num_layer_trainable < 0
             trainable_layer_ids = [k for k in range(-self.num_layer_trainable)]
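A minimal sketch of why the "- 1" matters, using hypothetical values (num_hidden_layers = 32, num_layer_trainable = 3 are chosen for illustration and are not taken from this commit): before the fix, the generated list starts at num_hidden_layers itself, an index that does not exist in a model whose layers are numbered 0..num_hidden_layers-1, so one of the intended last-n layers stays frozen.

    # Hypothetical values for illustration only -- not from this commit.
    num_hidden_layers = 32   # e.g. a 32-layer decoder, valid layer indices 0..31
    num_layer_trainable = 3  # freeze tuning: train only the last 3 layers

    # Before the fix: the list begins at index 32, which matches no layer.
    old_ids = [num_hidden_layers - k for k in range(num_layer_trainable)]
    print(old_ids)  # [32, 31, 30] -- layer 32 does not exist, layer 29 stays frozen

    # After the fix: shifted by one, so the list covers the actual last n layers.
    new_ids = [num_hidden_layers - k - 1 for k in range(num_layer_trainable)]
    print(new_ids)  # [31, 30, 29]

The negative branch needs no change: for num_layer_trainable < 0, range(-self.num_layer_trainable) already yields the first n zero-based indices.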