Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-04)
commit daeff710eb
parent 2af83198c7

    update neftune logic

Former-commit-id: 7de7174ce3ea804b2ba58560193cda25cbd675ef
@@ -56,18 +56,20 @@ def prepare_model_for_training(
         logger.info("Upcasting weights in layernorm in float32.")
 
     if finetuning_args.neft_alpha > 1e-6:
-        input_embed: torch.nn.Embedding = model.get_input_embeddings()
-
-        def noisy_forward(self: torch.nn.Embedding, x: torch.Tensor) -> torch.Tensor:
-            embeddings = torch.nn.Embedding.forward(self, x)
-            if self.training:
-                dims = self.num_embeddings * self.embedding_dim
-                mag_norm = finetuning_args.neft_alpha / (dims ** 0.5)
-                embeddings += torch.zeros_like(embeddings).uniform_(-mag_norm, mag_norm)
-            return embeddings
-
-        input_embed.forward = MethodType(noisy_forward, input_embed)
-        logger.info("Using noisy embedding with alpha={:.2f}".format(finetuning_args.neft_alpha))
+        input_embed = model.get_input_embeddings()
+        if isinstance(input_embed, torch.nn.Embedding):
+            def noisy_forward(self: torch.nn.Embedding, x: torch.Tensor) -> torch.Tensor:
+                embeddings = input_embed.__class__.forward(self, x)
+                if self.training:
+                    dims = self.num_embeddings * self.embedding_dim
+                    mag_norm = finetuning_args.neft_alpha / (dims ** 0.5)
+                    embeddings += torch.zeros_like(embeddings).uniform_(-mag_norm, mag_norm)
+                return embeddings
+
+            input_embed.forward = MethodType(noisy_forward, input_embed)
+            logger.info("Using noisy embedding with alpha={:.2f}".format(finetuning_args.neft_alpha))
+        else:
+            logger.warning("Input embeddings are not normal nn.Embedding, cannot transform into noisy embedding.")
 
     if use_gradient_checkpointing:
         if hasattr(model, "enable_input_require_grads"):
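The hunk above swaps the embedding layer's bound forward for a noisy NEFTune variant at the instance level. A minimal, self-contained sketch of that pattern follows; the alpha value and toy sizes are illustrative, not values from the repo.

from types import MethodType

import torch

alpha = 5.0  # hypothetical NEFTune noise magnitude, not a repo default
embed = torch.nn.Embedding(num_embeddings=100, embedding_dim=16)

def noisy_forward(self: torch.nn.Embedding, x: torch.Tensor) -> torch.Tensor:
    embeddings = self.__class__.forward(self, x)  # original lookup, bypassing the patch
    if self.training:  # noise is injected only in training mode
        dims = self.num_embeddings * self.embedding_dim
        mag_norm = alpha / (dims ** 0.5)  # uniform noise scaled by alpha / sqrt(dims)
        embeddings += torch.zeros_like(embeddings).uniform_(-mag_norm, mag_norm)
    return embeddings

embed.forward = MethodType(noisy_forward, embed)  # patch the instance, not the class

embed.train()
noisy = embed(torch.tensor([[1, 2, 3]]))  # perturbed embeddings
embed.eval()
clean = embed(torch.tensor([[1, 2, 3]]))  # plain lookup, no noise
assert not torch.equal(noisy, clean)

Patching the instance keeps the change local to one model, and the self.training guard means evaluation and generation see clean embeddings.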
@@ -82,12 +84,11 @@ def prepare_model_for_training(
         logger.info("Gradient checkpointing enabled.")
 
     if finetuning_args.finetuning_type != "full" and hasattr(model, output_layer_name):
-        output_layer: torch.nn.Linear = getattr(model, output_layer_name)
-        input_dtype = output_layer.weight.dtype
-
-        def forward_in_fp32(self, x: torch.Tensor) -> torch.Tensor:
-            return torch.nn.Linear.forward(self, x.to(input_dtype)).to(torch.float32)
-
-        output_layer.forward = MethodType(forward_in_fp32, output_layer)
+        output_layer = getattr(model, output_layer_name)
+        if isinstance(output_layer, torch.nn.Linear):
+            def forward_in_fp32(self, x: torch.Tensor) -> torch.Tensor:
+                return output_layer.__class__.forward(self, x.to(output_layer.weight.dtype)).to(torch.float32)
+
+            output_layer.forward = MethodType(forward_in_fp32, output_layer)
 
     return model
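The second hunk applies the same instance-level patch to the output head so that logits come back in float32 regardless of the model's compute dtype. A minimal sketch under assumed names (lm_head and the toy sizes are hypothetical; bfloat16 stands in for whatever reduced precision the model was loaded in):

from types import MethodType

import torch

lm_head = torch.nn.Linear(16, 100).to(torch.bfloat16)  # reduced-precision head

def forward_in_fp32(self, x: torch.Tensor) -> torch.Tensor:
    # Match the layer's dtype on the way in, upcast the logits on the way out.
    return self.__class__.forward(self, x.to(self.weight.dtype)).to(torch.float32)

lm_head.forward = MethodType(forward_in_fp32, lm_head)

logits = lm_head(torch.randn(1, 16))  # float32 input is downcast internally
assert logits.dtype == torch.float32

Computing the loss from float32 logits keeps the softmax numerically stable when adapter training otherwise runs in half precision.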