Mirror of https://github.com/hiyouga/LLaMA-Factory.git
Synced 2025-08-04 20:52:59 +08:00

update neftune logic

Former-commit-id: 7de7174ce3ea804b2ba58560193cda25cbd675ef
This commit is contained in:
parent 2af83198c7
commit daeff710eb
@@ -56,10 +56,10 @@ def prepare_model_for_training(
         logger.info("Upcasting weights in layernorm in float32.")
 
     if finetuning_args.neft_alpha > 1e-6:
-        input_embed: torch.nn.Embedding = model.get_input_embeddings()
-        def noisy_forward(self: torch.nn.Embedding, x: torch.Tensor) -> torch.Tensor:
-            embeddings = torch.nn.Embedding.forward(self, x)
-            if self.training:
-                dims = self.num_embeddings * self.embedding_dim
-                mag_norm = finetuning_args.neft_alpha / (dims ** 0.5)
+        input_embed = model.get_input_embeddings()
+        if isinstance(input_embed, torch.nn.Embedding):
+            def noisy_forward(self: torch.nn.Embedding, x: torch.Tensor) -> torch.Tensor:
+                embeddings = input_embed.__class__.forward(self, x)
+                if self.training:
+                    dims = self.num_embeddings * self.embedding_dim
+                    mag_norm = finetuning_args.neft_alpha / (dims ** 0.5)
@@ -68,6 +68,8 @@ def prepare_model_for_training(
 
-        input_embed.forward = MethodType(noisy_forward, input_embed)
-        logger.info("Using noisy embedding with alpha={:.2f}".format(finetuning_args.neft_alpha))
+            input_embed.forward = MethodType(noisy_forward, input_embed)
+            logger.info("Using noisy embedding with alpha={:.2f}".format(finetuning_args.neft_alpha))
+        else:
+            logger.warning("Input embeddings are not normal nn.Embedding, cannot transform into noisy embedding.")
 
     if use_gradient_checkpointing:
         if hasattr(model, "enable_input_require_grads"):
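For reference, below is a minimal, self-contained sketch of what the patched embedding forward amounts to after this change. The noise-injection and return lines sit between the two hunks above and are unchanged by the commit, so the uniform-noise line is an assumption based on the NEFTune formulation (noise magnitude neft_alpha / sqrt(num_embeddings * embedding_dim)); attach_noisy_forward is a hypothetical helper name, not code from the repository.

from types import MethodType

import torch


def attach_noisy_forward(input_embed: torch.nn.Embedding, neft_alpha: float) -> None:
    # Patch the embedding in place: noise is injected only in training mode.
    def noisy_forward(self: torch.nn.Embedding, x: torch.Tensor) -> torch.Tensor:
        # Call the embedding's own class forward instead of hard-coding
        # torch.nn.Embedding.forward, matching the new code in the diff.
        embeddings = input_embed.__class__.forward(self, x)
        if self.training:
            dims = self.num_embeddings * self.embedding_dim
            mag_norm = neft_alpha / (dims ** 0.5)
            # Assumed noise term (outside the hunks shown): uniform noise in
            # [-mag_norm, mag_norm], per the NEFTune paper.
            embeddings = embeddings + torch.zeros_like(embeddings).uniform_(-mag_norm, mag_norm)
        return embeddings

    input_embed.forward = MethodType(noisy_forward, input_embed)


# Usage sketch: noise appears in train mode only.
embed = torch.nn.Embedding(1000, 64)
attach_noisy_forward(embed, neft_alpha=5.0)
embed.train()
noisy = embed(torch.tensor([[1, 2, 3]]))
embed.eval()
clean = embed(torch.tensor([[1, 2, 3]]))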
@@ -82,11 +84,10 @@ def prepare_model_for_training(
         logger.info("Gradient checkpointing enabled.")
 
     if finetuning_args.finetuning_type != "full" and hasattr(model, output_layer_name):
-        output_layer: torch.nn.Linear = getattr(model, output_layer_name)
-        input_dtype = output_layer.weight.dtype
+        output_layer = getattr(model, output_layer_name)
+        if isinstance(output_layer, torch.nn.Linear):
 
-        def forward_in_fp32(self, x: torch.Tensor) -> torch.Tensor:
-            return torch.nn.Linear.forward(self, x.to(input_dtype)).to(torch.float32)
+            def forward_in_fp32(self, x: torch.Tensor) -> torch.Tensor:
+                return output_layer.__class__.forward(self, x.to(output_layer.weight.dtype)).to(torch.float32)
 
-        output_layer.forward = MethodType(forward_in_fp32, output_layer)
+            output_layer.forward = MethodType(forward_in_fp32, output_layer)
 
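The hunk above applies the same isinstance guard to the output layer and reads the weight dtype at call time instead of capturing it in the removed input_dtype variable; the logits are upcast to float32, which is commonly done for loss stability when the head is kept in half precision. Below is a minimal sketch under the assumption that the output layer is a plain torch.nn.Linear; cast_output_to_fp32 is a hypothetical helper name, not code from the repository.

from types import MethodType

import torch


def cast_output_to_fp32(output_layer: torch.nn.Linear) -> None:
    # Wrap the layer's forward: cast inputs to the weight dtype, outputs to float32.
    def forward_in_fp32(self: torch.nn.Linear, x: torch.Tensor) -> torch.Tensor:
        # Reading output_layer.weight.dtype here (rather than once, up front)
        # keeps the wrapper valid if the weights are later cast to another dtype.
        return output_layer.__class__.forward(self, x.to(output_layer.weight.dtype)).to(torch.float32)

    output_layer.forward = MethodType(forward_in_fp32, output_layer)


# Usage sketch with a bfloat16 head: the returned logits are float32.
head = torch.nn.Linear(64, 1000).bfloat16()
cast_output_to_fp32(head)
logits = head(torch.randn(2, 64))
assert logits.dtype == torch.float32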