Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-06 13:42:51 +08:00)
tiny fix
Former-commit-id: 6953096c9d8f85d56cc980a4bec3a052411fb4a0
commit 657dff438c
parent 5b211cfbe9
@@ -181,8 +181,8 @@ def resize_embedding_layer(model: "PreTrainedModel", tokenizer: "PreTrainedToken
         logger.warning("Current model does not support resizing token embeddings.")
         return

-    old_vocab_size = model.get_input_embeddings().weight.size(0)
-    if len(tokenizer) > old_vocab_size:
+    current_embedding_size = model.get_input_embeddings().weight.size(0)
+    if len(tokenizer) > current_embedding_size:
         model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)
-        new_vocab_size = model.get_input_embeddings().weight.size(0)
-        logger.info("Resized token embeddings from {} to {}.".format(old_vocab_size, new_vocab_size))
+        new_embedding_size = model.get_input_embeddings().weight.size(0)
+        logger.info("Resized token embeddings from {} to {}.".format(current_embedding_size, new_embedding_size))
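For readers outside the repo, here is a minimal standalone sketch of the helper this commit renames variables in, assuming a Hugging Face transformers model and tokenizer. The opening guard condition and the logger setup are assumptions (the diff shows only the warning and return); the resize call, variable names, and log message follow the diff above.

import logging

import torch
from transformers import PreTrainedModel, PreTrainedTokenizer

logger = logging.getLogger(__name__)


def resize_embedding_layer(model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer") -> None:
    # Assumed guard: bail out if the input embeddings are not a plain
    # torch.nn.Embedding that transformers knows how to resize.
    if not isinstance(model.get_input_embeddings(), torch.nn.Embedding):
        logger.warning("Current model does not support resizing token embeddings.")
        return

    current_embedding_size = model.get_input_embeddings().weight.size(0)
    if len(tokenizer) > current_embedding_size:
        # pad_to_multiple_of=64 rounds the new vocabulary size up so the
        # embedding matrix dimensions stay friendly to GPU tensor cores.
        model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)
        new_embedding_size = model.get_input_embeddings().weight.size(0)
        logger.info("Resized token embeddings from {} to {}.".format(current_embedding_size, new_embedding_size))

A typical call site would invoke this right after tokenizer.add_special_tokens(...), so the embedding matrix grows only when the tokenizer has actually outgrown it; the renamed variables make clear that the function compares embedding-matrix rows, not the model's nominal vocabulary size.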