From 657dff438c6593601c515606824f53db2dedd55f Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Wed, 13 Dec 2023 10:21:29 +0800
Subject: [PATCH] tiny fix

Former-commit-id: 6953096c9d8f85d56cc980a4bec3a052411fb4a0
---
 src/llmtuner/model/utils.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/llmtuner/model/utils.py b/src/llmtuner/model/utils.py
index d3a5e1ed..0eabae25 100644
--- a/src/llmtuner/model/utils.py
+++ b/src/llmtuner/model/utils.py
@@ -181,8 +181,8 @@ def resize_embedding_layer(model: "PreTrainedModel", tokenizer: "PreTrainedToken
         logger.warning("Current model does not support resizing token embeddings.")
         return
 
-    old_vocab_size = model.get_input_embeddings().weight.size(0)
-    if len(tokenizer) > old_vocab_size:
+    current_embedding_size = model.get_input_embeddings().weight.size(0)
+    if len(tokenizer) > current_embedding_size:
         model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)
-        new_vocab_size = model.get_input_embeddings().weight.size(0)
-        logger.info("Resized token embeddings from {} to {}.".format(old_vocab_size, new_vocab_size))
+        new_embedding_size = model.get_input_embeddings().weight.size(0)
+        logger.info("Resized token embeddings from {} to {}.".format(current_embedding_size, new_embedding_size))
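
After the rename, the patched helper reads roughly as below. This is a minimal sketch reconstructed from the hunk context: the module-level logger, the imports, and the isinstance-style guard that precedes the resize are assumptions, and only the renamed variables and the resize call come from the patch itself.

import logging

from transformers import PreTrainedModel, PreTrainedTokenizer

logger = logging.getLogger(__name__)


def resize_embedding_layer(model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer") -> None:
    # Read the current size of the embedding matrix rather than the tokenizer
    # vocabulary; with pad_to_multiple_of the two can differ, which is why the
    # patch renames old_vocab_size to current_embedding_size.
    current_embedding_size = model.get_input_embeddings().weight.size(0)
    if len(tokenizer) > current_embedding_size:
        # Grow the embeddings to cover the tokenizer, padding the new size up
        # to a multiple of 64 for hardware-friendly matrix shapes.
        model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)
        new_embedding_size = model.get_input_embeddings().weight.size(0)
        logger.info("Resized token embeddings from {} to {}.".format(current_embedding_size, new_embedding_size))

The rename is presumably motivated by pad_to_multiple_of=64: the resized matrix can end up larger than len(tokenizer), so the values being logged are embedding sizes rather than vocabulary sizes.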