From d15fe288dff4c5523eda1286aa0ff0c8e8f32d9e Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Tue, 15 Aug 2023 00:07:56 +0800
Subject: [PATCH] alert pad_token source

Former-commit-id: 80b4053602c02aec724ecf980f8a279ffdf9f975
---
 src/llmtuner/extras/template.py   | 5 ++++-
 src/llmtuner/tuner/core/loader.py | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/llmtuner/extras/template.py b/src/llmtuner/extras/template.py
index bb2886d5..62153b0d 100644
--- a/src/llmtuner/extras/template.py
+++ b/src/llmtuner/extras/template.py
@@ -204,7 +204,10 @@ def get_template_and_fix_tokenizer(
         logger.info("Add eos token: {}".format(tokenizer.eos_token))

     if tokenizer.pad_token_id is None:
-        tokenizer.pad_token = tokenizer.eos_token
+        if tokenizer.unk_token_id is not None:
+            tokenizer.pad_token = tokenizer.unk_token
+        else:
+            tokenizer.pad_token = tokenizer.eos_token
         logger.info("Add pad token: {}".format(tokenizer.pad_token))

     tokenizer.add_special_tokens(dict(additional_special_tokens=template.stop_words))

diff --git a/src/llmtuner/tuner/core/loader.py b/src/llmtuner/tuner/core/loader.py
index e480d44c..4bf767a6 100644
--- a/src/llmtuner/tuner/core/loader.py
+++ b/src/llmtuner/tuner/core/loader.py
@@ -154,7 +154,7 @@ def load_model_and_tokenizer(
         model.generate = MethodType(PreTrainedModel.generate, model)

     # Fix LM head (for ChatGLM2)
-    if not hasattr(model, "lm_head"):
+    if not hasattr(model, "lm_head") and hasattr(model, "transformer"):
         setattr(model, "lm_head", model.transformer.output_layer)

     # Register auto class to save the custom code files.
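
Note: a minimal standalone sketch of the new pad_token fallback, assuming a
Hugging Face tokenizer. The "gpt2" checkpoint below is illustrative only (it
ships without a pad token, so it exercises this code path) and is not part of
the patch:

    from transformers import AutoTokenizer

    # gpt2 has no pad token but does define an unk token, so the
    # fallback below assigns pad from unk rather than eos.
    tokenizer = AutoTokenizer.from_pretrained("gpt2")

    if tokenizer.pad_token_id is None:
        if tokenizer.unk_token_id is not None:
            # Prefer unk over eos as pad: if pad and eos share one id,
            # masking padded label positions can also mask eos, and the
            # model may never learn when to stop generating.
            tokenizer.pad_token = tokenizer.unk_token
        else:
            tokenizer.pad_token = tokenizer.eos_token

    print("pad token:", tokenizer.pad_token)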