From bc665bacc7656f03778080a0590dac569ea890b4 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Fri, 11 Aug 2023 13:56:26 +0800
Subject: [PATCH] add defaults

Former-commit-id: 4636d3bbe6b984ca93e3a80ae5239f3ddda461bd
---
 src/llmtuner/dsets/preprocess.py | 2 +-
 src/llmtuner/extras/template.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llmtuner/dsets/preprocess.py b/src/llmtuner/dsets/preprocess.py
index dc882ae0..64e0d8b1 100644
--- a/src/llmtuner/dsets/preprocess.py
+++ b/src/llmtuner/dsets/preprocess.py
@@ -32,7 +32,7 @@ def preprocess_dataset(

     def preprocess_pretrain_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
         # build grouped texts with format `X1 X2 X3 ...` (without )
-        if isinstance(getattr(tokenizer, "tokenizer"), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
+        if isinstance(getattr(tokenizer, "tokenizer", None), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
             kwargs = dict(allowed_special="all")
         else:
             kwargs = dict(add_special_tokens=False)
diff --git a/src/llmtuner/extras/template.py b/src/llmtuner/extras/template.py
index 64c22285..c6e21d87 100644
--- a/src/llmtuner/extras/template.py
+++ b/src/llmtuner/extras/template.py
@@ -123,7 +123,7 @@ class Template:
         r"""
        Converts context to token ids.
         """
-        if isinstance(getattr(tokenizer, "tokenizer"), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
+        if isinstance(getattr(tokenizer, "tokenizer", None), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
             kwargs = dict(allowed_special="all")
         else:
             kwargs = dict(add_special_tokens=False)
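
The change in both hunks is the same: the two-argument form getattr(tokenizer, "tokenizer") raises AttributeError when the wrapped tokenizer has no "tokenizer" attribute, so the tiktoken check would crash for any non-Qwen tokenizer that does not expose one. Passing None as the default lets the isinstance check simply evaluate to False and fall through to the regular add_special_tokens=False branch. Below is a minimal standalone sketch of that behavior, not part of the patch; DummyTokenizer is a hypothetical stand-in for a tokenizer without a "tokenizer" attribute, and tiktoken is not required to run it.

# Sketch only: illustrates why the None default added by this patch matters.
class DummyTokenizer:
    pass  # hypothetical tokenizer with no inner "tokenizer" attribute

tokenizer = DummyTokenizer()

# Before the patch: two-argument getattr raises when the attribute is absent,
# so the isinstance() check is never even reached.
try:
    getattr(tokenizer, "tokenizer")
except AttributeError:
    print("two-argument getattr raises AttributeError")

# After the patch: the None default makes getattr return None instead of
# raising; isinstance(None, tiktoken.Encoding) would be False, so the code
# takes the ordinary add_special_tokens=False path.
inner = getattr(tokenizer, "tokenizer", None)
print(inner is None)  # True -> non-tiktoken branch is taken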