diff --git a/src/llmtuner/dsets/preprocess.py b/src/llmtuner/dsets/preprocess.py
index dc882ae0..64e0d8b1 100644
--- a/src/llmtuner/dsets/preprocess.py
+++ b/src/llmtuner/dsets/preprocess.py
@@ -32,7 +32,7 @@ def preprocess_dataset(

     def preprocess_pretrain_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
         # build grouped texts with format `X1 X2 X3 ...` (without <eos>)
-        if isinstance(getattr(tokenizer, "tokenizer"), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
+        if isinstance(getattr(tokenizer, "tokenizer", None), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
             kwargs = dict(allowed_special="all")
         else:
             kwargs = dict(add_special_tokens=False)
diff --git a/src/llmtuner/extras/template.py b/src/llmtuner/extras/template.py
index 64c22285..c6e21d87 100644
--- a/src/llmtuner/extras/template.py
+++ b/src/llmtuner/extras/template.py
@@ -123,7 +123,7 @@ class Template:
         r"""
         Converts context to token ids.
         """
-        if isinstance(getattr(tokenizer, "tokenizer"), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
+        if isinstance(getattr(tokenizer, "tokenizer", None), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
             kwargs = dict(allowed_special="all")
         else:
             kwargs = dict(add_special_tokens=False)
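
For reviewers, a minimal sketch of why the third argument to `getattr` matters here. `DummyTokenizer` below is a hypothetical stand-in for any tokenizer that does not wrap a `tiktoken.Encoding` and therefore has no `.tokenizer` attribute; it is not part of the patch.

```python
# Hypothetical stand-in for a tokenizer without an inner `.tokenizer` attribute.
class DummyTokenizer:
    pass

tok = DummyTokenizer()

# Two-argument form (old code): raises AttributeError when the attribute is missing.
try:
    getattr(tok, "tokenizer")
except AttributeError as err:
    print("old behaviour:", err)

# Three-argument form (patched code): returns the default instead of raising,
# so isinstance(None, tiktoken.Encoding) is simply False and the code falls
# through to the regular `add_special_tokens=False` branch.
inner = getattr(tok, "tokenizer", None)
print("new behaviour:", inner)  # -> None
```

With the default supplied, the tiktoken check only triggers for tokenizers (such as Qwen's) that actually expose an inner `tiktoken.Encoding`, and every other tokenizer keeps working unchanged.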