From f1485ab92707bace72ab85364e18a3e670765b83 Mon Sep 17 00:00:00 2001 From: hiyouga Date: Fri, 11 Aug 2023 13:45:47 +0800 Subject: [PATCH] fix baichuan template Former-commit-id: 9c6dd1051417c91074daa7dd6ed6cc53448135ad --- src/llmtuner/dsets/preprocess.py | 4 +++- src/llmtuner/extras/template.py | 15 +++++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/llmtuner/dsets/preprocess.py b/src/llmtuner/dsets/preprocess.py index 4efbcbb6..dc882ae0 100644 --- a/src/llmtuner/dsets/preprocess.py +++ b/src/llmtuner/dsets/preprocess.py @@ -1,3 +1,4 @@ +import tiktoken from typing import TYPE_CHECKING, Any, Dict, Generator, List, Literal from itertools import chain @@ -31,10 +32,11 @@ def preprocess_dataset( def preprocess_pretrain_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]: # build grouped texts with format `X1 X2 X3 ...` (without <eos>) - if hasattr(tokenizer, "tokenizer"): # for tiktoken tokenizer (Qwen) + if isinstance(getattr(tokenizer, "tokenizer"), tiktoken.Encoding): # for tiktoken tokenizer (Qwen) kwargs = dict(allowed_special="all") else: kwargs = dict(add_special_tokens=False) + tokenized_examples = tokenizer(examples["prompt"], **kwargs) concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()} total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]]) diff --git a/src/llmtuner/extras/template.py b/src/llmtuner/extras/template.py index c3388ab3..4bf490aa 100644 --- a/src/llmtuner/extras/template.py +++ b/src/llmtuner/extras/template.py @@ -1,5 +1,6 @@ -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +import tiktoken from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union from llmtuner.extras.logging import get_logger @@ -122,7 +123,7 @@ class Template: r""" Converts context to token ids. 
""" - if hasattr(tokenizer, "tokenizer"): # for tiktoken tokenizer (Qwen) + if isinstance(getattr(tokenizer, "tokenizer"), tiktoken.Encoding): # for tiktoken tokenizer (Qwen) kwargs = dict(allowed_special="all") else: kwargs = dict(add_special_tokens=False) @@ -428,14 +429,16 @@ Supports: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat """ register_template( name="baichuan", - prefix=[], + prefix=[ + {"token": ""} # user token (a little difference in the first turn) + ], prompt=[ - {"token": ""}, # user token (a little difference in position) - "{{query}}" + "{{query}}", + {"token": ""} # assistant token ], sep=[], stop_words=[ - "" # assistant token + {"token": ""} # user token ], use_history=True )