Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-05 21:22:50 +08:00)

commit f1485ab927
parent abdfa26d06
Former-commit-id: 9c6dd1051417c91074daa7dd6ed6cc53448135ad

    fix baichuan template
Hunks in the dataset preprocessing module:

@@ -1,3 +1,4 @@
+import tiktoken
 from typing import TYPE_CHECKING, Any, Dict, Generator, List, Literal
 from itertools import chain
 
@@ -31,10 +32,11 @@ def preprocess_dataset(
 
     def preprocess_pretrain_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
         # build grouped texts with format `X1 X2 X3 ...` (without <eos>)
-        if hasattr(tokenizer, "tokenizer"): # for tiktoken tokenizer (Qwen)
+        if isinstance(getattr(tokenizer, "tokenizer"), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
             kwargs = dict(allowed_special="all")
         else:
             kwargs = dict(add_special_tokens=False)
 
         tokenized_examples = tokenizer(examples["prompt"], **kwargs)
         concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()}
         total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]])
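Why the sharper check matters: hasattr(tokenizer, "tokenizer") is true for any wrapper that happens to expose an inner tokenizer attribute, not only Qwen's tiktoken-backed one, so allowed_special="all" could be passed to a tokenizer that does not accept it. Testing the inner object against tiktoken.Encoding pins the branch to actual tiktoken encodings. A minimal sketch of the difference, with dummy wrapper classes that are illustrative only (not from this repo); the sketch passes a None default to getattr, since the bare getattr(tokenizer, "tokenizer") in the diff raises AttributeError when the attribute is absent:

import tiktoken

class QwenLike:
    """Dummy wrapper whose inner tokenizer is a real tiktoken encoding."""
    def __init__(self):
        self.tokenizer = tiktoken.get_encoding("cl100k_base")

class OtherWrapper:
    """Dummy wrapper whose inner tokenizer is NOT a tiktoken.Encoding."""
    def __init__(self):
        self.tokenizer = object()

for tok in (QwenLike(), OtherWrapper()):
    old_check = hasattr(tok, "tokenizer")  # matches both wrappers
    new_check = isinstance(getattr(tok, "tokenizer", None), tiktoken.Encoding)  # matches QwenLike only
    print(type(tok).__name__, old_check, new_check)
# QwenLike True True
# OtherWrapper True False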
Hunks in the template module:

@@ -1,5 +1,6 @@
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+import tiktoken
 from dataclasses import dataclass
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
 
 from llmtuner.extras.logging import get_logger
 
@@ -122,7 +123,7 @@ class Template:
         r"""
         Converts context to token ids.
         """
-        if hasattr(tokenizer, "tokenizer"): # for tiktoken tokenizer (Qwen)
+        if isinstance(getattr(tokenizer, "tokenizer"), tiktoken.Encoding): # for tiktoken tokenizer (Qwen)
             kwargs = dict(allowed_special="all")
         else:
             kwargs = dict(add_special_tokens=False)
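The two kwargs branches exist because the APIs disagree: a tiktoken Encoding refuses special tokens embedded in the input text unless they are whitelisted via allowed_special, while a Hugging Face tokenizer controls automatic BOS/EOS insertion through add_special_tokens (disabled here so the template code manages special tokens itself). A small standalone sketch of the tiktoken side, using the public cl100k_base encoding rather than Qwen's:

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")

# By default, tiktoken raises on special tokens embedded in the text...
try:
    enc.encode("hello <|endoftext|>")
except ValueError as err:
    print("rejected:", err)

# ...but allowed_special="all" whitelists them, which is what the
# tiktoken branch above relies on.
print(enc.encode("hello <|endoftext|>", allowed_special="all"))

# The Hugging Face branch would instead look like:
#   tokenizer("hello", add_special_tokens=False)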
@@ -428,14 +429,16 @@ Supports: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat
 """
 register_template(
     name="baichuan",
-    prefix=[],
+    prefix=[
+        {"token": "<reserved_102>"} # user token (a little difference in the first turn)
+    ],
     prompt=[
-        {"token": "<reserved_102>"}, # user token (a little difference in position)
-        "{{query}}"
+        "{{query}}",
+        {"token": "<reserved_103>"} # assistant token
     ],
     sep=[],
     stop_words=[
-        "<reserved_103>" # assistant token
+        {"token": "<reserved_102>"} # user token
     ],
     use_history=True
 )
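Net effect on the baichuan template: the user token <reserved_102> now opens the conversation via prefix, the prompt part ends with the assistant token <reserved_103> so the model's reply follows it directly, and <reserved_102> becomes the stop word, halting generation when the model starts a new user turn. A hand-rolled, single-turn rendering sketch (illustrative only, not the repo's actual encoding logic):

USER_TOKEN = "<reserved_102>"       # user token, per the template comments
ASSISTANT_TOKEN = "<reserved_103>"  # assistant token

def render_single_turn(query: str) -> str:
    """Concatenate the template fields for one turn: prefix, query, assistant token."""
    return f"{USER_TOKEN}{query}{ASSISTANT_TOKEN}"

print(render_single_turn("Hello!"))
# -> <reserved_102>Hello!<reserved_103>
# Generation then stops once the model emits <reserved_102> again.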