fix tokenizer config being changed by pretraining preprocessing

Changing a tokenizer attribute at the preprocessing stage results in a wrong tokenizer being saved afterwards.
For example, Baichuan2.

Former-commit-id: 043c316ac8913e10b2274867033f194ea92bfcd6
lvzi 2023-11-08 15:50:46 +08:00 committed by GitHub
parent 91f406cc99
commit 13eb365eb7


@@ -47,9 +47,13 @@ def preprocess_dataset(
         kwargs = dict(add_special_tokens=True)
 
         if hasattr(tokenizer, "add_eos_token"): # for LLaMA tokenizer
+            add_eos_token_flag = getattr(tokenizer, "add_eos_token")
             setattr(tokenizer, "add_eos_token", True)
 
         tokenized_examples = tokenizer(examples["prompt"], **kwargs)
+        # Make sure the saved tokenizer is the same as the original
+        if hasattr(tokenizer, "add_eos_token"): # for Baichuan2 tokenizer
+            setattr(tokenizer, "add_eos_token", add_eos_token_flag)
         concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()}
         total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]])
         block_size = data_args.cutoff_len
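
The save/restore pattern introduced here can be illustrated outside the repository. The sketch below is a minimal, self-contained example, assuming a tokenizer whose add_eos_token attribute is part of the configuration written out by save_pretrained (as with LLaMA or Baichuan2); DummyTokenizer and its fields are illustrative stand-ins, not code from this project.

    # Minimal sketch of the save/restore pattern applied in this commit.
    # DummyTokenizer stands in for a Hugging Face tokenizer whose
    # add_eos_token flag is persisted when the tokenizer is saved.
    class DummyTokenizer:
        def __init__(self):
            self.add_eos_token = False  # default value that should be saved later

        def __call__(self, texts, add_special_tokens=True):
            # Pretend to tokenize; append an EOS marker only when the flag is set.
            suffix = ["<eos>"] if self.add_eos_token else []
            return {"input_ids": [list(t) + suffix for t in texts]}


    tokenizer = DummyTokenizer()

    # --- the pattern from the diff above ---
    if hasattr(tokenizer, "add_eos_token"):
        add_eos_token_flag = getattr(tokenizer, "add_eos_token")  # remember the original value
        setattr(tokenizer, "add_eos_token", True)                 # force EOS only for pretraining packing

    tokenized_examples = tokenizer(["hello", "world"], add_special_tokens=True)

    # Restore the flag so a later save writes the original configuration.
    if hasattr(tokenizer, "add_eos_token"):
        setattr(tokenizer, "add_eos_token", add_eos_token_flag)

    assert tokenizer.add_eos_token is False                   # unchanged after preprocessing
    assert tokenized_examples["input_ids"][0][-1] == "<eos>"  # EOS was still appended during tokenization

Without the restore step, the temporary add_eos_token=True set for packing pretraining examples would persist on the tokenizer object and end up in the saved tokenizer config after training.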