fix bos and eos token

Former-commit-id: d87c8fd8ab84c9f58c0b1f3fb4ad0adf98b25715
hiyouga 2023-08-04 23:55:57 +08:00
parent dbb284b5a2
commit 65369ecf48
2 changed files with 14 additions and 9 deletions

@@ -30,7 +30,7 @@ def preprocess_dataset(
             yield query, response, history, prefix

     def preprocess_pretrain_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
-        # build grouped texts with format `<bos> X1 X2 X3 ...` (without <eos>)
+        # build grouped texts with format `X1 X2 X3 ...` (without <eos>)
         tokenized_examples = tokenizer(examples["prompt"], add_special_tokens=False)
         concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()}
         total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]])
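For context, a minimal self-contained sketch of the packing scheme the updated comment describes (dummy token ids and a toy block size, none of which come from the commit): prompts are tokenized with add_special_tokens=False, concatenated, and cut into fixed-size blocks, so no bos or eos ids appear inside the stream.

    from itertools import chain

    # Pretend output of tokenizer(examples["prompt"], add_special_tokens=False):
    # two short documents with made-up token ids.
    tokenized_examples = {"input_ids": [[101, 102], [201, 202, 203]]}
    concatenated = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples}
    total_length = len(concatenated["input_ids"])

    block_size = 4                                             # toy value
    total_length = (total_length // block_size) * block_size   # drop the ragged tail
    blocks = [concatenated["input_ids"][i: i + block_size] for i in range(0, total_length, block_size)]
    print(blocks)  # [[101, 102, 201, 202]]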
@@ -55,17 +55,17 @@ def preprocess_dataset(
         for query, response, history, prefix in construct_example(examples):
             input_ids, labels = [], []

-            for source_ids, target_ids in template.get_dialog(tokenizer, query, response, history, prefix): # TODO: fix bos
+            for source_ids, target_ids in template.get_dialog(tokenizer, query, response, history, prefix):
                 if len(source_ids) > data_args.max_source_length:
                     source_ids = source_ids[:data_args.max_source_length]
-                if len(target_ids) > data_args.max_target_length - 1: # eos token
-                    target_ids = target_ids[:data_args.max_target_length - 1]
+                if len(target_ids) > data_args.max_target_length:
+                    target_ids = target_ids[:data_args.max_target_length]

-                if len(input_ids) + len(source_ids) + len(target_ids) + 1 > max_length:
+                if len(input_ids) + len(source_ids) + len(target_ids) > max_length:
                     break

-                input_ids += source_ids + target_ids + [tokenizer.eos_token_id]
-                labels += [IGNORE_INDEX] * len(source_ids) + target_ids + [tokenizer.eos_token_id]
+                input_ids += source_ids + target_ids
+                labels += [IGNORE_INDEX] * len(source_ids) + target_ids

             model_inputs["input_ids"].append(input_ids)
             model_inputs["attention_mask"].append([1] * len(input_ids))

@@ -29,7 +29,7 @@ class Template:
         encoded_pairs = self._encode(tokenizer=tokenizer, prefix=prefix, history=history)
         prompt_ids = []
         for query_ids, resp_ids in encoded_pairs[:-1]:
-            prompt_ids = prompt_ids + query_ids + resp_ids + [tokenizer.eos_token_id]
+            prompt_ids = prompt_ids + query_ids + resp_ids
         prompt_ids = prompt_ids + encoded_pairs[-1][0]

         return prompt_ids, encoded_pairs[-1][1]
@@ -73,6 +73,11 @@ class Template:
         r"""
         Encodes formatted inputs to pairs of token ids.
         """
+        if tokenizer.bos_token and getattr(tokenizer, "add_bos_token", False): # bos token is optional
+            bos_token_id = [tokenizer.bos_token_id]
+        else:
+            bos_token_id = []
+        eos_token_id = [tokenizer.eos_token_id] # eos token is required
         encoded_pairs = []
         for turn_idx, (query, resp) in enumerate(history):
             if turn_idx == 0:
@@ -81,7 +86,7 @@ class Template:
                 prefix_ids = self._convert_inputs_to_ids(tokenizer, context=self.sep)
             query_ids = self._convert_inputs_to_ids(tokenizer, context=self.prompt, query=query)
             resp_ids = self._convert_inputs_to_ids(tokenizer, context=[resp])
-            encoded_pairs.append((prefix_ids + query_ids, resp_ids))
+            encoded_pairs.append((bos_token_id + prefix_ids + query_ids, resp_ids + eos_token_id))
         return encoded_pairs

     def _convert_inputs_to_ids(
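To illustrate the new rule, a hedged sketch using a transformers tokenizer (the checkpoint name is only an example and the exact ids depend on the model): bos is prepended to the prompt side only when the tokenizer defines a bos token and is configured to add one, while eos is always appended to the response side.

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")  # example checkpoint

    if tokenizer.bos_token and getattr(tokenizer, "add_bos_token", False):
        bos_token_id = [tokenizer.bos_token_id]   # e.g. [1] for LLaMA
    else:
        bos_token_id = []                         # no bos token, or add_bos_token disabled
    eos_token_id = [tokenizer.eos_token_id]       # e.g. [2] for LLaMA

    query_ids = tokenizer.encode("Hello", add_special_tokens=False)
    resp_ids = tokenizer.encode("Hi there!", add_special_tokens=False)
    encoded_pair = (bos_token_id + query_ids, resp_ids + eos_token_id)
    print(encoded_pair)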