mirror of
https://github.com/hiyouga/LLaMA-Factory.git
synced 2026-03-09 05:05:59 +08:00
[v1] add batch generator (#9744)
This commit is contained in:
@@ -12,9 +12,24 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import torch
|
||||
from transformers import PreTrainedTokenizer
|
||||
|
||||
from .types import Processor
|
||||
from .constants import IGNORE_INDEX
|
||||
from .types import BatchInput, ModelInput, Processor, Tensor
|
||||
|
||||
|
||||
def is_tokenizer(processor: Processor) -> bool:
|
||||
"""Check if processor is tokenizer.
|
||||
|
||||
Args:
|
||||
processor: Processor.
|
||||
|
||||
Returns:
|
||||
Whether processor is tokenizer.
|
||||
"""
|
||||
return not hasattr(processor, "tokenizer")
|
||||
|
||||
|
||||
def get_tokenizer(processor: Processor) -> PreTrainedTokenizer:
|
||||
@@ -27,3 +42,34 @@ def get_tokenizer(processor: Processor) -> PreTrainedTokenizer:
|
||||
Tokenizer.
|
||||
"""
|
||||
return processor.tokenizer if hasattr(processor, "tokenizer") else processor
|
||||
|
||||
|
||||
def _pad_and_truncate(tensor: Tensor, max_seqlen: int, pad_value: int = 0) -> Tensor:
|
||||
if tensor.shape[-1] >= max_seqlen:
|
||||
return tensor[..., :max_seqlen]
|
||||
|
||||
pad_shape = list(tensor.shape)
|
||||
pad_shape[-1] = max_seqlen - tensor.shape[-1]
|
||||
pad_tensor = torch.full(pad_shape, pad_value, dtype=tensor.dtype, device=tensor.device)
|
||||
return torch.cat([tensor, pad_tensor], dim=-1)
|
||||
|
||||
|
||||
def pad_and_truncate(samples: list[ModelInput], max_seqlen: int) -> list[BatchInput]:
    """Pad or truncate every tensor-like field in ``samples`` to a common length.

    The common length is the longest ``input_ids`` across the samples, capped
    at ``max_seqlen``. Fields whose key contains ``"label"`` are padded with
    ``IGNORE_INDEX`` so padded positions are excluded from the loss; every
    other field is padded with zeros. String-valued fields pass through
    unchanged.

    Args:
        samples: Model inputs, each mapping field names to sequences or strings.
        max_seqlen: Upper bound on the padded sequence length.

    Returns:
        The samples with each non-string field converted to a tensor of the
        common length.
    """
    target_len = min(max(len(sample["input_ids"]) for sample in samples), max_seqlen)
    batch = []
    for sample in samples:
        padded = {}
        for key, value in sample.items():
            # Label fields get IGNORE_INDEX padding; everything else gets zeros.
            fill = IGNORE_INDEX if "label" in key else 0

            if isinstance(value, str):
                # Strings (e.g. raw text fields) are carried through as-is.
                padded[key] = value
            else:
                padded[key] = _pad_and_truncate(torch.tensor(value), target_len, fill)

        batch.append(padded)

    return batch
|
||||
|
||||
Reference in New Issue
Block a user