Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-04 20:52:59 +08:00)
tiny fix
Former-commit-id: d2ebd225dbb922adec99c1eb774c16f5cb973d2c
Parent: 755e3e49b4
Commit: 8a8ba08bf7
@@ -100,7 +100,7 @@ def preprocess_dataset(
         return model_inputs

     def preprocess_packed_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]:
-        # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
+        # build inputs with format `<bos> X Y <eos>` and labels with format `<bos> X Y <eos>`
         # we do not mask the inputs in packed training.
         model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
         input_ids, labels = [], []
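The corrected comment states that the packed supervised path builds labels identical to the inputs, because prompt tokens are not masked when sequences are packed. A minimal sketch of that contrast, using hypothetical helpers (packed_example, masked_example) and toy token ids rather than the repository's actual preprocessing code:

from typing import List, Tuple

IGNORE_INDEX = -100  # label value ignored by the cross-entropy loss in transformers

def packed_example(prompt_ids: List[int], response_ids: List[int],
                   bos_id: int, eos_id: int) -> Tuple[List[int], List[int]]:
    """Packed variant: labels are a plain copy of the inputs, no prompt masking."""
    input_ids = [bos_id] + prompt_ids + response_ids + [eos_id]  # <bos> X Y <eos>
    labels = list(input_ids)                                     # <bos> X Y <eos> (same as inputs)
    return input_ids, labels

def masked_example(prompt_ids: List[int], response_ids: List[int],
                   bos_id: int, eos_id: int) -> Tuple[List[int], List[int]]:
    """Non-packed variant: prompt tokens are masked out of the loss."""
    input_ids = [bos_id] + prompt_ids + response_ids + [eos_id]  # <bos> X Y <eos>
    labels = [IGNORE_INDEX] * (1 + len(prompt_ids)) + response_ids + [eos_id]  # <ignore> ... <ignore> Y <eos>
    return input_ids, labels

if __name__ == "__main__":
    X, Y, BOS, EOS = [5, 6, 7], [8, 9], 1, 2
    print(packed_example(X, Y, BOS, EOS))  # ([1, 5, 6, 7, 8, 9, 2], [1, 5, 6, 7, 8, 9, 2])
    print(masked_example(X, Y, BOS, EOS))  # ([1, 5, 6, 7, 8, 9, 2], [-100, -100, -100, -100, 8, 9, 2])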
@@ -173,7 +173,7 @@ class LlamaFlashAttention2(LlamaAttention):
            state = state.reshape(bsz * num_group, group_size, self.num_heads, self.head_dim)

        if attention_mask is not None:
-            logger.warning_once("Padded sequences are less efficient.")
+            logger.warning_once("Padded sequences are less efficient in FlashAttention.")
            batch_size = query_states.shape[0]
            # -q_len: assumes left padding
            unpadded_q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(query_states, attention_mask[:, -q_len:])
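For context on this hunk: unpad_input strips padded positions before the variable-length FlashAttention kernel is called, and the slice attention_mask[:, -q_len:] relies on left padding so that the last q_len mask columns line up with the current query tokens. A rough pure-PyTorch sketch of that unpadding step; the helper unpad_like_flash and the toy shapes are illustrative only, while the real code uses flash_attn.bert_padding.unpad_input:

import torch

def unpad_like_flash(query_states: torch.Tensor, attention_mask: torch.Tensor):
    """query_states: (batch, q_len, heads, head_dim); attention_mask: (batch, kv_len) of 0/1."""
    bsz, q_len = query_states.shape[:2]
    mask_q = attention_mask[:, -q_len:]              # -q_len: assumes left padding
    seqlens = mask_q.sum(dim=-1, dtype=torch.int32)  # valid tokens per sequence
    indices = torch.nonzero(mask_q.flatten(), as_tuple=False).flatten()
    flat = query_states.reshape(bsz * q_len, *query_states.shape[2:])
    unpadded_q = flat[indices]                       # (total_valid_tokens, heads, head_dim)
    cu_seqlens = torch.nn.functional.pad(torch.cumsum(seqlens, 0, dtype=torch.int32), (1, 0))
    max_seqlen = int(seqlens.max())
    return unpadded_q, indices, cu_seqlens, max_seqlen

# Toy check: batch of 2, q_len 4, first sequence left-padded by 2 positions.
mask = torch.tensor([[0, 0, 1, 1], [1, 1, 1, 1]])
q = torch.randn(2, 4, 8, 16)
out, idx, cu, mx = unpad_like_flash(q, mask)
print(out.shape, cu.tolist(), mx)  # torch.Size([6, 8, 16]) [0, 2, 6] 4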