mirror of https://github.com/hiyouga/LLaMA-Factory.git
[misc] code lint (#10439)
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
@@ -152,7 +152,7 @@ def _make_packed_feature(
     video_subseq_ids = packing_params["video_subseq_ids"]
     audio_subseq_ids = packing_params["audio_subseq_ids"]
     unpadded_length = packing_params["unpadded_length"]
-    right_padding_length = packing_params["right_padding_length"] # which only preserved in tests
+    right_padding_length = packing_params["right_padding_length"]  # which only preserved in tests
     cutoff_plus_one = sequence_boundaries[-1]
     content_len = unpadded_length
     pad_len = right_padding_length

@@ -229,10 +229,11 @@ def _make_packed_features(
         )
     ]
 
+
 def _get_expected_position_ids(packing_params, get_rope_func, input_ids, attention_mask) -> torch.Tensor:
     bound_list = packing_params["sequence_boundaries"]
-    input_ids_slices = [input_ids[bound_list[i]:bound_list[i+1]] for i in range(len(bound_list) - 1)]
-    attention_mask_slices = [attention_mask[bound_list[i]:bound_list[i+1]] for i in range(len(bound_list) - 1)]
+    input_ids_slices = [input_ids[bound_list[i] : bound_list[i + 1]] for i in range(len(bound_list) - 1)]
+    attention_mask_slices = [attention_mask[bound_list[i] : bound_list[i + 1]] for i in range(len(bound_list) - 1)]
     img_counts_by_subseq = Counter(packing_params["image_subseq_ids"])
     all_position_ids = []
     for i, input_ids_slice in enumerate(input_ids_slices):

@@ -296,7 +297,7 @@ def test_multimodal_collator_with_packing():
         features[0]["input_ids"],
         features[0]["attention_mask"],
     )
-    batch_input = data_collator(features) # [3, bsz, seq_len]
+    batch_input = data_collator(features)  # [3, bsz, seq_len]
     valid_len = expected_position_ids.shape[-1]
     assert batch_input["position_ids"][1:, :, :valid_len].eq(expected_position_ids).all()
 

@@ -219,14 +219,19 @@ def test_gemma4_plugin():
     check_inputs = {"plugin": gemma4_plugin, **tokenizer_module}
     # validate
     mm_inputs = gemma4_plugin._get_mm_inputs(IMAGES, NO_VIDEOS, NO_AUDIOS, processor)
-    num_image_soft_tokens = 256 # when we use default max_soft_tokens=280
+    num_image_soft_tokens = 256  # when we use default max_soft_tokens=280
     image_token = getattr(processor, "image_token")
     boi_token = getattr(processor, "boi_token")
     eoi_token = getattr(processor, "eoi_token")
 
-    expected_mm_type_ids = [[int(token_id == getattr(processor, "image_token_id")) for token_id in token_ids] for token_ids in BATCH_IDS]
+    expected_mm_type_ids = [
+        [int(token_id == getattr(processor, "image_token_id")) for token_id in token_ids] for token_ids in BATCH_IDS
+    ]
     check_inputs["expected_mm_messages"] = [
-        {"role": "user", "content": f"{boi_token}{image_token * num_image_soft_tokens}{eoi_token}What is in this image?"},
+        {
+            "role": "user",
+            "content": f"{boi_token}{image_token * num_image_soft_tokens}{eoi_token}What is in this image?",
+        },
         {"role": "assistant", "content": "A cat."},
     ]
     for key in ("num_soft_tokens_per_image",):

@@ -181,6 +181,7 @@ def test_reasoning_encode_multiturn(cot_messages: bool, enable_thinking: bool):
         (prompt_str_1, answer_str_1, prompt_str_2, answer_str_2),
     )
 
+
 @pytest.mark.runs_on(["cpu", "mps"])
 @pytest.mark.parametrize("enable_thinking", [True, False, None])
 @pytest.mark.parametrize("discarding_history_cot", [True, False])

@@ -188,7 +189,9 @@ def test_reasoning_encode_multiturn_discarding_history_cot(enable_thinking: bool
     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
     data_args = DataArguments(template="qwen3", enable_thinking=enable_thinking)
     template = get_template_and_fix_tokenizer(tokenizer, data_args)
-    encoded_pairs = template.encode_multiturn(tokenizer, MESSAGES_WITH_THOUGHT, discarding_history_cot=discarding_history_cot)
+    encoded_pairs = template.encode_multiturn(
+        tokenizer, MESSAGES_WITH_THOUGHT, discarding_history_cot=discarding_history_cot
+    )
 
     prompt_str_1 = f"<|im_start|>user\n{MESSAGES_WITH_THOUGHT[0]['content']}<|im_end|>\n<|im_start|>assistant\n"
     prompt_str_2 = f"<|im_start|>user\n{MESSAGES_WITH_THOUGHT[2]['content']}<|im_end|>\n<|im_start|>assistant\n"