Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-03 04:02:49 +08:00)

commit b664bcf307 (parent a3f99f123a)

lint

Former-commit-id: cee3dc484be4bdf31ffe1d8b9c60604b84ed6a00
@@ -142,10 +142,7 @@ def test_llava_next_plugin():
     check_inputs = {"plugin": llava_next_plugin, "tokenizer": tokenizer, "processor": processor}
     image_seqlen = 1176
     check_inputs["expected_mm_messages"] = [
-        {
-            key: value.replace("<image>", "<image>" * image_seqlen)
-            for key, value in message.items()
-        }
+        {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()}
         for message in MM_MESSAGES
     ]
     check_inputs["expected_mm_inputs"] = _get_mm_inputs(processor)
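
Note: the old multi-line form and the new single-line form of the comprehension are equivalent; both replace every "<image>" placeholder in a message with image_seqlen consecutive "<image>" tokens. A minimal standalone sketch with illustrative data (the MM_MESSAGES shown here is a stand-in, not the fixture defined in the test module):

# Standalone sketch of the expected-message expansion; the data below is illustrative only.
MM_MESSAGES = [
    {"role": "user", "content": "<image>What is in this image?"},
    {"role": "assistant", "content": "A cat."},
]
image_seqlen = 4  # the real tests use 1176 (llava-next) or 256 (video-llava)

expected_mm_messages = [
    {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()}
    for message in MM_MESSAGES
]

# The user turn now contains image_seqlen consecutive "<image>" tokens.
assert expected_mm_messages[0]["content"].count("<image>") == image_seqlen
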
@@ -158,10 +155,7 @@ def test_llava_next_video_plugin():
     check_inputs = {"plugin": llava_next_video_plugin, "tokenizer": tokenizer, "processor": processor}
     image_seqlen = 1176
     check_inputs["expected_mm_messages"] = [
-        {
-            key: value.replace("<image>", "<image>" * image_seqlen)
-            for key, value in message.items()
-        }
+        {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()}
         for message in MM_MESSAGES
     ]
     check_inputs["expected_mm_inputs"] = _get_mm_inputs(processor)
@@ -207,10 +201,7 @@ def test_video_llava_plugin():
     check_inputs = {"plugin": video_llava_plugin, "tokenizer": tokenizer, "processor": processor}
     image_seqlen = 256
     check_inputs["expected_mm_messages"] = [
-        {
-            key: value.replace("<image>", "<image>" * image_seqlen)
-            for key, value in message.items()
-        }
+        {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()}
         for message in MM_MESSAGES
     ]
     check_inputs["expected_mm_inputs"] = _get_mm_inputs(processor)
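
The _get_mm_inputs(processor) call in the unchanged context lines refers to a helper defined earlier in the test file. A plausible sketch of such a helper, assuming it only runs a dummy image through the processor's image processor (the body below is an assumption for illustration, not the repository's exact code):

from PIL import Image

def _get_mm_inputs(processor):
    # Assumed behavior: feed a dummy RGB image to the Hugging Face image processor
    # and return the resulting BatchFeature (pixel_values, etc.) as expected mm inputs.
    images = [Image.new("RGB", (32, 32), (255, 255, 255))]
    return processor.image_processor(images=images, return_tensors="pt")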