From 231756a5bfd9a6aff111d1f07abe03dd7bf5d93b Mon Sep 17 00:00:00 2001
From: xvxuopop <127376094+xvxuopop@users.noreply.github.com>
Date: Thu, 27 Nov 2025 02:14:53 +0800
Subject: [PATCH] [chat] fix the error when the vLLM version is greater than
 0.10.0 (#9539)

Co-authored-by: Yaowei Zheng
---
 src/llamafactory/chat/vllm_engine.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/llamafactory/chat/vllm_engine.py b/src/llamafactory/chat/vllm_engine.py
index b3370527..709afe4a 100644
--- a/src/llamafactory/chat/vllm_engine.py
+++ b/src/llamafactory/chat/vllm_engine.py
@@ -15,6 +15,7 @@
 import uuid
 from collections.abc import AsyncGenerator, AsyncIterator
 from typing import TYPE_CHECKING, Any, Optional, Union
 
+from packaging import version
 from typing_extensions import override
 
@@ -77,11 +78,18 @@ class VllmEngine(BaseEngine):
             "tensor_parallel_size": get_device_count() or 1,
             "gpu_memory_utilization": model_args.vllm_gpu_util,
             "disable_log_stats": True,
-            "disable_log_requests": True,
             "enforce_eager": model_args.vllm_enforce_eager,
             "enable_lora": model_args.adapter_name_or_path is not None,
             "max_lora_rank": model_args.vllm_max_lora_rank,
         }
+
+        import vllm
+
+        if version.parse(vllm.__version__) <= version.parse("0.10.0"):
+            engine_args["disable_log_requests"] = True
+        else:
+            engine_args["enable_log_requests"] = False
+
         if self.template.mm_plugin.__class__.__name__ != "BasePlugin":
             engine_args["limit_mm_per_prompt"] = {"image": 4, "video": 2, "audio": 2}
 
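
Note (not part of the patch): below is a minimal, self-contained sketch of the
version-gated flag selection this change introduces. The build_engine_args
helper is hypothetical and exists only for illustration; the flag names and the
0.10.0 cutoff are taken from the patch above, which selects disable_log_requests
for vLLM 0.10.0 and earlier and enable_log_requests for newer versions.

# Standalone sketch, assuming only the behavior shown in the patch above.
from packaging import version

import vllm


def build_engine_args(base_args):
    """Hypothetical helper: pick the request-logging flag by vLLM version."""
    engine_args = dict(base_args)
    if version.parse(vllm.__version__) <= version.parse("0.10.0"):
        # vLLM 0.10.0 and earlier accept the `disable_log_requests` argument.
        engine_args["disable_log_requests"] = True
    else:
        # Per the patch, newer versions take `enable_log_requests` instead.
        engine_args["enable_log_requests"] = False
    return engine_args


# Example usage (model path is a placeholder):
# engine_args = build_engine_args({"model": "path/to/model", "disable_log_stats": True})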