From c42e5cf401a6c363dab57f8ace96be5a3a2a44f7 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Mon, 11 Nov 2024 13:57:14 +0800
Subject: [PATCH] fix #5988

Former-commit-id: 9e08e206a8ea9926768b0f1d5ff9d7e3e216c269
---
 src/llamafactory/chat/vllm_engine.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/llamafactory/chat/vllm_engine.py b/src/llamafactory/chat/vllm_engine.py
index f1a6e9d8..c696978e 100644
--- a/src/llamafactory/chat/vllm_engine.py
+++ b/src/llamafactory/chat/vllm_engine.py
@@ -83,7 +83,8 @@ class VllmEngine(BaseEngine):
             "enable_lora": model_args.adapter_name_or_path is not None,
             "max_lora_rank": model_args.vllm_max_lora_rank,
         }
-        engine_args.update(model_args.vllm_config)
+        if isinstance(model_args.vllm_config, dict):
+            engine_args.update(model_args.vllm_config)
 
         if getattr(config, "is_yi_vl_derived_model", None):
             import vllm.model_executor.models.llava
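
Note: the sketch below is a minimal, self-contained illustration of the pattern this patch introduces, not the project's actual code. It assumes a hypothetical helper `build_engine_args` with placeholder defaults; the point is that user-supplied `vllm_config` overrides are merged into the engine arguments only when they are actually a dict, so a non-dict value (for example an unparsed string from a config source) does not crash `dict.update` at runtime.

    from typing import Any, Dict, Optional, Union

    def build_engine_args(vllm_config: Optional[Union[Dict[str, Any], str]]) -> Dict[str, Any]:
        # Placeholder defaults for illustration only.
        engine_args: Dict[str, Any] = {
            "enable_lora": False,
            "max_lora_rank": 16,
        }
        # Guarded merge: skip anything that is not a dict, mirroring the
        # isinstance check added in the patch above.
        if isinstance(vllm_config, dict):
            engine_args.update(vllm_config)
        return engine_args

    if __name__ == "__main__":
        print(build_engine_args({"gpu_memory_utilization": 0.9}))  # overrides merged
        print(build_engine_args(None))                             # defaults left unchanged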