From 9c394f11ef6904b9788d282196ff4037f329a2f8 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Mon, 11 Nov 2024 13:57:14 +0800
Subject: [PATCH] fix #5988

Former-commit-id: 8d70edf39bce7cb7be272f0bd2820306adf093ad
---
 src/llamafactory/chat/vllm_engine.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/llamafactory/chat/vllm_engine.py b/src/llamafactory/chat/vllm_engine.py
index f1a6e9d8..c696978e 100644
--- a/src/llamafactory/chat/vllm_engine.py
+++ b/src/llamafactory/chat/vllm_engine.py
@@ -83,7 +83,8 @@ class VllmEngine(BaseEngine):
             "enable_lora": model_args.adapter_name_or_path is not None,
             "max_lora_rank": model_args.vllm_max_lora_rank,
         }
-        engine_args.update(model_args.vllm_config)
+        if isinstance(model_args.vllm_config, dict):
+            engine_args.update(model_args.vllm_config)
 
         if getattr(config, "is_yi_vl_derived_model", None):
             import vllm.model_executor.models.llava
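
For reference, below is a minimal standalone sketch of the guard this patch
introduces, assuming model_args.vllm_config may be None or a non-dict value
at engine-construction time (the apparent failure mode behind #5988). The
merge_vllm_config helper name is illustrative only, not part of the codebase.

    # A minimal sketch of the guard above, for illustration only. Assumes
    # `vllm_config` may arrive as None or a non-dict value; the helper
    # name `merge_vllm_config` is hypothetical.
    from typing import Any, Dict, Optional, Union

    def merge_vllm_config(
        engine_args: Dict[str, Any],
        vllm_config: Optional[Union[Dict[str, Any], str]],
    ) -> Dict[str, Any]:
        # dict.update() requires a mapping (or iterable of key/value pairs),
        # so only merge when the user-supplied overrides are actually a dict.
        if isinstance(vllm_config, dict):
            engine_args.update(vllm_config)
        return engine_args

    # Usage: a None or unparsed config no longer raises at update() time.
    args = {"enable_lora": False, "max_lora_rank": 16}
    merge_vllm_config(args, None)                   # args left unchanged
    merge_vllm_config(args, {"max_lora_rank": 32})  # override applied

With the one-line change in the patch, the update is simply skipped when no
dict-valued vllm_config is supplied, instead of raising a TypeError.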