diff --git a/src/llamafactory/chat/vllm_engine.py b/src/llamafactory/chat/vllm_engine.py
index f1a6e9d8..c696978e 100644
--- a/src/llamafactory/chat/vllm_engine.py
+++ b/src/llamafactory/chat/vllm_engine.py
@@ -83,7 +83,8 @@ class VllmEngine(BaseEngine):
             "enable_lora": model_args.adapter_name_or_path is not None,
             "max_lora_rank": model_args.vllm_max_lora_rank,
         }
-        engine_args.update(model_args.vllm_config)
+        if isinstance(model_args.vllm_config, dict):
+            engine_args.update(model_args.vllm_config)
 
         if getattr(config, "is_yi_vl_derived_model", None):
             import vllm.model_executor.models.llava
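For context on why the `isinstance` guard matters: `model_args.vllm_config` is not guaranteed to be a `dict` at this point (it may be `None` when the user sets no extra vLLM options, or a raw string if the value was never parsed), and calling `dict.update` on such values fails. Below is a minimal, self-contained sketch of that failure mode; `ModelArgs` here is a hypothetical stand-in for illustration, not the actual llamafactory dataclass.

```python
from dataclasses import dataclass
from typing import Optional, Union


@dataclass
class ModelArgs:
    # Assumption for this sketch: the field may arrive as None or as an
    # unparsed string, not only as a dict.
    vllm_config: Optional[Union[dict, str]] = None


def build_engine_args(model_args: ModelArgs) -> dict:
    engine_args = {"max_lora_rank": 32}
    # Without the guard:
    #   engine_args.update(None)      -> TypeError (None is not iterable)
    #   engine_args.update("{...}")   -> ValueError (a string is treated as a
    #                                    sequence of key/value pairs)
    if isinstance(model_args.vllm_config, dict):
        engine_args.update(model_args.vllm_config)
    return engine_args


print(build_engine_args(ModelArgs()))
# {'max_lora_rank': 32}
print(build_engine_args(ModelArgs(vllm_config={"gpu_memory_utilization": 0.9})))
# {'max_lora_rank': 32, 'gpu_memory_utilization': 0.9}
```

With the guard in place, a missing or malformed `vllm_config` is silently skipped and the engine falls back to the defaults assembled just above, instead of crashing during engine construction.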