Merge pull request #5990 from hiyouga/hiyouga/dev_vllm

[generate] fix vllm config args

Former-commit-id: ee0745022bd7484f4f2e6b183088f55d5e60c085
commit e2da3cc9fa
Author: hoshi-hiyouga (committed by GitHub)
Date:   2024-11-11 14:10:35 +08:00


@@ -83,7 +83,8 @@ class VllmEngine(BaseEngine):
             "enable_lora": model_args.adapter_name_or_path is not None,
             "max_lora_rank": model_args.vllm_max_lora_rank,
         }
-        engine_args.update(model_args.vllm_config)
+        if isinstance(model_args.vllm_config, dict):
+            engine_args.update(model_args.vllm_config)
 
         if getattr(config, "is_yi_vl_derived_model", None):
             import vllm.model_executor.models.llava
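
The guard matters because `dict.update()` raises a `TypeError` when handed `None` and silently mis-merges when handed a string. Below is a minimal standalone sketch (not the repository's code; `merge_vllm_config` is a hypothetical helper) illustrating the failure mode this commit avoids, under the assumption that `vllm_config` may arrive as `None`, a dict, or a raw JSON string:

```python
import json
from typing import Any, Dict, Optional, Union

def merge_vllm_config(
    engine_args: Dict[str, Any],
    vllm_config: Optional[Union[str, Dict[str, Any]]],
) -> Dict[str, Any]:
    """Merge user-supplied vLLM overrides into the engine arguments."""
    if isinstance(vllm_config, str):
        # Hypothetical normalization step: parse a JSON string into a dict.
        vllm_config = json.loads(vllm_config)
    if isinstance(vllm_config, dict):
        # Only dict-typed configs are merged; None is skipped, which is
        # the behavior the isinstance check in the diff enforces.
        engine_args.update(vllm_config)
    return engine_args

# Usage: the override is applied when it parses to a dict.
args = merge_vllm_config(
    {"max_lora_rank": 32}, '{"gpu_memory_utilization": 0.9}'
)
print(args)  # {'max_lora_rank': 32, 'gpu_memory_utilization': 0.9}
```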