Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-15 03:10:35 +08:00.
feat: pass the max_lora_rank parameter to vLLM backend
Former-commit-id: b20d62ba3c
This commit is contained in:
@@ -59,6 +59,7 @@ class VllmEngine(BaseEngine):
             "disable_log_requests": True,
             "enforce_eager": model_args.vllm_enforce_eager,
             "enable_lora": model_args.adapter_name_or_path is not None,
+            "max_lora_rank": model_args.vllm_max_lora_rank,
         }

         if model_args.visual_inputs:
Reference in New Issue
Block a user