From 3df021d4d746fdab95ec558b33fbde76a559f250 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Tue, 15 Apr 2025 14:57:40 +0800
Subject: [PATCH] [deps] upgrade vllm (#7728)

---
 setup.py                           | 2 +-
 src/llamafactory/hparams/parser.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/setup.py b/setup.py
index 697e3341..e00edb3d 100644
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,7 @@ extra_require = {
     "gptq": ["optimum>=1.17.0", "auto-gptq>=0.5.0"],
     "awq": ["autoawq"],
     "aqlm": ["aqlm[gpu]>=1.1.0"],
-    "vllm": ["vllm>=0.4.3,<=0.8.3"],
+    "vllm": ["vllm>=0.4.3,<=0.8.4"],
     "sglang": ["sglang[srt]>=0.4.5", "transformers==4.51.1"],
     "galore": ["galore-torch"],
     "apollo": ["apollo-torch"],
diff --git a/src/llamafactory/hparams/parser.py b/src/llamafactory/hparams/parser.py
index 5be79ed1..8d2e9c5b 100644
--- a/src/llamafactory/hparams/parser.py
+++ b/src/llamafactory/hparams/parser.py
@@ -135,7 +135,7 @@ def _check_extra_dependencies(
         check_version("mixture-of-depth>=1.1.6", mandatory=True)
 
     if model_args.infer_backend == EngineName.VLLM:
-        check_version("vllm>=0.4.3,<=0.8.3")
+        check_version("vllm>=0.4.3,<=0.8.4")
         check_version("vllm", mandatory=True)
     elif model_args.infer_backend == EngineName.SGLANG:
         check_version("sglang>=0.4.4")