From f4ec4fa6adfe9d618b7db3bb64a2180c8d421918 Mon Sep 17 00:00:00 2001
From: hoshi-hiyouga
Date: Thu, 6 Mar 2025 17:14:17 +0800
Subject: [PATCH] [script] fix vllm version (#7193)

Former-commit-id: ababdde597b2b9bf0ab3f30f036bc8d97de07f03
---
 scripts/vllm_infer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/vllm_infer.py b/scripts/vllm_infer.py
index a42c2b4c..6f47f173 100644
--- a/scripts/vllm_infer.py
+++ b/scripts/vllm_infer.py
@@ -55,7 +55,7 @@ def vllm_infer(
     Performs batch generation using vLLM engine, which supports tensor parallelism.
     Usage: python vllm_infer.py --model_name_or_path meta-llama/Llama-2-7b-hf --template llama --dataset alpaca_en_demo
     """
-    check_version("vllm>=0.4.3,<=0.7.2")
+    check_version("vllm>=0.4.3,<=0.7.3")
 
     if pipeline_parallel_size > get_device_count():
         raise ValueError("Pipeline parallel size should be smaller than the number of gpus.")
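
The only functional change is the upper bound of the accepted vLLM version range, raised from 0.7.2 to 0.7.3. For context, below is a minimal sketch of how a requirement-string check such as check_version("vllm>=0.4.3,<=0.7.3") could be implemented with the `packaging` and `importlib.metadata` tooling; the actual check_version helper in LLaMA-Factory may differ, so treat this purely as an illustration of the version-range check.

    # Hypothetical sketch, not the project's actual helper.
    from importlib.metadata import version

    from packaging.requirements import Requirement


    def check_version(requirement: str) -> None:
        """Raise if the installed package does not satisfy the requirement string."""
        req = Requirement(requirement)   # e.g. "vllm>=0.4.3,<=0.7.3"
        installed = version(req.name)    # raises PackageNotFoundError if missing
        if not req.specifier.contains(installed, prereleases=True):
            raise RuntimeError(
                f"{req.name}=={installed} does not satisfy '{requirement}', "
                "please install a compatible version."
            )

With such a check, the patch simply widens the accepted range so that environments running vllm 0.7.3 pass the guard instead of failing at startup.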