[script] fix vllm version (#7193)

Former-commit-id: 313355759dc906d3612364dc6c8f6344afdedb97
Author: hoshi-hiyouga
Date: 2025-03-06 17:14:17 +08:00
Committed by: GitHub
Parent: eba31ae313
Commit: b6c0e8608e


@@ -55,7 +55,7 @@ def vllm_infer(
     Performs batch generation using vLLM engine, which supports tensor parallelism.
     Usage: python vllm_infer.py --model_name_or_path meta-llama/Llama-2-7b-hf --template llama --dataset alpaca_en_demo
     """
-    check_version("vllm>=0.4.3,<=0.7.2")
+    check_version("vllm>=0.4.3,<=0.7.3")
     if pipeline_parallel_size > get_device_count():
         raise ValueError("Pipeline parallel size should be smaller than the number of gpus.")
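For context, here is a minimal sketch of what a version-range check such as check_version("vllm>=0.4.3,<=0.7.3") can do. It assumes the standard packaging and importlib.metadata libraries; the actual check_version helper in LLaMA-Factory may be implemented differently.

# Hypothetical sketch, not the actual LLaMA-Factory helper: verify that the
# installed package falls inside a required version range.
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement


def check_version(requirement: str) -> None:
    """Raise if the installed package does not satisfy the requirement string."""
    req = Requirement(requirement)  # e.g. "vllm>=0.4.3,<=0.7.3"
    try:
        installed = version(req.name)
    except PackageNotFoundError as err:
        raise RuntimeError(f"{req.name} is not installed, expected '{requirement}'.") from err

    if installed not in req.specifier:
        raise RuntimeError(f"{req.name}=={installed} does not satisfy '{requirement}'.")


check_version("vllm>=0.4.3,<=0.7.3")

Under this sketch, the commit simply widens the accepted upper bound from vllm 0.7.2 to 0.7.3, so the script no longer rejects the newer release.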