[deps] upgrade vllm (#6857)

This commit is contained in:
hoshi-hiyouga
2025-02-08 15:02:28 +08:00
committed by GitHub
parent 40048ab77a
commit 5f38bcaba9
12 changed files with 43 additions and 34 deletions


@@ -118,6 +118,6 @@ def configure_packing(model_args: "ModelArguments", is_trainable: bool) -> None:
     if not is_trainable or not model_args.block_diag_attn:
         return
 
-    check_version("transformers>=4.43.0,<=4.48.2")
+    check_version("transformers>=4.43.0")
     transformers.modeling_flash_attention_utils._get_unpad_data = get_unpad_data
     logger.info_rank0("Using block diagonal attention for sequence packing without cross-attention.")
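
For context, `configure_packing` monkey-patches transformers' flash-attention unpadding helper so that packed sub-sequences attend only within themselves (block diagonal attention). Below is a minimal sketch of such a replacement, not the repository's exact implementation, assuming the packing convention where `attention_mask` labels each packed sub-sequence in a row with its own positive index (1, 2, 3, ...) and uses 0 for padding:

```python
import torch
import torch.nn.functional as F


def get_unpad_data(attention_mask: torch.Tensor):
    # attention_mask: (batch, seq_len) integer tensor.
    # 0 marks padding; k >= 1 marks tokens of the k-th packed sub-sequence in that row.
    seqlens = []
    for row in attention_mask:
        # Count tokens per sub-sequence in this row, preserving packing order.
        counts = torch.bincount(row[row > 0])
        seqlens.append(counts[counts > 0])

    seqlens_in_batch = torch.cat(seqlens).to(torch.int32)
    # Flat indices of all non-padding tokens, in packing order.
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = int(seqlens_in_batch.max())
    # Cumulative sequence lengths, the layout expected by flash-attn's varlen kernels.
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch


# Assigning this function over transformers' internal helper (as the diff above does)
# makes every model that routes through _get_unpad_data treat each packed
# sub-sequence as an independent sequence, i.e. no cross-attention between samples:
# transformers.modeling_flash_attention_utils._get_unpad_data = get_unpad_data
```

Removing the `<=4.48.2` pin leaves only the lower bound on transformers; the patch relies on `modeling_flash_attention_utils._get_unpad_data` still being the hook that newer transformers versions call.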