support vllm

commit d07ad5cc1c
parent f74f804a71
Author: hiyouga
Date: 2024-03-07 20:26:31 +08:00
32 changed files with 752 additions and 316 deletions


@@ -157,6 +157,12 @@ def get_current_device() -> torch.device:
def get_device_count() -> int:
    r"""
    Gets the number of available GPU devices.
    """
    if not torch.cuda.is_available():
        return 0
    return torch.cuda.device_count()
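
Given the commit's purpose ("support vllm"), a natural consumer of get_device_count() is vLLM's tensor_parallel_size engine argument, which shards the model across the visible GPUs. The snippet below is a minimal sketch of that wiring, not code from this diff; the model identifier is a placeholder.

from vllm import LLM

# Hypothetical wiring (not part of this diff): spread the model across
# every visible GPU, falling back to one device on CPU-only hosts.
llm = LLM(
    model="meta-llama/Llama-2-7b-hf",  # placeholder model identifier
    tensor_parallel_size=max(1, get_device_count()),
)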


@@ -51,3 +51,7 @@ def is_unsloth_available():
def is_uvicorn_available():
    return _is_package_available("uvicorn")


def is_vllm_available():
    return _is_package_available("vllm")
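
The helper _is_package_available is defined elsewhere in this file; a common implementation of the pattern checks importability via importlib, roughly as sketched below (an assumption, not the diff's actual body). Callers can then guard optional vLLM imports behind the new check.

import importlib.util

def _is_package_available(name: str) -> bool:
    # Assumed implementation: treat a resolvable import spec as "installed".
    return importlib.util.find_spec(name) is not None

# Typical guard at a call site:
if is_vllm_available():
    from vllm import LLM, SamplingParams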