Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-08-02 03:32:50 +08:00

[deps] upgrade vllm (#7183)

Former-commit-id: d739fddb10f51f422c12b1e7d5d7208309bf6c91
parent 2b21c749c1
commit e7556b591e
README.md

@@ -393,7 +393,7 @@ huggingface-cli login
 | Mandatory    | Minimum | Recommend |
 | ------------ | ------- | --------- |
 | python       | 3.9     | 3.10      |
-| torch        | 1.13.1  | 2.4.0     |
+| torch        | 1.13.1  | 2.5.1     |
 | transformers | 4.41.2  | 4.49.0    |
 | datasets     | 2.16.0  | 3.2.0     |
 | accelerate   | 0.34.0  | 1.2.1     |

@@ -405,7 +405,7 @@ huggingface-cli login
 | CUDA         | 11.6    | 12.2      |
 | deepspeed    | 0.10.0  | 0.16.2    |
 | bitsandbytes | 0.39.0  | 0.43.1    |
-| vllm         | 0.4.3   | 0.7.2     |
+| vllm         | 0.4.3   | 0.7.3     |
 | flash-attn   | 2.3.0   | 2.7.2     |
 
 ### Hardware Requirement
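Both tables boil down to "is each installed version at least the minimum", so an environment can be sanity-checked directly. The snippet below is a hypothetical sketch using only importlib.metadata and the packaging library; it is not a script that ships with LLaMA-Factory, and the MINIMUM dict simply mirrors a few rows of the table above.

```python
# Hypothetical helper: compare the installed stack against the "Minimum" column
# above. Uses only importlib.metadata and packaging; not part of LLaMA-Factory.
from importlib.metadata import PackageNotFoundError, version

from packaging.version import Version

MINIMUM = {  # a few rows from the table above
    "torch": "1.13.1",
    "transformers": "4.41.2",
    "datasets": "2.16.0",
    "accelerate": "0.34.0",
    "vllm": "0.4.3",
}

for name, minimum in MINIMUM.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed (minimum {minimum})")
        continue
    ok = Version(installed) >= Version(minimum)
    print(f"{name} {installed}: {'ok' if ok else f'below minimum {minimum}'}")
```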
README_zh.md (the Chinese README; 必需项 / 至少 / 推荐 = Mandatory / Minimum / Recommend, 硬件依赖 = Hardware Requirement)

@@ -395,7 +395,7 @@ huggingface-cli login
 | 必需项       | 至少    | 推荐      |
 | ------------ | ------- | --------- |
 | python       | 3.9     | 3.10      |
-| torch        | 1.13.1  | 2.4.0     |
+| torch        | 1.13.1  | 2.5.1     |
 | transformers | 4.41.2  | 4.49.0    |
 | datasets     | 2.16.0  | 3.2.0     |
 | accelerate   | 0.34.0  | 1.2.1     |

@@ -407,7 +407,7 @@ huggingface-cli login
 | CUDA         | 11.6    | 12.2      |
 | deepspeed    | 0.10.0  | 0.16.2    |
 | bitsandbytes | 0.39.0  | 0.43.1    |
-| vllm         | 0.4.3   | 0.7.2     |
+| vllm         | 0.4.3   | 0.7.3     |
 | flash-attn   | 2.3.0   | 2.7.2     |
 
 ### 硬件依赖
setup.py

@@ -54,7 +54,7 @@ extra_require = {
     "gptq": ["optimum>=1.17.0", "auto-gptq>=0.5.0"],
     "awq": ["autoawq"],
     "aqlm": ["aqlm[gpu]>=1.1.0"],
-    "vllm": ["vllm>=0.4.3,<=0.7.2"],
+    "vllm": ["vllm>=0.4.3,<=0.7.3"],
     "galore": ["galore-torch"],
     "apollo": ["apollo-torch"],
     "badam": ["badam>=1.2.1"],
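Because "vllm" is a setuptools extra, the new upper bound takes effect for installs such as `pip install -e ".[vllm]"` (the extras-based install style the project README documents); pip then resolves vllm anywhere in the range >=0.4.3,<=0.7.3.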
src/llamafactory/hparams/parser.py

@@ -137,7 +137,7 @@ def _check_extra_dependencies(
         check_version("mixture-of-depth>=1.1.6", mandatory=True)
 
     if model_args.infer_backend == "vllm":
-        check_version("vllm>=0.4.3,<=0.7.2")
+        check_version("vllm>=0.4.3,<=0.7.3")
         check_version("vllm", mandatory=True)
 
     if finetuning_args.use_galore:
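This runtime guard mirrors the setup.py pin: the vLLM inference backend is only accepted when the installed vllm falls inside the supported range. Below is a minimal sketch of what such a range check can look like, built on importlib.metadata and packaging; it is illustrative only, not LLaMA-Factory's actual check_version implementation.

```python
# Illustrative version-range guard in the spirit of
# check_version("vllm>=0.4.3,<=0.7.3"); a sketch, not the project's real code.
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement


def check_version(requirement: str, mandatory: bool = False) -> None:
    """Raise if a mandatory package is missing or outside its pinned range."""
    req = Requirement(requirement)  # e.g. name="vllm", specifier=">=0.4.3,<=0.7.3"
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        if mandatory:
            raise RuntimeError(f"{req.name} is required but not installed.")
        return

    if not req.specifier.contains(installed, prereleases=True):
        message = f"{req.name}=={installed} is outside the range '{req.specifier}'."
        if mandatory:
            raise RuntimeError(message)
        print(f"Warning: {message}")


check_version("vllm>=0.4.3,<=0.7.3")  # warn if vllm is out of range
check_version("vllm", mandatory=True)  # hard-fail if vllm is missing entirely
```

The two calls match the pattern in the hunk above: the first enforces the version window with a soft warning, the second makes the package itself mandatory.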