Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-10-16 00:28:10 +08:00)
Update model_args.py

Former-commit-id: f40a2fe5334865763e4d513292d359317b7a091b
commit 0fd1a05cec
parent 6373d307ec
@@ -117,7 +117,10 @@ class ModelArguments:
         default=False,
         metadata={"help": "Whether or not to disable CUDA graph in the vLLM engine."},
     )
-    vllm_max_lora_rank: int = field(default=8, metadata={"help": "The maximum supported rank of all LoRAs."})
+    vllm_max_lora_rank: int = field(
+        default=8,
+        metadata={"help": "Maximum rank of all LoRAs in the vLLM engine."},
+    )
     offload_folder: str = field(
         default="offload",
         metadata={"help": "Path to offload model weights."},
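For reference, below is a minimal sketch of how the affected ModelArguments fields read after this commit. The name of the first field (vllm_enforce_eager) is an assumption, since the hunk context shows only its default and help text, and the build_vllm_kwargs helper is purely illustrative (not part of the repository); it only suggests how such options commonly map onto vLLM engine keyword arguments like enforce_eager and max_lora_rank.

# Sketch of the touched dataclass fields after this commit.
# NOTE: "vllm_enforce_eager" is an assumed field name; the diff context does not show it.
from dataclasses import dataclass, field


@dataclass
class ModelArguments:
    vllm_enforce_eager: bool = field(
        default=False,
        metadata={"help": "Whether or not to disable CUDA graph in the vLLM engine."},
    )
    vllm_max_lora_rank: int = field(
        default=8,
        metadata={"help": "Maximum rank of all LoRAs in the vLLM engine."},
    )
    offload_folder: str = field(
        default="offload",
        metadata={"help": "Path to offload model weights."},
    )


def build_vllm_kwargs(args: ModelArguments) -> dict:
    # Hypothetical helper: shows one plausible mapping of these arguments onto
    # vLLM engine keyword arguments; not the repository's actual wiring.
    return {
        "enforce_eager": args.vllm_enforce_eager,
        "max_lora_rank": args.vllm_max_lora_rank,
    }


if __name__ == "__main__":
    # With the defaults above, this prints:
    # {'enforce_eager': False, 'max_lora_rank': 8}
    print(build_vllm_kwargs(ModelArguments()))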