Update model_args.py
Former-commit-id: f40a2fe5334865763e4d513292d359317b7a091b
parent e47e835e4e
commit db2ccb2d36
@@ -117,7 +117,10 @@ class ModelArguments:
         default=False,
         metadata={"help": "Whether or not to disable CUDA graph in the vLLM engine."},
     )
-    vllm_max_lora_rank: int = field(default=8, metadata={"help": "The maximum supported rank of all LoRAs."})
+    vllm_max_lora_rank: int = field(
+        default=8,
+        metadata={"help": "Maximum rank of all LoRAs in the vLLM engine."},
+    )
     offload_folder: str = field(
         default="offload",
         metadata={"help": "Path to offload model weights."},
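For context, below is a minimal, hypothetical sketch of how the two vLLM-related fields touched here could be forwarded to a vLLM engine. This is not LLaMA-Factory's actual wiring: the boolean field name vllm_enforce_eager, the build_vllm_engine helper, and the model_path parameter are assumptions for illustration; only vllm_max_lora_rank, the defaults, and the help strings come from the diff above.

# Hypothetical sketch, not the repository's actual code.
from dataclasses import dataclass, field

from vllm import LLM


@dataclass
class ModelArguments:
    # Field name assumed; only the default and help text appear in this diff.
    vllm_enforce_eager: bool = field(
        default=False,
        metadata={"help": "Whether or not to disable CUDA graph in the vLLM engine."},
    )
    vllm_max_lora_rank: int = field(
        default=8,
        metadata={"help": "Maximum rank of all LoRAs in the vLLM engine."},
    )


def build_vllm_engine(model_path: str, args: ModelArguments) -> LLM:
    # enforce_eager=True disables CUDA graph capture in vLLM;
    # max_lora_rank caps the rank of any LoRA adapter loaded into the engine.
    return LLM(
        model=model_path,
        enable_lora=True,
        max_lora_rank=args.vllm_max_lora_rank,
        enforce_eager=args.vllm_enforce_eager,
    )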