from dataclasses import asdict, dataclass, field
from typing import Any, Dict, Literal, Optional


@dataclass
class ModelArguments:
    r"""
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune or infer.
    """

    model_name_or_path: str = field(
        metadata={
            "help": "Path to the model weight or identifier from huggingface.co/models or modelscope.cn/models."
        },
    )
    adapter_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "Path to the adapter weight or identifier from huggingface.co/models."},
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pre-trained models downloaded from huggingface.co or modelscope.cn."},
    )
    use_fast_tokenizer: bool = field(
        default=False,
        metadata={"help": "Whether or not to use one of the fast tokenizers (backed by the tokenizers library)."},
    )
    resize_vocab: bool = field(
        default=False,
        metadata={"help": "Whether or not to resize the tokenizer vocab and the embedding layers."},
    )
    split_special_tokens: bool = field(
        default=False,
        metadata={"help": "Whether or not the special tokens should be split during the tokenization process."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    low_cpu_mem_usage: bool = field(
        default=True,
        metadata={"help": "Whether or not to use memory-efficient model loading."},
    )
    quantization_bit: Optional[int] = field(
        default=None,
        metadata={"help": "The number of bits to quantize the model using bitsandbytes."},
    )
    quantization_type: Literal["fp4", "nf4"] = field(
        default="nf4",
        metadata={"help": "Quantization data type to use in int4 training."},
    )
    double_quantization: bool = field(
        default=True,
        metadata={"help": "Whether or not to use double quantization in int4 training."},
    )
    quantization_device_map: Optional[Literal["auto"]] = field(
        default=None,
        metadata={"help": "Device map used to infer the 4-bit quantized model, needs bitsandbytes>=0.43.0."},
    )
    rope_scaling: Optional[Literal["linear", "dynamic"]] = field(
        default=None,
        metadata={"help": "Which scaling strategy should be adopted for the RoPE embeddings."},
    )
    flash_attn: bool = field(
        default=False,
        metadata={"help": "Enable FlashAttention for faster training."},
    )
    shift_attn: bool = field(
        default=False,
        metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."},
    )
    mixture_of_depths: Optional[Literal["convert", "load"]] = field(
        default=None,
        metadata={"help": "Convert the model to mixture-of-depths (MoD) or load the MoD model."},
    )
    use_unsloth: bool = field(
        default=False,
        metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."},
    )
    moe_aux_loss_coef: Optional[float] = field(
        default=None,
        metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},
    )
    disable_gradient_checkpointing: bool = field(
        default=False,
        metadata={"help": "Whether or not to disable gradient checkpointing."},
    )
    upcast_layernorm: bool = field(
        default=False,
        metadata={"help": "Whether or not to upcast the layernorm weights in fp32."},
    )
    upcast_lmhead_output: bool = field(
        default=False,
        metadata={"help": "Whether or not to upcast the output of lm_head in fp32."},
    )
    infer_backend: Literal["huggingface", "vllm"] = field(
        default="huggingface",
        metadata={"help": "Backend engine used at inference."},
    )
    vllm_maxlen: int = field(
        default=2048,
        metadata={"help": "Maximum input length of the vLLM engine."},
    )
    vllm_gpu_util: float = field(
        default=0.9,
        metadata={"help": "The fraction of GPU memory in (0,1) to be used for the vLLM engine."},
    )
    vllm_enforce_eager: bool = field(
        default=False,
        metadata={"help": "Whether or not to disable CUDA graph in the vLLM engine."},
    )
    offload_folder: str = field(
        default="offload",
        metadata={"help": "Path to offload model weights."},
    )
    use_cache: bool = field(
        default=True,
        metadata={"help": "Whether or not to use KV cache in generation."},
    )
    hf_hub_token: Optional[str] = field(
        default=None,
        metadata={"help": "Auth token to log in with Hugging Face Hub."},
    )
    ms_hub_token: Optional[str] = field(
        default=None,
        metadata={"help": "Auth token to log in with ModelScope Hub."},
    )
    export_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Path to the directory to save the exported model."},
    )
    export_size: int = field(
        default=1,
        metadata={"help": "The file shard size (in GB) of the exported model."},
    )
    export_device: str = field(
        default="cpu",
        metadata={"help": "The device used in model export."},
    )
    export_quantization_bit: Optional[int] = field(
        default=None,
        metadata={"help": "The number of bits to quantize the exported model."},
    )
    export_quantization_dataset: Optional[str] = field(
        default=None,
        metadata={"help": "Path to the dataset or dataset name to use in quantizing the exported model."},
    )
    export_quantization_nsamples: int = field(
        default=128,
        metadata={"help": "The number of samples used for quantization."},
    )
    export_quantization_maxlen: int = field(
        default=1024,
        metadata={"help": "The maximum length of the model inputs used for quantization."},
    )
    export_legacy_format: bool = field(
        default=False,
        metadata={"help": "Whether or not to save the `.bin` files instead of `.safetensors`."},
    )
    export_hub_model_id: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the repository if pushing the exported model to the Hugging Face hub."},
    )
    print_param_status: bool = field(
        default=False,
        metadata={"help": "For debugging purposes, print the status of the parameters in the model."},
    )

    def __post_init__(self):
        # Runtime attributes not exposed on the command line; initialized here and filled in later.
        self.compute_dtype = None
        self.device_map = None
        self.model_max_length = None

        if self.split_special_tokens and self.use_fast_tokenizer:
            raise ValueError("`split_special_tokens` is only supported for slow tokenizers.")

        if self.adapter_name_or_path is not None:  # support merging multiple lora weights
            self.adapter_name_or_path = [path.strip() for path in self.adapter_name_or_path.split(",")]

        assert self.quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
        assert self.export_quantization_bit in [None, 8, 4, 3, 2], "We only accept 2/3/4/8-bit quantization."

        if self.export_quantization_bit is not None and self.export_quantization_dataset is None:
            raise ValueError("Quantization dataset is necessary for exporting.")

    def to_dict(self) -> Dict[str, Any]:
        return asdict(self)
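

# Minimal usage sketch (an illustrative addition, not part of the class above): an
# HfArgumentParser-style dataclass like this is typically populated from the command
# line, after which __post_init__ normalizes and validates the values, e.g. splitting
# a comma-separated --adapter_name_or_path into a list of adapter paths.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(ModelArguments)
    (model_args,) = parser.parse_args_into_dataclasses()
    # e.g. --adapter_name_or_path "lora_a,lora_b" yields ["lora_a", "lora_b"]
    print(model_args.to_dict())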