Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-15 03:10:35 +08:00
@@ -85,6 +85,10 @@ class ModelArguments:
        default=False,
        metadata={"help": "Whether or not to use multimodal LLM that accepts visual inputs."},
    )
    tune_mm_proj: bool = field(
        default=False,
        metadata={"help": "Whether or not to only finetune the mm_projector for MLLM."},
    )
    moe_aux_loss_coef: Optional[float] = field(
        default=None,
        metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},
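For context, a minimal, self-contained sketch (not the full LLaMA-Factory class) of how the three fields added in this hunk could be declared and parsed with transformers' HfArgumentParser. The field names tune_mm_proj and moe_aux_loss_coef come from the diff; the name of the first multimodal flag is not visible in this hunk, so use_mm_llm below is a placeholder, and the example values are illustrative only.

    # Sketch under the assumptions stated above; not the upstream implementation.
    from dataclasses import dataclass, field
    from typing import Optional

    from transformers import HfArgumentParser


    @dataclass
    class ModelArguments:
        use_mm_llm: bool = field(  # placeholder name; the real field name sits above this hunk
            default=False,
            metadata={"help": "Whether or not to use multimodal LLM that accepts visual inputs."},
        )
        tune_mm_proj: bool = field(
            default=False,
            metadata={"help": "Whether or not to only finetune the mm_projector for MLLM."},
        )
        moe_aux_loss_coef: Optional[float] = field(
            default=None,
            metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},
        )


    if __name__ == "__main__":
        # Parse from a plain dict instead of the CLI so the sketch runs standalone;
        # parse_args_into_dataclasses() would be used for real command-line parsing.
        parser = HfArgumentParser(ModelArguments)
        (model_args,) = parser.parse_dict({"tune_mm_proj": True, "moe_aux_loss_coef": 0.001})
        print(model_args)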