Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2026-01-02 12:10:34 +08:00
@@ -85,10 +85,6 @@ class ModelArguments:
         default=False,
         metadata={"help": "Whethor or not to use multimodal LLM that accepts visual inputs."},
     )
-    autocast_projector: bool = field(
-        default=True,
-        metadata={"help": "Whethor or not to autocast projector."},
-    )
     moe_aux_loss_coef: Optional[float] = field(
         default=None,
         metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},
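For context, the fields in this hunk follow the dataclasses-plus-HfArgumentParser pattern used for LLaMA-Factory's hyperparameter classes: each option is a dataclass field whose metadata["help"] string becomes its command-line help text. The sketch below is a minimal illustration of that pattern, not the project's exact code; the field name visual_inputs and the standalone parsing entry point are assumptions for illustration, since the hunk only shows the default= and metadata= lines.

# Minimal sketch (assumed names, not LLaMA-Factory's exact code) of the
# dataclass/HfArgumentParser pattern behind the fields in the hunk above.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class ModelArguments:
    # Hypothetical field name; the hunk only shows its default and help text.
    visual_inputs: bool = field(
        default=False,
        metadata={"help": "Whether or not to use multimodal LLM that accepts visual inputs."},
    )
    moe_aux_loss_coef: Optional[float] = field(
        default=None,
        metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},
    )


if __name__ == "__main__":
    # e.g. `python example.py --moe_aux_loss_coef 0.01`
    (model_args,) = HfArgumentParser(ModelArguments).parse_args_into_dataclasses()
    print(model_args.moe_aux_loss_coef)

Because the help text lives in the field metadata, a change like this hunk only needs to touch the default= and metadata= lines of each affected field.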