Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-22 22:02:51 +08:00)
Update patcher.py

commit e09d68985f (parent 3d65c4ceab)
Former-commit-id: 5a0c8a8d343adb15b510f65286ee08f33b1b2751
```diff
@@ -17,7 +17,7 @@ from .utils.moe import add_z3_leaf_module, configure_moe
 from .utils.quantization import configure_quantization
 from .utils.rope import configure_rope
 from .utils.valuehead import prepare_valuehead_model
-from .utils.visual import autocast_projector_dtype, configure_hidden_size, configure_visual
+from .utils.visual import autocast_projector_dtype, configure_visual_model


 if TYPE_CHECKING:
@@ -54,8 +54,7 @@ def patch_config(
     configure_longlora(config, model_args, is_trainable)
     configure_quantization(config, tokenizer, model_args, init_kwargs)
     configure_moe(config, model_args, is_trainable)
-    configure_hidden_size(config)
-    configure_visual(config, model_args)
+    configure_visual_model(config)
 
     if model_args.use_cache and not is_trainable:
         setattr(config, "use_cache", True)
```
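The change collapses the two visual helpers, `configure_hidden_size(config)` and `configure_visual(config, model_args)`, into a single `configure_visual_model(config)` call. Below is a minimal sketch of what such a merged helper could look like; it assumes a LLaVA-style multimodal config whose text backbone lives in `config.text_config`, and the body is an illustrative guess rather than the repository's actual implementation.

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from transformers import PretrainedConfig


def configure_visual_model(config: "PretrainedConfig") -> None:
    # Illustrative sketch only: the signature matches the diff above,
    # but the body is an assumption about what the merged helper does.
    if getattr(config, "model_type", None) == "llava":
        # Expose the text backbone's hidden size at the top level so code
        # that reads config.hidden_size also works for multimodal configs.
        setattr(config, "hidden_size", config.text_config.hidden_size)
```

Dropping the `model_args` parameter also explains the import change: `configure_visual_model` needs only the config object, so the call site in `patch_config` shrinks from two lines to one.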