Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-18 04:40:35 +08:00)
fix llava config
@@ -46,6 +46,9 @@ def init_adapter(
         if (not finetuning_args.pure_bf16) and (not finetuning_args.use_badam):
             model = model.float()
 
+    if model_args.visual_inputs and hasattr(model, "vision_tower"):  # freeze vision model
+        model.vision_tower.requires_grad_(False)
+
     if finetuning_args.finetuning_type == "freeze" and is_trainable:
         logger.info("Fine-tuning method: Freeze")
         num_layers = (
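For context, the added lines freeze the vision encoder by calling `requires_grad_(False)` on the `vision_tower` submodule, which recursively disables gradients for all of its parameters while still allowing forward passes. Below is a minimal, self-contained sketch of that pattern; the `ToyMultimodalModel` class and its `nn.Linear` submodules are stand-ins invented for illustration, not the actual LLaVA model structure.

import torch.nn as nn

class ToyMultimodalModel(nn.Module):
    """Toy stand-in for a multimodal model that exposes a `vision_tower` submodule."""

    def __init__(self) -> None:
        super().__init__()
        self.vision_tower = nn.Linear(16, 16)    # stand-in for the vision encoder
        self.language_model = nn.Linear(16, 16)  # stand-in for the LLM backbone

model = ToyMultimodalModel()

# Same call as in the diff: sets requires_grad=False on every parameter of
# the submodule, so the optimizer skips them during training.
model.vision_tower.requires_grad_(False)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable params: {trainable} / {total}")  # only language_model params remain

Running this prints a trainable count covering only the `language_model` parameters, mirroring the intent of the diff: when `visual_inputs` is enabled, only the language side of the model receives gradient updates.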