mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-14 19:06:26 +08:00)
fix #4139
@@ -239,7 +239,7 @@ def init_adapter(
         )
         model = get_peft_model(model, lora_config)
 
-    if cast_trainable_params_to_fp32:
+    if is_trainable and cast_trainable_params_to_fp32:
         for param in filter(lambda p: p.requires_grad, model.parameters()):
             param.data = param.data.to(torch.float32)
 
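For reference, a minimal runnable sketch of the guarded upcast after this change. The helper name `upcast_trainable_params` and the toy model are illustrative, not part of LLaMA-Factory; only the condition and the cast loop come from the diff above. The point of the fix is that the fp32 upcast now runs only when the adapter is actually being trained, so parameters loaded for inference keep their original (e.g. fp16) dtype.

import torch
import torch.nn as nn


def upcast_trainable_params(model: nn.Module, is_trainable: bool, cast_to_fp32: bool) -> nn.Module:
    # Mirror of the patched condition: upcast only when the model is being
    # trained AND the caller asked for fp32 trainable params. Before this
    # commit the `is_trainable` guard was missing, so inference-only loads
    # could also upcast any parameter that had requires_grad=True.
    if is_trainable and cast_to_fp32:
        for param in filter(lambda p: p.requires_grad, model.parameters()):
            param.data = param.data.to(torch.float32)
    return model


# Toy usage: a half-precision layer whose weights are trainable.
layer = nn.Linear(4, 4).half()
upcast_trainable_params(layer, is_trainable=True, cast_to_fp32=True)
print(next(layer.parameters()).dtype)  # torch.float32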