Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-22 13:42:51 +08:00)
fix #4609

unwrap_model_for_generation(reward_model) is necessary for DeepSpeed ZeRO-3 training.

Former-commit-id: 8845e94f917b503bbee0604d7290efea7260a30c
parent: 0d438e5cf4
commit: 3595d98b4c
@@ -393,7 +393,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         else:
             reward_model = self.reward_model
 
-        with self.amp_context:  # support bf16
+        with unwrap_model_for_generation(reward_model, self.accelerator), self.amp_context:  # support bf16
             _, _, values = reward_model(**batch, return_dict=True, use_cache=False)
 
         if self.finetuning_args.reward_model_type == "lora":
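For context: under DeepSpeed ZeRO-3 every parameter is partitioned across ranks, so running the reward model's forward pass without first gathering its weights can fail or score with incomplete parameters. trl's unwrap_model_for_generation context manager gathers the partitioned parameters for the duration of the block and re-partitions them on exit. Below is a minimal sketch of the pattern this hunk adopts; the reward_model and batch objects are illustrative stand-ins, not the trainer's actual state:

import torch
from accelerate import Accelerator
from trl.models.utils import unwrap_model_for_generation

accelerator = Accelerator()
reward_model = ...  # e.g. a value-head reward model prepared via accelerator.prepare() (stand-in)
batch = ...         # tokenized inputs for scoring (stand-in)

# Under ZeRO-3 the context manager gathers the sharded weights so the
# forward pass below sees the full parameters; on non-ZeRO-3 setups it
# simply yields the unwrapped model.
with unwrap_model_for_generation(reward_model, accelerator):
    with torch.no_grad():
        _, _, values = reward_model(**batch, return_dict=True, use_cache=False)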
@@ -496,4 +496,5 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
             self.model.save_checkpoint(output_dir)
 
         elif self.args.should_save:
-            self._save(output_dir)
+            unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model)
+            self._save(output_dir, state_dict=unwrapped_model.state_dict())
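The second hunk changes saving in the non-DeepSpeed branch: instead of calling _save on the accelerate-wrapped model, it unwraps the model first and passes the unwrapped state_dict explicitly, so the checkpoint carries the underlying value-head model's parameters rather than the wrapper's. A minimal sketch of the same unwrap-then-save step, with save_fn standing in for the trainer's _save helper (illustrative, not the actual trainer code):

from accelerate import Accelerator

accelerator = Accelerator()

def save_unwrapped(model, save_fn, output_dir: str) -> None:
    # Strip the accelerate/DDP wrapper so state_dict() reflects the
    # underlying model's own parameter names.
    unwrapped = accelerator.unwrap_model(model)
    save_fn(output_dir, state_dict=unwrapped.state_dict())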