Fix model saving in the PPO trainer

Former-commit-id: 7ba57d5b1469cd0de0bb391b915bedec97b20ebd
This commit is contained in:
hiyouga 2023-09-12 16:25:29 +08:00
parent 4e86462bad
commit c8780205bc

View File

@@ -147,7 +147,8 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
                 dataiter = iter(self.dataloader)
                 steps_trained = 0
-        self.log_callback.on_train_end(
+        self.log_callback.on_train_end(self.args, self.state, self.control)
+        self.save_callback.on_train_end(
             self.args, self.state, self.control, model=self.accelerator.unwrap_model(self.model)
         )