fix ppo save model

Former-commit-id: 300ca6d904524f46cb520056e1319a1e9a13d169
This commit is contained in:
hiyouga 2023-09-12 16:25:29 +08:00
parent e19a44c12b
commit 3305e66f8c

View File

@ -147,7 +147,8 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
             dataiter = iter(self.dataloader)
             steps_trained = 0

-        self.log_callback.on_train_end(
+        self.log_callback.on_train_end(self.args, self.state, self.control)
+        self.save_callback.on_train_end(
             self.args, self.state, self.control, model=self.accelerator.unwrap_model(self.model)
         )