From f17ab6ad926b2a36db492531c61a356ac8d0112e Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Tue, 2 Jul 2024 23:06:13 +0800
Subject: [PATCH] tiny fix

Former-commit-id: 98c4a0af6b3e27ae393d2847f48a01d23d9c8780
---
 src/llamafactory/train/ppo/trainer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llamafactory/train/ppo/trainer.py b/src/llamafactory/train/ppo/trainer.py
index 7e0c0111..37d9d37e 100644
--- a/src/llamafactory/train/ppo/trainer.py
+++ b/src/llamafactory/train/ppo/trainer.py
@@ -393,7 +393,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         else:
             reward_model = self.reward_model
 
-        with unwrap_model_for_generation(reward_model, self.accelerator), self.amp_context:  # support bf16
+        with self.amp_context:  # support bf16
             _, _, values = reward_model(**batch, return_dict=True, use_cache=False)
 
         if self.finetuning_args.reward_model_type == "lora":
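
Context for the change: the hunk above drops the unwrap_model_for_generation wrapper around the reward-model call and keeps only the autocast context, since a plain forward pass to read the value head does not need the generation-oriented unwrapping. Below is a minimal, self-contained sketch of that pattern; it is not the LLaMA-Factory code, and TinyValueModel, amp_context, and batch are hypothetical stand-ins for self.reward_model, self.amp_context, and the prepared model inputs.

import torch


class TinyValueModel(torch.nn.Module):
    """Hypothetical stand-in for a reward model with a value head."""

    def __init__(self, vocab_size: int = 100, hidden_size: int = 8):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab_size, hidden_size)
        self.value_head = torch.nn.Linear(hidden_size, 1)

    def forward(self, input_ids, return_dict=True, use_cache=False):
        hidden = self.embed(input_ids)
        values = self.value_head(hidden).squeeze(-1)  # shape: (batch, seq_len)
        # Mimic the (logits, loss, values) tuple returned by a value-head model.
        return None, None, values


model = TinyValueModel()
batch = {"input_ids": torch.randint(0, 100, (2, 5))}
# Analogue of self.amp_context; bf16 autocast on CPU so the sketch runs anywhere.
amp_context = torch.autocast(device_type="cpu", dtype=torch.bfloat16)

with amp_context:  # support bf16, no generation-style unwrapping needed
    _, _, values = model(**batch, return_dict=True, use_cache=False)

print(values.shape)  # torch.Size([2, 5]): one value per token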