Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-10-14 23:58:11 +08:00
tiny fix
Former-commit-id: 08f7e0862b9df353a0e4d8274617c1a5e6fa6619
parent 1fc551e1be
commit 83fc73c580
@@ -157,8 +157,8 @@ class PPOTrainerForLLaMA(PPOTrainer, PeftTrainer):

             stats = self.step(queries, responses, rewards)

-            loss_meter.update(stats["ppo/loss/total"])
-            reward_meter.update(torch.tensor(rewards).sum().item(), n=len(rewards))
+            loss_meter.update(stats["ppo/loss/total"], n=len(rewards))
+            reward_meter.update(torch.stack(rewards).mean().item(), n=len(rewards))

             if steps_trained == len_dataloader:
                 dataiter = iter(self.dataloader)
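
Why this helps: with an AverageMeter-style helper whose update(val, n) keeps a running average weighted by n samples, passing the per-batch mean reward together with n=len(rewards) (and weighting the loss update by batch size as well) gives a correct per-sample average even when batch sizes vary. A minimal sketch of that semantics, assuming a typical AverageMeter with update(val, n); this is an illustration, not necessarily the repository's exact helper class:

    # Sketch of the meter semantics assumed by this change; AverageMeter and
    # update(val, n) follow a common pattern and may differ from the repo's code.
    import torch


    class AverageMeter:
        """Running average weighted by the number of samples in each update."""

        def __init__(self):
            self.sum, self.count, self.avg = 0.0, 0, 0.0

        def update(self, val, n=1):
            self.sum += val * n
            self.count += n
            self.avg = self.sum / self.count


    # rewards is a list of per-sample scalar tensors, as in the PPO training loop.
    rewards = [torch.tensor(0.5), torch.tensor(1.5), torch.tensor(1.0)]

    reward_meter = AverageMeter()
    # New call: average the batch, then weight by batch size, so the meter
    # reports a true per-sample mean reward across batches.
    reward_meter.update(torch.stack(rewards).mean().item(), n=len(rewards))
    print(reward_meter.avg)  # 1.0

    # The old call fed the batch *sum* as the value, so the reported "average
    # reward" scaled with the batch size instead of staying a per-sample mean.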