From 53d7c5109f9b45cd4c4a46421121bd73b1d4ac9c Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 28 Dec 2023 18:09:28 +0800
Subject: [PATCH] fix ppo trainer

Former-commit-id: ca5b5823b03822ef899405d233a82396be997f44
---
 src/llmtuner/train/ppo/trainer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llmtuner/train/ppo/trainer.py b/src/llmtuner/train/ppo/trainer.py
index 0c6af1d2..ec9e3fdf 100644
--- a/src/llmtuner/train/ppo/trainer.py
+++ b/src/llmtuner/train/ppo/trainer.py
@@ -203,7 +203,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         r"""
         Generates model's responses given queries.
         """
-        if self.finetuning_args.upcast_layernorm:
+        if self.model_args.upcast_layernorm:
             layernorm_params = dump_layernorm(self.model)
 
         if batch["input_ids"].size(0) == 1:  # handle llama2 ppo with gradient accumulation > 1
@@ -218,7 +218,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
             **batch
         )
 
-        if self.finetuning_args.upcast_layernorm:
+        if self.model_args.upcast_layernorm:
             restore_layernorm(self.model, layernorm_params)
 
         query = batch["input_ids"].detach().cpu()
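
Context for the patch: the fix only changes which argument object the upcast_layernorm flag is read from (model_args instead of finetuning_args). The surrounding logic temporarily casts layer-norm weights that were upcast to float32 for training stability back to a half-precision dtype before generation, then restores them afterwards. The following is a minimal sketch of that dump/restore pattern, not the repository's exact implementation; the float16 target dtype and the "norm"-in-name filter are illustrative assumptions.

    import torch


    def dump_layernorm(model: torch.nn.Module) -> dict:
        """Save float32 layer-norm weights and cast them to float16 for generation (illustrative)."""
        saved = {}
        for name, param in model.named_parameters():
            if param.data.dtype == torch.float32 and "norm" in name:
                saved[name] = param.data.detach().clone()
                param.data = param.data.to(torch.float16)
        return saved


    def restore_layernorm(model: torch.nn.Module, saved: dict) -> None:
        """Restore the previously saved float32 layer-norm weights (illustrative)."""
        for name, param in model.named_parameters():
            if name in saved:
                param.data = saved[name]

A caller would wrap generation with this pair, mirroring the patched method: save and downcast before model.generate(), restore once generation has finished, gated on the upcast_layernorm flag.
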