Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-04 12:42:51 +08:00).

Commit d0946f08db — "fix ppo trainer"
Parent: 024b0b1ab2
Former-commit-id: 5431be42f9c43095d478f2250fac64ef189eb3ad
@@ -203,7 +203,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         r"""
         Generates model's responses given queries.
         """
-        if self.finetuning_args.upcast_layernorm:
+        if self.model_args.upcast_layernorm:
             layernorm_params = dump_layernorm(self.model)

         if batch["input_ids"].size(0) == 1:  # handle llama2 ppo with gradient accumulation > 1
@@ -218,7 +218,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
             **batch
         )

-        if self.finetuning_args.upcast_layernorm:
+        if self.model_args.upcast_layernorm:
             restore_layernorm(self.model, layernorm_params)

         query = batch["input_ids"].detach().cpu()
|
Loading…
x
Reference in New Issue
Block a user