From d7130ec63529b6bca3e5d04780bdf74c251ddabc Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Wed, 10 Jul 2024 11:05:45 +0800
Subject: [PATCH] fix ppo trainer

Former-commit-id: fb0c40011689b3ae84cc3b258bf3c66af3e1e430
---
 src/llamafactory/train/ppo/trainer.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/llamafactory/train/ppo/trainer.py b/src/llamafactory/train/ppo/trainer.py
index 6a05b704..31d461e3 100644
--- a/src/llamafactory/train/ppo/trainer.py
+++ b/src/llamafactory/train/ppo/trainer.py
@@ -106,7 +106,8 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
                 DistributedDataParallelKwargs(find_unused_parameters=training_args.ddp_find_unused_parameters)
             ]
             ppo_config.accelerator_kwargs["deepspeed_plugin"] = training_args.deepspeed_plugin
-            if ppo_config.log_with == "tensorboard":  # tensorboard raises error about accelerator_kwargs
+            if ppo_config.log_with is not None:
+                logger.warning("PPOTrainer cannot use external logger when DeepSpeed is enabled.")
                 ppo_config.log_with = None

         # Create optimizer and scheduler
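The patch broadens the old tensorboard-only guard: with a DeepSpeed plugin configured, any external logger on the PPO config is now dropped with an explicit warning rather than silently disabling only "tensorboard". Below is a minimal, self-contained sketch of that guard logic, not the repository's full trainer; the helper name `_apply_deepspeed_logging_guard` and the standalone `logging` setup are illustrative assumptions, while `PPOConfig.log_with` and `PPOConfig.accelerator_kwargs` come from trl's PPO configuration used by the patched file.

```python
import logging

from trl import PPOConfig

logger = logging.getLogger(__name__)


def _apply_deepspeed_logging_guard(ppo_config: PPOConfig, deepspeed_plugin) -> None:
    # Hypothetical helper mirroring the patched code path: when DeepSpeed is
    # enabled, hand the plugin to the internal Accelerator and drop any
    # external logger, warning the user instead of failing later.
    if deepspeed_plugin is not None:
        ppo_config.accelerator_kwargs["deepspeed_plugin"] = deepspeed_plugin
        if ppo_config.log_with is not None:
            logger.warning("PPOTrainer cannot use external logger when DeepSpeed is enabled.")
            ppo_config.log_with = None
```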