From 28e613efd01599ac8ce3820f6dbb964235c0cf86 Mon Sep 17 00:00:00 2001
From: hiyouga <467089858@qq.com>
Date: Wed, 26 Jun 2024 19:52:35 +0800
Subject: [PATCH] fix #4458

Former-commit-id: 8d6cd69ac43afd4bd7c14bd02b0061455827ac9e
---
 src/llamafactory/train/ppo/trainer.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/llamafactory/train/ppo/trainer.py b/src/llamafactory/train/ppo/trainer.py
index 70d01919..c5f6e175 100644
--- a/src/llamafactory/train/ppo/trainer.py
+++ b/src/llamafactory/train/ppo/trainer.py
@@ -99,10 +99,10 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
         )
 
         # Add deepspeed config
-        ppo_config.accelerator_kwargs["kwargs_handlers"] = [
-            DistributedDataParallelKwargs(find_unused_parameters=training_args.ddp_find_unused_parameters)
-        ]
         if training_args.deepspeed_plugin is not None:
+            ppo_config.accelerator_kwargs["kwargs_handlers"] = [
+                DistributedDataParallelKwargs(find_unused_parameters=training_args.ddp_find_unused_parameters)
+            ]
             ppo_config.accelerator_kwargs["deepspeed_plugin"] = training_args.deepspeed_plugin
 
         # Create optimizer and scheduler
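
Note: the following is a minimal sketch of the post-patch behavior, not code
from the repository. After this change, the DistributedDataParallelKwargs
handler is attached only when a DeepSpeed plugin is configured, so
non-DeepSpeed runs pass no kwargs handlers to the Accelerator. The helper
name build_accelerator_kwargs and its standalone arguments are assumptions
made for illustration; accelerate.DistributedDataParallelKwargs is the real
API used in the hunk above.

    from accelerate import DistributedDataParallelKwargs

    def build_accelerator_kwargs(deepspeed_plugin, ddp_find_unused_parameters):
        """Sketch of the patched branch: DDP kwargs are now DeepSpeed-only."""
        accelerator_kwargs = {}
        if deepspeed_plugin is not None:
            # Both the DDP kwargs handler and the plugin are set together,
            # matching the lines moved inside the if-branch by this patch.
            accelerator_kwargs["kwargs_handlers"] = [
                DistributedDataParallelKwargs(
                    find_unused_parameters=ddp_find_unused_parameters
                )
            ]
            accelerator_kwargs["deepspeed_plugin"] = deepspeed_plugin
        return accelerator_kwargs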