add deepspeed check in PPO training

hiyouga
2023-09-07 19:12:40 +08:00
parent e2bf7c3bad
commit ed1c2c5557
2 changed files with 6 additions and 0 deletions


@@ -119,6 +119,9 @@ def get_train_args(
     if general_args.stage == "ppo" and model_args.reward_model is None:
         raise ValueError("Reward model is necessary for PPO training.")
+    if general_args.stage == "ppo" and training_args.deepspeed is not None:
+        raise ValueError("PPO training is incompatible with DeepSpeed, use Accelerate instead.")
     if general_args.stage == "ppo" and data_args.streaming:
         raise ValueError("Streaming mode does not support PPO training currently.")