Mirror of https://github.com/hiyouga/LLaMA-Factory.git
fix bug
Former-commit-id: 2fd7a8fc3134af66193a5e8db8fea35025f82de9
This commit is contained in:
parent 60aea7521b
commit 09f165d442
@@ -66,7 +66,7 @@ class CustomPPOTrainer(PPOTrainer, Trainer):
        if self.args.max_steps > 0:
            logger.info("max_steps is given, it will override any value given in num_train_epochs")

        if reward_model is not None:
            if finetuning_args.reward_model_type == "full":
                if self.is_deepspeed_enabled:
                    if not (
                        getattr(reward_model.pretrained_model, "is_loaded_in_8bit", False)
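The hunk above is truncated, and the extraction lost the +/- markers, so the exact changed line is not recoverable here. For context, the following is a minimal sketch of the guard pattern the visible lines belong to: preparing a separate full-precision reward model for PPO training, skipping DeepSpeed wrapping when the model is already quantized. The 4-bit check, the helper name prepare_reward_model, and the calls to trainer._prepare_deepspeed and trainer.accelerator.prepare_model are assumptions based on the common TRL/Accelerate pattern, not lines taken from this commit.

def prepare_reward_model(trainer, reward_model, finetuning_args):
    """Sketch: attach a reward model to a PPO-style trainer, honoring quantization.

    `trainer` is assumed to expose `is_deepspeed_enabled`, `_prepare_deepspeed`,
    and an `accelerator` attribute, as in TRL's PPOTrainer; this is illustrative,
    not LLaMA-Factory's exact implementation.
    """
    if reward_model is None:
        return None

    if finetuning_args.reward_model_type == "full":
        if trainer.is_deepspeed_enabled:
            # Quantized (8-bit/4-bit) models are already placed on the correct
            # device by their loader, so only full-precision reward models need
            # to be wrapped by DeepSpeed.
            if not (
                getattr(reward_model.pretrained_model, "is_loaded_in_8bit", False)
                or getattr(reward_model.pretrained_model, "is_loaded_in_4bit", False)
            ):
                reward_model = trainer._prepare_deepspeed(reward_model)
        else:
            # Without DeepSpeed, let Accelerate place the frozen reward model.
            reward_model = trainer.accelerator.prepare_model(reward_model, evaluation_mode=True)

    return reward_model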