fix bug in freeze tuning

Former-commit-id: ff52b1779c
hiyouga
2023-11-16 14:25:11 +08:00
parent 627212e48b
commit 0ed0b8f9c5
2 changed files with 8 additions and 1 deletion


@@ -76,4 +76,5 @@ def create_reward_model(
 reward_finetuning_args = FinetuningArguments(finetuning_type="lora")
 reward_model, _ = load_model_and_tokenizer(reward_model_args, reward_finetuning_args, is_trainable=False, stage="ppo")
 logger.info("Load full weights of reward model from {}".format(finetuning_args.reward_model))
+logger.warning("Please ensure the ppo model and reward model share SAME tokenizer and vocabulary.")
 return reward_model
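
The added warning points at a silent failure mode: if the PPO model and the reward model were trained with different tokenizers, reward scores are computed over mismatched token ids. Below is a minimal sketch, not part of this commit, of the consistency check the warning asks users to perform manually. It assumes both tokenizers are Hugging Face PreTrainedTokenizer instances; `ppo_tokenizer` and `reward_tokenizer` are hypothetical names for the tokenizers loaded alongside the two models.

def tokenizers_match(ppo_tokenizer, reward_tokenizer) -> bool:
    """Best-effort check that two tokenizers share a vocabulary."""
    # Cheap first check: vocabulary sizes must agree.
    if ppo_tokenizer.vocab_size != reward_tokenizer.vocab_size:
        return False
    # get_vocab() returns the token-to-id mapping of a Hugging Face
    # PreTrainedTokenizer; a mismatched mapping means the reward model
    # would score a different token sequence than the PPO model emits.
    return ppo_tokenizer.get_vocab() == reward_tokenizer.get_vocab()

Note this check is best-effort: two tokenizers can share a vocabulary yet differ in merge rules or normalization, so loading both models with the exact same tokenizer checkpoint remains the safest option.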