From eb5a852dd55219947c258699a1ce3b355f0bdfe1 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 16 Nov 2023 02:27:03 +0800
Subject: [PATCH] fix import bug

Former-commit-id: 35b91ea34caade45dd51813b94da5177b852aa4c
---
 src/llmtuner/model/__init__.py     |  4 +-
 src/llmtuner/model/parser.py       |  7 +--
 src/llmtuner/model/utils.py        | 70 +-------------------------
 src/llmtuner/train/dpo/workflow.py |  8 +--
 src/llmtuner/train/ppo/workflow.py |  7 +--
 src/llmtuner/train/utils.py        | 79 ++++++++++++++++++++++++++++++
 6 files changed, 91 insertions(+), 84 deletions(-)
 create mode 100644 src/llmtuner/train/utils.py

diff --git a/src/llmtuner/model/__init__.py b/src/llmtuner/model/__init__.py
index 0c0410ba..fb9a05e7 100644
--- a/src/llmtuner/model/__init__.py
+++ b/src/llmtuner/model/__init__.py
@@ -1,3 +1,5 @@
+# Level: loader > adapter > parser, utils
+
 from llmtuner.model.loader import load_model_and_tokenizer
 from llmtuner.model.parser import get_train_args, get_infer_args, get_eval_args
-from llmtuner.model.utils import create_ref_model, create_reward_model, dispatch_model, generate_model_card
+from llmtuner.model.utils import dispatch_model, generate_model_card, load_valuehead_params
diff --git a/src/llmtuner/model/parser.py b/src/llmtuner/model/parser.py
index a6687430..64f48e17 100644
--- a/src/llmtuner/model/parser.py
+++ b/src/llmtuner/model/parser.py
@@ -82,15 +82,13 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
         raise ValueError("Please enable `predict_with_generate` to save model predictions.")
 
     if finetuning_args.stage in ["rm", "ppo"]:
-        if finetuning_args.finetuning_type != "lora":
-            raise ValueError("RM and PPO stages can only be performed with the LoRA method.")
         if training_args.resume_from_checkpoint is not None:
             raise ValueError("RM and PPO stages do not support `resume_from_checkpoint`.")
         if training_args.load_best_model_at_end:
             raise ValueError("RM and PPO stages do not support `load_best_model_at_end`.")
 
     if finetuning_args.stage == "ppo" and not training_args.do_train:
-        raise ValueError("PPO training does not support evaluation.")
+        raise ValueError("PPO training does not support evaluation, use the SFT stage to evaluate models.")
 
     if finetuning_args.stage in ["rm", "dpo"]:
         for dataset_attr in data_args.dataset_list:
@@ -131,6 +129,9 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
     if (not training_args.do_train) and model_args.quantization_bit is not None:
         logger.warning("Evaluating model in 4/8-bit mode may cause lower scores.")
 
+    if (not training_args.do_train) and finetuning_args.stage == "dpo" and finetuning_args.ref_model is None:
+        logger.warning("Specify `ref_model` for computing rewards at evaluation.")
+
     # postprocess training_args
     if (
         training_args.local_rank != -1
diff --git a/src/llmtuner/model/utils.py b/src/llmtuner/model/utils.py
index 0e556c11..7badc905 100644
--- a/src/llmtuner/model/utils.py
+++ b/src/llmtuner/model/utils.py
@@ -1,5 +1,5 @@
 import torch
-from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
 
 from transformers.utils import cached_file
 from transformers.trainer import WEIGHTS_NAME, SAFE_WEIGHTS_NAME
@@ -7,83 +7,15 @@ from transformers.trainer import WEIGHTS_NAME, SAFE_WEIGHTS_NAME
 from llmtuner.extras.constants import LAYERNORM_NAMES
 from llmtuner.extras.logging import get_logger
 from llmtuner.hparams import ModelArguments, FinetuningArguments
-from llmtuner.model import load_model_and_tokenizer
 
 if TYPE_CHECKING:
     from transformers.modeling_utils import PreTrainedModel
-    from trl import AutoModelForCausalLMWithValueHead
     from llmtuner.hparams import DataArguments
 
 
 logger = get_logger(__name__)
 
 
-def create_ref_model(
-    model_args: "ModelArguments",
-    finetuning_args: "FinetuningArguments",
-    stage: Literal["ppo", "dpo"]
-) -> Union["PreTrainedModel", "AutoModelForCausalLMWithValueHead"]:
-    r"""
-    Creates reference model for PPO/DPO training. Evaluation mode is not supported.
-
-    The valuehead parameter is randomly initialized since it is useless for PPO training.
-    """
-    if finetuning_args.ref_model is not None:
-        ref_model_args_dict = model_args.to_dict()
-        ref_model_args_dict.update(dict(
-            model_name_or_path=finetuning_args.ref_model,
-            checkpoint_dir=finetuning_args.ref_model_checkpoint,
-            quantization_bit=finetuning_args.ref_model_quantization_bit
-        ))
-        ref_model_args = ModelArguments(**ref_model_args_dict)
-        ref_finetuning_args = FinetuningArguments(finetuning_type="lora")
-        ref_model, _ = load_model_and_tokenizer(ref_model_args, ref_finetuning_args, is_trainable=False, stage=stage)
-        logger.info("Created reference model from {}".format(finetuning_args.ref_model))
-    else:
-        if finetuning_args.finetuning_type == "lora":
-            ref_model = None
-        else:
-            ref_model, _ = load_model_and_tokenizer(model_args, finetuning_args, is_trainable=False, stage=stage)
-            logger.info("Created reference model from the model itself.")
-
-    return ref_model
-
-
-def create_reward_model(
-    model: "AutoModelForCausalLMWithValueHead",
-    model_args: "ModelArguments",
-    finetuning_args: "FinetuningArguments"
-) -> "AutoModelForCausalLMWithValueHead":
-    r"""
-    Creates reward model for PPO training.
-    """
-    if finetuning_args.reward_model_type == "lora":
-        model.pretrained_model.load_adapter(finetuning_args.reward_model, "reward")
-        for name, param in model.named_parameters(): # https://github.com/huggingface/peft/issues/1090
-            if "default" in name:
-                param.data = param.data.to(torch.float32) # trainable params should in fp32
-        vhead_params = load_valuehead_params(model_args.checkpoint_dir[-1], model_args)
-        assert vhead_params is not None, "Reward model is not correctly loaded."
-        model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
-        model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
-        model.register_buffer("default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False)
-        model.register_buffer("default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False)
-        logger.info("Loaded adapter weights of reward model from {}".format(finetuning_args.reward_model))
-        return None
-    else:
-        reward_model_args_dict = model_args.to_dict()
-        reward_model_args_dict.update(dict(
-            model_name_or_path=finetuning_args.reward_model,
-            checkpoint_dir=finetuning_args.reward_model_checkpoint,
-            quantization_bit=finetuning_args.reward_model_quantization_bit
-        ))
-        reward_model_args = ModelArguments(**reward_model_args_dict)
-        reward_finetuning_args = FinetuningArguments(finetuning_type="lora")
-        reward_model, _ = load_model_and_tokenizer(reward_model_args, reward_finetuning_args, is_trainable=False, stage="ppo")
-        logger.info("Load full weights of reward model from {}".format(finetuning_args.reward_model))
-        return reward_model
-
-
 def dispatch_model(model: "PreTrainedModel") -> "PreTrainedModel":
     r"""
     Dispatches a pre-trained model to GPUs with balanced memory.
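
The import bug fixed by this patch is the circular dependency visible in the hunk above: llmtuner/model/__init__.py imports dispatch_model, generate_model_card and load_valuehead_params from llmtuner.model.utils, while llmtuner/model/utils.py imported load_model_and_tokenizer back from the llmtuner.model package. Moving create_ref_model and create_reward_model into the new llmtuner/train/utils.py (added at the end of this patch) removes that back-reference, in line with the "Level: loader > adapter > parser, utils" comment. A minimal sketch of the resulting import direction, assuming llmtuner at this revision is importable (illustrative, not part of the patch):

# Imports now flow one way: llmtuner.train.utils -> llmtuner.model -> llmtuner.model.utils;
# llmtuner.model.utils no longer imports the llmtuner.model package back.
from llmtuner.model import load_model_and_tokenizer, dispatch_model
from llmtuner.train.utils import create_ref_model, create_reward_model
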
diff --git a/src/llmtuner/train/dpo/workflow.py b/src/llmtuner/train/dpo/workflow.py
index 0bd2a2ed..5281f4e4 100644
--- a/src/llmtuner/train/dpo/workflow.py
+++ b/src/llmtuner/train/dpo/workflow.py
@@ -6,10 +6,10 @@ from transformers import Seq2SeqTrainingArguments
 
 from llmtuner.data import get_dataset, preprocess_dataset, split_dataset
 from llmtuner.extras.constants import IGNORE_INDEX
-from llmtuner.extras.logging import get_logger
 from llmtuner.extras.ploting import plot_loss
 from llmtuner.hparams import ModelArguments
-from llmtuner.model import create_ref_model, generate_model_card, load_model_and_tokenizer
+from llmtuner.model import generate_model_card, load_model_and_tokenizer
+from llmtuner.train.utils import create_ref_model
 from llmtuner.train.dpo.collator import DPODataCollatorWithPadding
 from llmtuner.train.dpo.trainer import CustomDPOTrainer
 
@@ -18,9 +18,6 @@ if TYPE_CHECKING:
     from llmtuner.hparams import DataArguments, FinetuningArguments
 
 
-logger = get_logger(__name__)
-
-
 def run_dpo(
     model_args: "ModelArguments",
     data_args: "DataArguments",
@@ -74,7 +71,6 @@ def run_dpo(
     if training_args.do_eval:
         metrics = trainer.evaluate(metric_key_prefix="eval")
         if id(model) == id(ref_model): # unable to compute rewards without a reference model
-            logger.warning("Specify `ref_model` for computing rewards at evaluation.")
             remove_keys = [key for key in metrics.keys() if "rewards" in key]
             for key in remove_keys:
                 metrics.pop(key)
diff --git a/src/llmtuner/train/ppo/workflow.py b/src/llmtuner/train/ppo/workflow.py
index 3448efa5..41a99e2c 100644
--- a/src/llmtuner/train/ppo/workflow.py
+++ b/src/llmtuner/train/ppo/workflow.py
@@ -9,9 +9,9 @@ from transformers.optimization import get_scheduler
 
 from llmtuner.data import get_dataset, preprocess_dataset
 from llmtuner.extras.callbacks import SavePeftModelCallback
-from llmtuner.extras.logging import get_logger
 from llmtuner.extras.ploting import plot_loss
-from llmtuner.model import create_ref_model, create_reward_model, load_model_and_tokenizer
+from llmtuner.model import load_model_and_tokenizer
+from llmtuner.train.utils import create_ref_model, create_reward_model
 from llmtuner.train.ppo.trainer import CustomPPOTrainer
 
 if TYPE_CHECKING:
@@ -19,9 +19,6 @@ if TYPE_CHECKING:
     from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments
 
 
-logger = get_logger(__name__)
-
-
 def run_ppo(
     model_args: "ModelArguments",
     data_args: "DataArguments",
diff --git a/src/llmtuner/train/utils.py b/src/llmtuner/train/utils.py
new file mode 100644
index 00000000..70307b18
--- /dev/null
+++ b/src/llmtuner/train/utils.py
@@ -0,0 +1,79 @@
+import torch
+from typing import TYPE_CHECKING, Literal, Union
+
+from llmtuner.extras.logging import get_logger
+from llmtuner.hparams import ModelArguments, FinetuningArguments
+from llmtuner.model import load_model_and_tokenizer, load_valuehead_params
+
+if TYPE_CHECKING:
+    from transformers.modeling_utils import PreTrainedModel
+    from trl import AutoModelForCausalLMWithValueHead
+
+
+logger = get_logger(__name__)
+
+
+def create_ref_model(
+    model_args: "ModelArguments",
+    finetuning_args: "FinetuningArguments",
+    stage: Literal["ppo", "dpo"]
+) -> Union["PreTrainedModel", "AutoModelForCausalLMWithValueHead"]:
+    r"""
+    Creates reference model for PPO/DPO training. Evaluation mode is not supported.
+
+    The valuehead parameter is randomly initialized since it is useless for PPO training.
+    """
+    if finetuning_args.ref_model is not None:
+        ref_model_args_dict = model_args.to_dict()
+        ref_model_args_dict.update(dict(
+            model_name_or_path=finetuning_args.ref_model,
+            checkpoint_dir=finetuning_args.ref_model_checkpoint,
+            quantization_bit=finetuning_args.ref_model_quantization_bit
+        ))
+        ref_model_args = ModelArguments(**ref_model_args_dict)
+        ref_finetuning_args = FinetuningArguments(finetuning_type="lora")
+        ref_model, _ = load_model_and_tokenizer(ref_model_args, ref_finetuning_args, is_trainable=False, stage=stage)
+        logger.info("Created reference model from {}".format(finetuning_args.ref_model))
+    else:
+        if finetuning_args.finetuning_type == "lora":
+            ref_model = None
+        else:
+            ref_model, _ = load_model_and_tokenizer(model_args, finetuning_args, is_trainable=False, stage=stage)
+            logger.info("Created reference model from the model itself.")
+
+    return ref_model
+
+
+def create_reward_model(
+    model: "AutoModelForCausalLMWithValueHead",
+    model_args: "ModelArguments",
+    finetuning_args: "FinetuningArguments"
+) -> "AutoModelForCausalLMWithValueHead":
+    r"""
+    Creates reward model for PPO training.
+    """
+    if finetuning_args.reward_model_type == "lora":
+        model.pretrained_model.load_adapter(finetuning_args.reward_model, "reward")
+        for name, param in model.named_parameters(): # https://github.com/huggingface/peft/issues/1090
+            if "default" in name:
+                param.data = param.data.to(torch.float32) # trainable params should be in fp32
+        vhead_params = load_valuehead_params(model_args.checkpoint_dir[-1], model_args)
+        assert vhead_params is not None, "Reward model is not correctly loaded."
+        model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
+        model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
+        model.register_buffer("default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False)
+        model.register_buffer("default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False)
+        logger.info("Loaded adapter weights of reward model from {}".format(finetuning_args.reward_model))
+        return None
+    else:
+        reward_model_args_dict = model_args.to_dict()
+        reward_model_args_dict.update(dict(
+            model_name_or_path=finetuning_args.reward_model,
+            checkpoint_dir=finetuning_args.reward_model_checkpoint,
+            quantization_bit=finetuning_args.reward_model_quantization_bit
+        ))
+        reward_model_args = ModelArguments(**reward_model_args_dict)
+        reward_finetuning_args = FinetuningArguments(finetuning_type="lora")
+        reward_model, _ = load_model_and_tokenizer(reward_model_args, reward_finetuning_args, is_trainable=False, stage="ppo")
+        logger.info("Loaded full weights of reward model from {}".format(finetuning_args.reward_model))
+        return reward_model
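
Taken together with the workflow changes above, the relocated helpers are consumed roughly as sketched below. This is a condensed illustration rather than code from the patch: build_ppo_models is a hypothetical wrapper name, and the argument objects are assumed to come from llmtuner.model.get_train_args.

from llmtuner.hparams import ModelArguments, FinetuningArguments
from llmtuner.model import load_model_and_tokenizer
from llmtuner.train.utils import create_ref_model, create_reward_model


def build_ppo_models(model_args: ModelArguments, finetuning_args: FinetuningArguments):
    # Trainable policy model (a valuehead model in the PPO stage) and its tokenizer.
    model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args, is_trainable=True, stage="ppo")
    # Frozen reference model: a separate checkpoint when finetuning_args.ref_model is set,
    # None for LoRA fine-tuning, otherwise a frozen copy of the model itself.
    ref_model = create_ref_model(model_args, finetuning_args, stage="ppo")
    # Reward model: None when reward_model_type == "lora" (the reward head is registered
    # as buffers on `model`), otherwise a separately loaded valuehead model.
    reward_model = create_reward_model(model, model_args, finetuning_args)
    return model, tokenizer, ref_model, reward_model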