mirror of https://github.com/hiyouga/LLaMA-Factory.git
synced 2025-08-04 04:32:50 +08:00

commit eb5a852dd5 (parent f441932bd1)
Former-commit-id: 35b91ea34caade45dd51813b94da5177b852aa4c

fix import bug
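In short, this commit moves `create_ref_model` and `create_reward_model` out of `llmtuner.model.utils` and into a new `llmtuner.train.utils` module, then updates the DPO and PPO workflows to import them from the new location. The "import bug" is presumably the circular dependency created by `llmtuner/model/utils.py` importing `load_model_and_tokenizer` back from its own `llmtuner.model` package; the diff itself does not spell this out. A minimal sketch of the import layout after the change, with paths taken from the hunks below:

    # What llmtuner.model continues to export after this commit:
    from llmtuner.model import load_model_and_tokenizer, load_valuehead_params, dispatch_model, generate_model_card

    # Where the RLHF-specific factories now live, next to the training workflows:
    from llmtuner.train.utils import create_ref_model, create_reward_model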
@@ -1,3 +1,5 @@
+# Level: loader > adapter > parser, utils
+
 from llmtuner.model.loader import load_model_and_tokenizer
 from llmtuner.model.parser import get_train_args, get_infer_args, get_eval_args
-from llmtuner.model.utils import create_ref_model, create_reward_model, dispatch_model, generate_model_card
+from llmtuner.model.utils import dispatch_model, generate_model_card, load_valuehead_params

@@ -82,15 +82,13 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
         raise ValueError("Please enable `predict_with_generate` to save model predictions.")
 
     if finetuning_args.stage in ["rm", "ppo"]:
-        if finetuning_args.finetuning_type != "lora":
-            raise ValueError("RM and PPO stages can only be performed with the LoRA method.")
         if training_args.resume_from_checkpoint is not None:
             raise ValueError("RM and PPO stages do not support `resume_from_checkpoint`.")
         if training_args.load_best_model_at_end:
             raise ValueError("RM and PPO stages do not support `load_best_model_at_end`.")
 
     if finetuning_args.stage == "ppo" and not training_args.do_train:
-        raise ValueError("PPO training does not support evaluation.")
+        raise ValueError("PPO training does not support evaluation, use the SFT stage to evaluate models.")
 
     if finetuning_args.stage in ["rm", "dpo"]:
         for dataset_attr in data_args.dataset_list:
@@ -131,6 +129,9 @@ def get_train_args(args: Optional[Dict[str, Any]] = None) -> _TRAIN_CLS:
     if (not training_args.do_train) and model_args.quantization_bit is not None:
         logger.warning("Evaluating model in 4/8-bit mode may cause lower scores.")
 
+    if (not training_args.do_train) and finetuning_args.stage == "dpo" and finetuning_args.ref_model is None:
+        logger.warning("Specify `ref_model` for computing rewards at evaluation.")
+
     # postprocess training_args
     if (
         training_args.local_rank != -1
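The newly added guard warns, at argument-parsing time, when a DPO evaluation run has no explicit reference model; in that case the trainer falls back to the policy model itself and cannot compute rewards, so the DPO workflow later drops the reward metrics (see the workflow hunk further down). A minimal, self-contained sketch of the same check; the dataclass here is a simplified stand-in, not the real parsed argument classes:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Args:  # simplified stand-in for the parsed argument objects
        do_train: bool = False
        stage: str = "sft"
        ref_model: Optional[str] = None

    def warn_if_dpo_eval_without_ref_model(args: Args) -> None:
        # Mirrors the guard added to get_train_args above.
        if (not args.do_train) and args.stage == "dpo" and args.ref_model is None:
            print("Specify `ref_model` for computing rewards at evaluation.")

    warn_if_dpo_eval_without_ref_model(Args(stage="dpo"))  # prints the warning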
@@ -1,5 +1,5 @@
 import torch
-from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Set, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
 
 from transformers.utils import cached_file
 from transformers.trainer import WEIGHTS_NAME, SAFE_WEIGHTS_NAME
@@ -7,83 +7,15 @@ from transformers.trainer import WEIGHTS_NAME, SAFE_WEIGHTS_NAME
 from llmtuner.extras.constants import LAYERNORM_NAMES
 from llmtuner.extras.logging import get_logger
 from llmtuner.hparams import ModelArguments, FinetuningArguments
-from llmtuner.model import load_model_and_tokenizer
 
 if TYPE_CHECKING:
     from transformers.modeling_utils import PreTrainedModel
-    from trl import AutoModelForCausalLMWithValueHead
     from llmtuner.hparams import DataArguments
 
 
 logger = get_logger(__name__)
 
 
-def create_ref_model(
-    model_args: "ModelArguments",
-    finetuning_args: "FinetuningArguments",
-    stage: Literal["ppo", "dpo"]
-) -> Union["PreTrainedModel", "AutoModelForCausalLMWithValueHead"]:
-    r"""
-    Creates reference model for PPO/DPO training. Evaluation mode is not supported.
-
-    The valuehead parameter is randomly initialized since it is useless for PPO training.
-    """
-    if finetuning_args.ref_model is not None:
-        ref_model_args_dict = model_args.to_dict()
-        ref_model_args_dict.update(dict(
-            model_name_or_path=finetuning_args.ref_model,
-            checkpoint_dir=finetuning_args.ref_model_checkpoint,
-            quantization_bit=finetuning_args.ref_model_quantization_bit
-        ))
-        ref_model_args = ModelArguments(**ref_model_args_dict)
-        ref_finetuning_args = FinetuningArguments(finetuning_type="lora")
-        ref_model, _ = load_model_and_tokenizer(ref_model_args, ref_finetuning_args, is_trainable=False, stage=stage)
-        logger.info("Created reference model from {}".format(finetuning_args.ref_model))
-    else:
-        if finetuning_args.finetuning_type == "lora":
-            ref_model = None
-        else:
-            ref_model, _ = load_model_and_tokenizer(model_args, finetuning_args, is_trainable=False, stage=stage)
-            logger.info("Created reference model from the model itself.")
-
-    return ref_model
-
-
-def create_reward_model(
-    model: "AutoModelForCausalLMWithValueHead",
-    model_args: "ModelArguments",
-    finetuning_args: "FinetuningArguments"
-) -> "AutoModelForCausalLMWithValueHead":
-    r"""
-    Creates reward model for PPO training.
-    """
-    if finetuning_args.reward_model_type == "lora":
-        model.pretrained_model.load_adapter(finetuning_args.reward_model, "reward")
-        for name, param in model.named_parameters(): # https://github.com/huggingface/peft/issues/1090
-            if "default" in name:
-                param.data = param.data.to(torch.float32) # trainable params should in fp32
-        vhead_params = load_valuehead_params(model_args.checkpoint_dir[-1], model_args)
-        assert vhead_params is not None, "Reward model is not correctly loaded."
-        model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
-        model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
-        model.register_buffer("default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False)
-        model.register_buffer("default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False)
-        logger.info("Loaded adapter weights of reward model from {}".format(finetuning_args.reward_model))
-        return None
-    else:
-        reward_model_args_dict = model_args.to_dict()
-        reward_model_args_dict.update(dict(
-            model_name_or_path=finetuning_args.reward_model,
-            checkpoint_dir=finetuning_args.reward_model_checkpoint,
-            quantization_bit=finetuning_args.reward_model_quantization_bit
-        ))
-        reward_model_args = ModelArguments(**reward_model_args_dict)
-        reward_finetuning_args = FinetuningArguments(finetuning_type="lora")
-        reward_model, _ = load_model_and_tokenizer(reward_model_args, reward_finetuning_args, is_trainable=False, stage="ppo")
-        logger.info("Load full weights of reward model from {}".format(finetuning_args.reward_model))
-        return reward_model
-
-
 def dispatch_model(model: "PreTrainedModel") -> "PreTrainedModel":
     r"""
     Dispatches a pre-trained model to GPUs with balanced memory.

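For context on what stays behind: `load_valuehead_params`, now re-exported from `llmtuner.model`, is what `create_reward_model` uses to read the `v_head.summary.*` tensors from a checkpoint before registering them as buffers on the policy model. A small runnable sketch of that buffer-registration step with a made-up state dict (tensor names follow the code above; the shapes are illustrative):

    import torch

    # Made-up value-head weights standing in for load_valuehead_params(...)
    vhead_params = {
        "v_head.summary.weight": torch.randn(1, 4096),
        "v_head.summary.bias": torch.zeros(1),
    }

    model = torch.nn.Module()  # stand-in for the AutoModelForCausalLMWithValueHead instance
    # Same registration pattern as create_reward_model: keep the loaded reward head,
    # plus zero-initialized "default" buffers of identical shape (presumably used to
    # swap heads during PPO rollouts).
    model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
    model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
    model.register_buffer("default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False)
    model.register_buffer("default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False)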
@@ -6,10 +6,10 @@ from transformers import Seq2SeqTrainingArguments
 from llmtuner.data import get_dataset, preprocess_dataset, split_dataset
 from llmtuner.extras.constants import IGNORE_INDEX
-from llmtuner.extras.logging import get_logger
 from llmtuner.extras.ploting import plot_loss
 from llmtuner.hparams import ModelArguments
-from llmtuner.model import create_ref_model, generate_model_card, load_model_and_tokenizer
+from llmtuner.model import generate_model_card, load_model_and_tokenizer
+from llmtuner.train.utils import create_ref_model
 from llmtuner.train.dpo.collator import DPODataCollatorWithPadding
 from llmtuner.train.dpo.trainer import CustomDPOTrainer
 
 
@@ -18,9 +18,6 @@ if TYPE_CHECKING:
     from llmtuner.hparams import DataArguments, FinetuningArguments
 
 
-logger = get_logger(__name__)
-
-
 def run_dpo(
     model_args: "ModelArguments",
     data_args: "DataArguments",
@@ -74,7 +71,6 @@ def run_dpo(
     if training_args.do_eval:
         metrics = trainer.evaluate(metric_key_prefix="eval")
         if id(model) == id(ref_model): # unable to compute rewards without a reference model
-            logger.warning("Specify `ref_model` for computing rewards at evaluation.")
             remove_keys = [key for key in metrics.keys() if "rewards" in key]
             for key in remove_keys:
                 metrics.pop(key)

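The warning removed here is not lost: it now fires once at argument parsing (see the `get_train_args` hunk above). What remains in the workflow is only the metric cleanup when the policy model doubles as its own reference. A tiny self-contained illustration of that cleanup with made-up metric values:

    # Made-up metrics dict; only the filtering logic mirrors the workflow code.
    metrics = {"eval_loss": 0.52, "eval_rewards/chosen": 1.3, "eval_rewards/rejected": -0.7}
    remove_keys = [key for key in metrics.keys() if "rewards" in key]
    for key in remove_keys:
        metrics.pop(key)
    print(metrics)  # -> {'eval_loss': 0.52}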
@@ -9,9 +9,9 @@ from transformers.optimization import get_scheduler
 from llmtuner.data import get_dataset, preprocess_dataset
 from llmtuner.extras.callbacks import SavePeftModelCallback
-from llmtuner.extras.logging import get_logger
 from llmtuner.extras.ploting import plot_loss
-from llmtuner.model import create_ref_model, create_reward_model, load_model_and_tokenizer
+from llmtuner.model import load_model_and_tokenizer
+from llmtuner.train.utils import create_ref_model, create_reward_model
 from llmtuner.train.ppo.trainer import CustomPPOTrainer
 
 
 if TYPE_CHECKING:
@@ -19,9 +19,6 @@ if TYPE_CHECKING:
     from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments
 
 
-logger = get_logger(__name__)
-
-
 def run_ppo(
     model_args: "ModelArguments",
     data_args: "DataArguments",
src/llmtuner/train/utils.py (new file, 79 lines)
@@ -0,0 +1,79 @@
+import torch
+from typing import TYPE_CHECKING, Literal, Union
+
+from llmtuner.extras.logging import get_logger
+from llmtuner.hparams import ModelArguments, FinetuningArguments
+from llmtuner.model import load_model_and_tokenizer, load_valuehead_params
+
+if TYPE_CHECKING:
+    from transformers.modeling_utils import PreTrainedModel
+    from trl import AutoModelForCausalLMWithValueHead
+
+
+logger = get_logger(__name__)
+
+
+def create_ref_model(
+    model_args: "ModelArguments",
+    finetuning_args: "FinetuningArguments",
+    stage: Literal["ppo", "dpo"]
+) -> Union["PreTrainedModel", "AutoModelForCausalLMWithValueHead"]:
+    r"""
+    Creates reference model for PPO/DPO training. Evaluation mode is not supported.
+
+    The valuehead parameter is randomly initialized since it is useless for PPO training.
+    """
+    if finetuning_args.ref_model is not None:
+        ref_model_args_dict = model_args.to_dict()
+        ref_model_args_dict.update(dict(
+            model_name_or_path=finetuning_args.ref_model,
+            checkpoint_dir=finetuning_args.ref_model_checkpoint,
+            quantization_bit=finetuning_args.ref_model_quantization_bit
+        ))
+        ref_model_args = ModelArguments(**ref_model_args_dict)
+        ref_finetuning_args = FinetuningArguments(finetuning_type="lora")
+        ref_model, _ = load_model_and_tokenizer(ref_model_args, ref_finetuning_args, is_trainable=False, stage=stage)
+        logger.info("Created reference model from {}".format(finetuning_args.ref_model))
+    else:
+        if finetuning_args.finetuning_type == "lora":
+            ref_model = None
+        else:
+            ref_model, _ = load_model_and_tokenizer(model_args, finetuning_args, is_trainable=False, stage=stage)
+            logger.info("Created reference model from the model itself.")
+
+    return ref_model
+
+
+def create_reward_model(
+    model: "AutoModelForCausalLMWithValueHead",
+    model_args: "ModelArguments",
+    finetuning_args: "FinetuningArguments"
+) -> "AutoModelForCausalLMWithValueHead":
+    r"""
+    Creates reward model for PPO training.
+    """
+    if finetuning_args.reward_model_type == "lora":
+        model.pretrained_model.load_adapter(finetuning_args.reward_model, "reward")
+        for name, param in model.named_parameters(): # https://github.com/huggingface/peft/issues/1090
+            if "default" in name:
+                param.data = param.data.to(torch.float32) # trainable params should in fp32
+        vhead_params = load_valuehead_params(model_args.checkpoint_dir[-1], model_args)
+        assert vhead_params is not None, "Reward model is not correctly loaded."
+        model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
+        model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
+        model.register_buffer("default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False)
+        model.register_buffer("default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False)
+        logger.info("Loaded adapter weights of reward model from {}".format(finetuning_args.reward_model))
+        return None
+    else:
+        reward_model_args_dict = model_args.to_dict()
+        reward_model_args_dict.update(dict(
+            model_name_or_path=finetuning_args.reward_model,
+            checkpoint_dir=finetuning_args.reward_model_checkpoint,
+            quantization_bit=finetuning_args.reward_model_quantization_bit
+        ))
+        reward_model_args = ModelArguments(**reward_model_args_dict)
+        reward_finetuning_args = FinetuningArguments(finetuning_type="lora")
+        reward_model, _ = load_model_and_tokenizer(reward_model_args, reward_finetuning_args, is_trainable=False, stage="ppo")
+        logger.info("Load full weights of reward model from {}".format(finetuning_args.reward_model))
+        return reward_model
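This diff does not touch the call sites, only the import paths, so the following is a hypothetical sketch of how a workflow consumes these factories after the refactor; it assumes llmtuner is installed and that parsed `model_args`/`finetuning_args` objects are already in hand:

    from llmtuner.train.utils import create_ref_model, create_reward_model

    def build_rlhf_models(model, model_args, finetuning_args):
        # Reference model for KL regularization; may be None when LoRA is used
        # and no explicit ref_model is given (see create_ref_model above).
        ref_model = create_ref_model(model_args, finetuning_args, stage="ppo")
        # Reward model: returns None when reward_model_type == "lora", because the
        # reward adapter and value-head buffers are attached to `model` in place.
        reward_model = create_reward_model(model, model_args, finetuning_args)
        return ref_model, reward_model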