Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-17 04:10:36 +08:00)
modify style
Former-commit-id: 54b713d0c4ffdfc6a7faeb14471b58bb1cd8acf5
@@ -1,6 +1,7 @@
 from .loader import load_config, load_model, load_tokenizer
 from .utils.misc import find_all_linear_modules, load_valuehead_params

+
 __all__ = [
     "load_config",
     "load_model",
@@ -38,9 +38,7 @@ def init_adapter(
         logger.info("Adapter is not found at evaluation, load the base model.")
         return model

-    if finetuning_args.finetuning_type != "lora" and getattr(
-        model, "quantization_method", None
-    ):
+    if finetuning_args.finetuning_type != "lora" and getattr(model, "quantization_method", None):
         raise ValueError("You can only use lora for quantized models.")

     if finetuning_args.finetuning_type == "full" and is_trainable:
@@ -68,12 +66,8 @@ def init_adapter(

             stride = num_layers // finetuning_args.num_layer_trainable
             trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride)
-        elif (
-            finetuning_args.num_layer_trainable > 0
-        ):  # fine-tuning the last n layers if num_layer_trainable > 0
-            trainable_layer_ids = range(
-                num_layers - finetuning_args.num_layer_trainable, num_layers
-            )
+        elif finetuning_args.num_layer_trainable > 0:  # fine-tuning the last n layers if num_layer_trainable > 0
+            trainable_layer_ids = range(num_layers - finetuning_args.num_layer_trainable, num_layers)
         else:  # fine-tuning the first n layers if num_layer_trainable < 0
             trainable_layer_ids = range(-finetuning_args.num_layer_trainable)

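Beyond the reformat, this hunk is the freeze-tuning layer picker. A minimal standalone sketch of that selection logic, using hypothetical values and a stand-in `use_llama_pro` flag for the enclosing branch that owns the stride computation (not visible in the hunk):

```python
# Hypothetical values; only the selection logic mirrors the hunk above.
num_layers = 32          # e.g. model.config.num_hidden_layers
num_layer_trainable = 8  # > 0: train the last n layers, < 0: the first n layers
use_llama_pro = True     # stand-in for the branch guarding the stride computation

if use_llama_pro:
    # evenly spaced blocks across the whole network
    stride = num_layers // num_layer_trainable
    trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride)
elif num_layer_trainable > 0:  # fine-tune the last n layers
    trainable_layer_ids = range(num_layers - num_layer_trainable, num_layers)
else:  # fine-tune the first n layers
    trainable_layer_ids = range(-num_layer_trainable)

print(list(trainable_layer_ids))  # [3, 7, 11, 15, 19, 23, 27, 31]
```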
@@ -88,15 +82,11 @@ def init_adapter(
         for module_name in finetuning_args.name_module_trainable:
             if module_name not in freeze_modules:
                 raise ValueError(
-                    "Module {} is not found, please choose from {}".format(
-                        module_name, ", ".join(freeze_modules)
-                    )
+                    "Module {} is not found, please choose from {}".format(module_name, ", ".join(freeze_modules))
                 )

             for idx in trainable_layer_ids:
-                trainable_layers.append(
-                    ".{:d}.{}".format(idx, module_name if module_name != "all" else "")
-                )
+                trainable_layers.append(".{:d}.{}".format(idx, module_name if module_name != "all" else ""))

         for name, param in model.named_parameters():
             if any(trainable_layer in name for trainable_layer in trainable_layers):
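The `.{idx}.{module}` strings built here act as substring filters on parameter names. A short, self-contained illustration with made-up layer ids and module names:

```python
# Made-up inputs; the matching logic follows the hunk above.
trainable_layer_ids = [30, 31]
name_module_trainable = ["mlp"]

trainable_layers = []
for module_name in name_module_trainable:
    for idx in trainable_layer_ids:
        trainable_layers.append(".{:d}.{}".format(idx, module_name if module_name != "all" else ""))

param_name = "model.layers.31.mlp.down_proj.weight"
print(any(layer in param_name for layer in trainable_layers))  # True -> parameter stays trainable
```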
@@ -105,43 +95,27 @@ def init_adapter(
             else:
                 param.requires_grad_(False)

-        logger.info(
-            "Set trainable layers: {}".format(",".join(map(str, trainable_layer_ids)))
-        )
+        logger.info("Set trainable layers: {}".format(",".join(map(str, trainable_layer_ids))))

     if finetuning_args.finetuning_type == "lora":
-        logger.info(
-            "Fine-tuning method: {}".format(
-                "DoRA" if finetuning_args.use_dora else "LoRA"
-            )
-        )
+        logger.info("Fine-tuning method: {}".format("DoRA" if finetuning_args.use_dora else "LoRA"))
         adapter_to_resume = None

         if model_args.adapter_name_or_path is not None:
             is_mergeable = True
-            if getattr(
-                model, "quantization_method", None
-            ):  # merge lora in quantized model is unstable
-                assert (
-                    len(model_args.adapter_name_or_path) == 1
-                ), "Quantized model only accepts a single adapter."
+            if getattr(model, "quantization_method", None):  # merge lora in quantized model is unstable
+                assert len(model_args.adapter_name_or_path) == 1, "Quantized model only accepts a single adapter."
                 is_mergeable = False

             if is_deepspeed_zero3_enabled():
-                assert (
-                    len(model_args.adapter_name_or_path) == 1
-                ), "Cannot use multiple adapters in DeepSpeed ZeRO-3."
+                assert len(model_args.adapter_name_or_path) == 1, "Cannot use multiple adapters in DeepSpeed ZeRO-3."
                 is_mergeable = False

             if model_args.use_unsloth:
-                assert (
-                    len(model_args.adapter_name_or_path) == 1
-                ), "Unsloth model only accepts a single adapter."
+                assert len(model_args.adapter_name_or_path) == 1, "Unsloth model only accepts a single adapter."
                 is_mergeable = False

-            if (is_trainable and not finetuning_args.create_new_adapter) or (
-                not is_mergeable
-            ):
+            if (is_trainable and not finetuning_args.create_new_adapter) or (not is_mergeable):
                 adapter_to_merge = model_args.adapter_name_or_path[:-1]
                 adapter_to_resume = model_args.adapter_name_or_path[-1]
             else:
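The reformatted condition above decides whether previously trained adapters are merged into the base weights or kept for resuming. A toy sketch of the split; paths are placeholders, and the `else` body shown here is an assumption since the hunk cuts off right after `else:`:

```python
# Placeholder adapter paths; flags are illustrative.
adapter_name_or_path = ["saves/lora-a", "saves/lora-b", "saves/lora-c"]
is_trainable, create_new_adapter, is_mergeable = True, False, True

adapter_to_resume = None
if (is_trainable and not create_new_adapter) or (not is_mergeable):
    adapter_to_merge = adapter_name_or_path[:-1]  # merged into the base model first
    adapter_to_resume = adapter_name_or_path[-1]  # training continues from the last one
else:
    adapter_to_merge = adapter_name_or_path       # assumption: everything gets merged

print(adapter_to_merge, adapter_to_resume)  # ['saves/lora-a', 'saves/lora-b'] saves/lora-c
```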
@@ -158,9 +132,7 @@ def init_adapter(

         if adapter_to_resume is not None:  # resume lora training
             if model_args.use_unsloth:
-                model = load_unsloth_peft_model(
-                    config, model_args, is_trainable=is_trainable
-                )
+                model = load_unsloth_peft_model(config, model_args, is_trainable=is_trainable)
             else:
                 model = PeftModel.from_pretrained(
                     model,
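For context, the surrounding code attaches those adapters with the public peft API. A hedged sketch of that flow; the helper name and argument bundling here are ours, not the project's:

```python
from peft import PeftModel


def attach_adapters(model, adapter_to_merge, adapter_to_resume, is_trainable=True):
    # Sketch only: merge earlier adapters into the base weights, then optionally
    # reattach the last adapter so LoRA training can resume from it.
    for adapter in adapter_to_merge:
        model = PeftModel.from_pretrained(model, adapter)
        model = model.merge_and_unload()

    if adapter_to_resume is not None:  # resume lora training
        model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_trainable)

    return model
```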
@@ -169,27 +141,19 @@ def init_adapter(
                     offload_folder=model_args.offload_folder,
                 )

-        if (
-            is_trainable and adapter_to_resume is None
-        ):  # create new lora weights while training
-            if (
-                len(finetuning_args.lora_target) == 1
-                and finetuning_args.lora_target[0] == "all"
-            ):
+        if is_trainable and adapter_to_resume is None:  # create new lora weights while training
+            if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all":
                 target_modules = find_all_linear_modules(model)
             else:
                 target_modules = finetuning_args.lora_target

             if finetuning_args.use_llama_pro:
-                target_modules = find_expanded_modules(
-                    model, target_modules, finetuning_args.num_layer_trainable
-                )
+                target_modules = find_expanded_modules(model, target_modules, finetuning_args.num_layer_trainable)

             if (
                 finetuning_args.use_dora
                 and getattr(model, "quantization_method", None) is not None
-                and getattr(model, "quantization_method", None)
-                != QuantizationMethod.BITS_AND_BYTES
+                and getattr(model, "quantization_method", None) != QuantizationMethod.BITS_AND_BYTES
             ):
                 raise ValueError("DoRA is not compatible with PTQ-quantized models.")

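When `lora_target` is just `all`, the code above swaps in `find_all_linear_modules(model)`. That helper is not shown in the diff; conceptually it boils down to something like this hedged sketch (the exact exclusions in LLaMA-Factory may differ):

```python
import torch


def linear_module_names(model: torch.nn.Module, output_layer_names=("lm_head",)):
    # Collect the suffix names of every Linear layer, skipping the output head,
    # e.g. {"q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"}.
    module_names = set()
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear) and not any(out in name for out in output_layer_names):
            module_names.add(name.split(".")[-1])
    return list(module_names)
```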
@@ -202,11 +166,7 @@ def init_adapter(
                         module_names.add(name.split(".")[-1])

                 finetuning_args.additional_target = module_names
-                logger.warning(
-                    "Vocab has been resized, add {} to trainable params.".format(
-                        ",".join(module_names)
-                    )
-                )
+                logger.warning("Vocab has been resized, add {} to trainable params.".format(",".join(module_names)))

             peft_kwargs = {
                 "r": finetuning_args.lora_rank,
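The warning above fires when the vocabulary was resized: the embedding and output layers then need to train alongside the LoRA weights, so their module names are appended to `additional_target`. A hedged sketch of how those names are typically collected on a transformers model (`embed_tokens`/`lm_head` are only example names):

```python
def embedding_module_names(model):
    # `model` is assumed to be a transformers PreTrainedModel, which provides
    # get_input_embeddings() / get_output_embeddings().
    input_embeddings = model.get_input_embeddings()
    output_embeddings = model.get_output_embeddings()
    module_names = set()
    for name, module in model.named_modules():
        if module in (input_embeddings, output_embeddings):
            module_names.add(name.split(".")[-1])  # e.g. {"embed_tokens", "lm_head"}
    return module_names
```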
@@ -233,10 +193,6 @@ def init_adapter(
                 param.data = param.data.to(torch.float32)

     if model_args.adapter_name_or_path is not None:
-        logger.info(
-            "Loaded adapter(s): {}".format(
-                ",".join(model_args.adapter_name_or_path)
-            )
-        )
+        logger.info("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path)))

     return model
@@ -3,9 +3,9 @@ from typing import TYPE_CHECKING, Any, Dict, Union
 from transformers import (
     AutoConfig,
     AutoModelForCausalLM,
-    AutoTokenizer,
-    AutoProcessor,
     AutoModelForVision2Seq,
+    AutoProcessor,
+    AutoTokenizer,
 )
 from trl import AutoModelForCausalLMWithValueHead

@@ -17,6 +17,7 @@ from .utils.misc import load_valuehead_params, register_autoclass
 from .utils.mod import convert_pretrained_model_to_mod, load_mod_pretrained_model
 from .utils.unsloth import load_unsloth_pretrained_model

+
 if TYPE_CHECKING:
     from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer

@@ -42,7 +43,7 @@ def _get_init_kwargs(model_args: "ModelArguments") -> Dict[str, Any]:

 def load_tokenizer(
     model_args: "ModelArguments",
-) -> Dict[str, Union["PreTrainedTokenizer", "AutoProcesser"]]:
+) -> Dict[str, Union["PreTrainedTokenizer", "AutoProcessor"]]:
     r"""
     Loads pretrained tokenizer.

@@ -70,14 +71,10 @@ def load_tokenizer(
             dict(additional_special_tokens=model_args.new_special_tokens),
             replace_additional_special_tokens=False,
         )
-        logger.info(
-            "Add {} to special tokens.".format(",".join(model_args.new_special_tokens))
-        )
+        logger.info("Add {} to special tokens.".format(",".join(model_args.new_special_tokens)))
         if num_added_tokens > 0 and not model_args.resize_vocab:
             model_args.resize_vocab = True
-            logger.warning(
-                "New tokens have been added, changed `resize_vocab` to True."
-            )
+            logger.warning("New tokens have been added, changed `resize_vocab` to True.")

     patch_tokenizer(tokenizer)
     tokenizer_modules = {"tokenizer": tokenizer, "processor": None}
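The `load_tokenizer` changes above revolve around adding user-supplied special tokens and flipping `resize_vocab` whenever that actually grows the vocabulary. A minimal runnable sketch of that interplay with a stock tokenizer; the model name and token string are examples only:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # example model
new_special_tokens = ["<|tool_call|>"]             # example user-supplied token

num_added_tokens = tokenizer.add_special_tokens(
    dict(additional_special_tokens=new_special_tokens),
    replace_additional_special_tokens=False,
)
resize_vocab = num_added_tokens > 0  # mirrors how the loader flips resize_vocab
print(num_added_tokens, resize_vocab)  # 1 True
```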
@@ -174,10 +171,8 @@ def load_model(
     trainable_params, all_param = count_parameters(model)
     if is_trainable:
-        param_stats = (
-            "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format(
-                trainable_params, all_param, 100 * trainable_params / all_param
-            )
+        param_stats = "trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format(
+            trainable_params, all_param, 100 * trainable_params / all_param
         )
     else:
         param_stats = "all params: {:d}".format(all_param)

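Finally, the `param_stats` line reformatted above reports numbers produced by `count_parameters`. A hedged sketch of what that counting amounts to; the real helper also accounts for quantized weights, which this version skips:

```python
import torch


def count_parameters(model: torch.nn.Module):
    # Count trainable and total parameters; quantization-aware corrections omitted.
    trainable_params, all_param = 0, 0
    for param in model.parameters():
        num_params = param.numel()
        all_param += num_params
        if param.requires_grad:
            trainable_params += num_params
    return trainable_params, all_param


model = torch.nn.Linear(16, 4)
trainable_params, all_param = count_parameters(model)
print("trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format(
    trainable_params, all_param, 100 * trainable_params / all_param
))
```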