mirror of https://github.com/hiyouga/LLaMA-Factory.git
fix #2666
@@ -6,14 +6,14 @@ from llamafactory.hparams import get_train_args
 from llamafactory.model import load_model, load_tokenizer
 
-TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-LlamaForCausalLM")
+TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
 
-TRAINING_ARGS = {
+TRAIN_ARGS = {
     "model_name_or_path": TINY_LLAMA,
     "stage": "sft",
     "do_train": True,
     "finetuning_type": "lora",
-    "dataset": "llamafactory/tiny_dataset",
+    "dataset": "llamafactory/tiny-supervised-dataset",
     "dataset_dir": "ONLINE",
     "template": "llama3",
     "cutoff_len": 1024,
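Note on the hunk above: TINY_LLAMA is read with os.environ.get, which falls back to its second argument when the variable is unset, so CI can point these tests at a different tiny checkpoint without editing the file. A minimal standalone sketch of that lookup (standard library only; the repo id is the one from the diff):

import os

# Uses the exported TINY_LLAMA value if present; otherwise falls back to
# the default tiny checkpoint id introduced by this commit.
TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")
print(TINY_LLAMA)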
@@ -25,12 +25,7 @@ TRAINING_ARGS = {
 
 
 def test_lora_all_modules():
-    model_args, _, _, finetuning_args, _ = get_train_args(
-        {
-            "lora_target": "all",
-            **TRAINING_ARGS,
-        }
-    )
+    model_args, _, _, finetuning_args, _ = get_train_args({"lora_target": "all", **TRAIN_ARGS})
     tokenizer_module = load_tokenizer(model_args)
     model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
     linear_modules = set()
@@ -48,11 +43,7 @@ def test_lora_all_modules():
 
 def test_lora_extra_modules():
     model_args, _, _, finetuning_args, _ = get_train_args(
-        {
-            "lora_target": "all",
-            "additional_target": "embed_tokens,lm_head",
-            **TRAINING_ARGS,
-        }
+        {"lora_target": "all", "additional_target": "embed_tokens,lm_head", **TRAIN_ARGS}
     )
     tokenizer_module = load_tokenizer(model_args)
     model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
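Note on the test changes: both tests now merge per-test overrides onto the shared TRAIN_ARGS defaults with dict unpacking, collapsing the multi-line dict literals into one-liners. A minimal sketch of the merge semantics (standalone Python; SHARED_DEFAULTS is a stand-in for TRAIN_ARGS, not a name from the repo):

# In a dict display, later entries win, so any key duplicated inside
# **SHARED_DEFAULTS would overwrite a literal key written before it.
SHARED_DEFAULTS = {"stage": "sft", "finetuning_type": "lora"}
args = {"lora_target": "all", **SHARED_DEFAULTS}
print(args)  # {'lora_target': 'all', 'stage': 'sft', 'finetuning_type': 'lora'}

Since TRAIN_ARGS defines no lora_target key, the ordering is harmless here; if an override ever had to beat a shared default, it would need to appear after the unpacking.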