import os

import torch

from llamafactory.hparams import get_train_args
from llamafactory.model import load_model, load_tokenizer

# Tiny random model used to keep the test fast; overridable via the TINY_LLAMA env var.
TINY_LLAMA = os.environ.get("TINY_LLAMA", "llamafactory/tiny-random-Llama-3")

TRAIN_ARGS = {
    "model_name_or_path": TINY_LLAMA,
    "stage": "sft",
    "do_train": True,
    "finetuning_type": "full",
    "dataset": "llamafactory/tiny-supervised-dataset",
    "dataset_dir": "ONLINE",
    "template": "llama3",
    "cutoff_len": 1024,
    "overwrite_cache": True,
    "output_dir": "dummy_dir",
    "overwrite_output_dir": True,
    "fp16": True,
}


def test_full():
    # Parse the training arguments and load the tokenizer and model in trainable mode.
    model_args, _, _, finetuning_args, _ = get_train_args(TRAIN_ARGS)
    tokenizer_module = load_tokenizer(model_args)
    model = load_model(tokenizer_module["tokenizer"], model_args, finetuning_args, is_trainable=True)
    for param in model.parameters():
        # Full finetuning: every parameter must require gradients.
        assert param.requires_grad is True
        # Even with fp16 mixed precision enabled, master weights stay in fp32.
        assert param.dtype == torch.float32
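
# Minimal sketch for running this check outside of pytest; assumes the tiny
# model and dataset named above are reachable (they are fetched from the Hub).
if __name__ == "__main__":
    test_full()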