use fixture

hiyouga
2024-06-15 20:06:17 +08:00
parent 1b834f50be
commit 80a9e6bf94
3 changed files with 24 additions and 7 deletions


@@ -13,8 +13,9 @@
 # limitations under the License.
 
 import os
-from typing import Sequence
+from typing import Dict, Sequence
 
+import pytest
 import torch
 from peft import LoraModel, PeftModel
 from transformers import AutoModelForCausalLM
@@ -71,6 +72,16 @@ def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module", diff_k
             assert torch.allclose(state_dict_a[name], state_dict_b[name]) is True
 
 
+@pytest.fixture
+def fix_valuehead_cpu_loading():
+    def post_init(self: "AutoModelForCausalLMWithValueHead", state_dict: Dict[str, "torch.Tensor"]):
+        state_dict = {k[7:]: state_dict[k] for k in state_dict.keys() if k.startswith("v_head.")}
+        self.v_head.load_state_dict(state_dict, strict=False)
+        del state_dict
+
+    AutoModelForCausalLMWithValueHead.post_init = post_init
+
+
 def test_lora_train_qv_modules():
     model_args, _, _, finetuning_args, _ = get_train_args({"lora_target": "q_proj,v_proj", **TRAIN_ARGS})
     tokenizer_module = load_tokenizer(model_args)
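Context for reviewers: the fixture's dict comprehension keeps only the checkpoint entries under the "v_head." prefix and strips that prefix (7 characters) so the remaining keys line up with the parameter names of the bare v_head submodule before load_state_dict. A minimal standalone sketch of that key transformation, using hypothetical tensor names rather than a real checkpoint:

import torch

# Hypothetical value-head checkpoint keys (names are illustrative only).
state_dict = {
    "v_head.summary.weight": torch.zeros(1, 8),
    "v_head.summary.bias": torch.zeros(1),
    "pretrained_model.lm_head.weight": torch.zeros(8, 8),  # filtered out
}

# Same transformation as in the fixture: len("v_head.") == 7.
v_head_state = {k[7:]: state_dict[k] for k in state_dict if k.startswith("v_head.")}
assert sorted(v_head_state) == ["summary.bias", "summary.weight"]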
@@ -166,6 +177,7 @@ def test_lora_train_new_adapters():
     )
 
 
+@pytest.mark.usefixtures("fix_valuehead_cpu_loading")
 def test_lora_train_valuehead():
     model_args, _, finetuning_args, _ = get_infer_args(INFER_ARGS)
     tokenizer_module = load_tokenizer(model_args)
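Note on the decorator: pytest.mark.usefixtures runs a fixture purely for its side effect (here, installing the patched post_init) without injecting a value into the test function. A minimal, self-contained sketch of that pattern, with hypothetical names:

import pytest

patched = {"applied": False}

@pytest.fixture
def install_patch():  # hypothetical stand-in for fix_valuehead_cpu_loading
    patched["applied"] = True  # side effect only; no return value needed

@pytest.mark.usefixtures("install_patch")
def test_patch_is_active():
    assert patched["applied"]  # the fixture ran before the test body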