[model] fix lora on quant models (#7456)

Co-authored-by: root <root@ai>
GuoCoder 2025-03-25 11:59:46 +08:00 committed by GitHub
parent bc9ada9db7
commit b6d8749bf3
2 changed files with 2 additions and 2 deletions

requirements.txt

@@ -3,7 +3,7 @@ transformers>=4.41.2,<=4.50.0,!=4.46.*,!=4.47.*,!=4.48.0;python_version>='3.10'
 transformers>=4.41.2,<=4.49.0,!=4.46.*,!=4.47.*,!=4.48.*;sys_platform == 'darwin'
 datasets>=2.16.0,<=3.3.2
 accelerate>=0.34.0,<=1.4.0
-peft>=0.11.1,<=0.12.0
+peft>=0.11.1,<=0.15.0
 trl>=0.8.6,<=0.9.6
 tokenizers>=0.19.0,<=0.21.0
 gradio>=4.38.0,<=5.21.0
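
Each entry above is a standard PEP 508 requirement specifier, so the relaxed pin peft>=0.11.1,<=0.15.0 can be verified against the installed distribution before training starts. The snippet below is a minimal illustrative sketch using packaging and importlib.metadata; it is not the project's own check_version helper.

from importlib.metadata import version

from packaging.requirements import Requirement


def require(spec: str) -> None:
    # Parse the PEP 508 specifier and compare it with the installed version.
    req = Requirement(spec)
    installed = version(req.name)
    if not req.specifier.contains(installed, prereleases=True):
        raise RuntimeError(f"{req.name} {installed} does not satisfy {spec!r}")


require("peft>=0.11.1,<=0.15.0")  # raises only if the installed peft falls outside the new range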

src/llamafactory/extras/misc.py

@@ -91,7 +91,7 @@ def check_dependencies() -> None:
     check_version("transformers>=4.41.2,<=4.50.0,!=4.46.0,!=4.46.1,!=4.46.2,!=4.46.3,!=4.47.0,!=4.47.1,!=4.48.0")
     check_version("datasets>=2.16.0,<=3.3.2")
     check_version("accelerate>=0.34.0,<=1.4.0")
-    check_version("peft>=0.11.1,<=0.12.0")
+    check_version("peft>=0.11.1,<=0.15.0")
     check_version("trl>=0.8.6,<=0.9.6")
     if is_transformers_version_greater_than("4.46.0") and not is_transformers_version_greater_than("4.48.1"):
         logger.warning_rank0_once("There are known bugs in transformers v4.46.0-v4.48.0, please use other versions.")
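
The relaxed peft upper bound is the change that addresses the LoRA-on-quantized-models issue named in the commit title. As a rough sketch of the affected workflow, a 4-bit quantized base model wrapped with a LoRA adapter through peft looks roughly like the following; the checkpoint name and LoRA hyperparameters are placeholders, not values taken from this commit.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# Load the base model in 4-bit NF4 quantization via bitsandbytes.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B",  # placeholder checkpoint
    quantization_config=bnb_config,
    device_map="auto",
)

# Prepare the quantized model for k-bit training (dtype casts, gradient hooks).
model = prepare_model_for_kbit_training(model)

# Attach a small LoRA adapter; only these low-rank matrices are trained.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()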