Mirror of https://github.com/hiyouga/LLaMA-Factory.git, synced 2025-12-16 03:40:34 +08:00
[model] fix lora on quant models (#7456)
Co-authored-by: root <root@ai>
@@ -3,7 +3,7 @@ transformers>=4.41.2,<=4.50.0,!=4.46.*,!=4.47.*,!=4.48.0;python_version>='3.10'
 transformers>=4.41.2,<=4.49.0,!=4.46.*,!=4.47.*,!=4.48.*;sys_platform == 'darwin'
 datasets>=2.16.0,<=3.3.2
 accelerate>=0.34.0,<=1.4.0
-peft>=0.11.1,<=0.12.0
+peft>=0.11.1,<=0.15.0
 trl>=0.8.6,<=0.9.6
 tokenizers>=0.19.0,<=0.21.0
 gradio>=4.38.0,<=5.21.0
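The only functional change is relaxing the peft ceiling from <=0.12.0 to <=0.15.0; judging by the commit title, the newer peft releases are what fix LoRA fine-tuning on quantized base models. Below is a minimal sketch of that pattern (attaching a LoRA adapter to a 4-bit bitsandbytes model with peft); the model path, target modules, and LoRA hyperparameters are illustrative assumptions, not values taken from this commit or from LLaMA-Factory's own code.

# Sketch only: LoRA on a 4-bit quantized model via peft.
# Placeholders: "your-base-model", target_modules, r, lora_alpha are assumptions.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# Load the base model quantized to 4-bit with bitsandbytes.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "your-base-model",  # hypothetical model id / local path
    quantization_config=bnb_config,
    device_map="auto",
)

# Freeze the quantized weights and prepare the model for k-bit training
# (casts norms to fp32, enables input gradients, etc.).
model = prepare_model_for_kbit_training(model)

# Attach trainable LoRA adapters; only these parameters are updated.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],  # illustrative choice of modules
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()

With this setup the 4-bit base weights stay frozen and only the LoRA matrices receive gradients, which is the configuration the relaxed peft pin is meant to support.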