Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-02 11:42:49 +08:00)
support infer 4bit model on GPUs #3023
Former-commit-id: eb259cc5738dfb383e4cc5d32579501c580e11b1
This commit is contained in:
parent cefe7f7bcf
commit b7468ea0a8
@@ -53,6 +53,10 @@ class ModelArguments:
         default=True,
         metadata={"help": "Whether or not to use double quantization in int4 training."},
     )
+    quantization_device_map: Optional[Literal["auto"]] = field(
+        default=None,
+        metadata={"help": "Device map used for loading the 4-bit quantized model, needs bitsandbytes>=0.43.0."},
+    )
     rope_scaling: Optional[Literal["linear", "dynamic"]] = field(
         default=None,
         metadata={"help": "Which scaling strategy should be adopted for the RoPE embeddings."},
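For context, a minimal standalone sketch (not part of this commit) of how the new field surfaces as a command-line flag when the dataclass is parsed with transformers' HfArgumentParser; "TinyModelArguments" is a hypothetical, trimmed stand-in for the real ModelArguments, and the argv list is illustrative only:

# Minimal sketch, assuming the arguments are parsed with transformers' HfArgumentParser.
# TinyModelArguments is a hypothetical, trimmed stand-in for ModelArguments.
from dataclasses import dataclass, field
from typing import Literal, Optional

from transformers import HfArgumentParser


@dataclass
class TinyModelArguments:
    quantization_bit: Optional[int] = field(default=None)
    quantization_device_map: Optional[Literal["auto"]] = field(default=None)


parser = HfArgumentParser(TinyModelArguments)
# Equivalent to launching with: --quantization_bit 4 --quantization_device_map auto
(model_args,) = parser.parse_args_into_dataclasses(
    args=["--quantization_bit", "4", "--quantization_device_map", "auto"]
)
assert model_args.quantization_device_map == "auto"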
@@ -208,11 +208,6 @@ def _configure_quantization(
         logger.info("Quantizing model to {} bit.".format(model_args.export_quantization_bit))

     elif model_args.quantization_bit is not None:  # bnb
-        if is_deepspeed_zero3_enabled():
-            require_version("transformers>=4.39.0", "To fix: pip install transformers>=4.39.0")
-            require_version("accelerate>=0.28.0", "To fix: pip install accelerate>=0.28.0")
-            require_version("bitsandbytes>=0.43.0", "To fix: pip install bitsandbytes>=0.43.0")
-
         if model_args.quantization_bit == 8:
             require_version("bitsandbytes>=0.37.0", "To fix: pip install bitsandbytes>=0.37.0")
             init_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)
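The require_version guard used in this function comes from transformers; a minimal sketch of the pattern, with the version pins taken from this diff:

# Minimal sketch of the version-guard pattern used above. require_version raises an
# error carrying the hint when the installed package does not satisfy the requirement.
from transformers.utils.versions import require_version

require_version("transformers>=4.39.0", "To fix: pip install transformers>=4.39.0")
require_version("accelerate>=0.28.0", "To fix: pip install accelerate>=0.28.0")
require_version("bitsandbytes>=0.43.0", "To fix: pip install bitsandbytes>=0.43.0")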
@@ -227,7 +222,16 @@ def _configure_quantization(
                 bnb_4bit_quant_storage=model_args.compute_dtype,  # crucial for fsdp qlora
             )

-        init_kwargs["device_map"] = {"": get_current_device()}
+        if is_deepspeed_zero3_enabled() or model_args.quantization_device_map == "auto":
+            if model_args.quantization_bit != 4:
+                raise ValueError("Only 4-bit quantized model can use auto device map.")
+
+            require_version("transformers>=4.39.0", "To fix: pip install transformers>=4.39.0")
+            require_version("accelerate>=0.28.0", "To fix: pip install accelerate>=0.28.0")
+            require_version("bitsandbytes>=0.43.0", "To fix: pip install bitsandbytes>=0.43.0")
+        else:
+            init_kwargs["device_map"] = {"": get_current_device()}
+
         logger.info("Quantizing model to {} bit.".format(model_args.quantization_bit))

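Taken together, the change lets a 4-bit bitsandbytes model be sharded across the available GPUs instead of always being pinned to the current device. A standalone sketch of roughly what such a load looks like in plain transformers (the model path and dtype are placeholders, not values from this commit):

# Standalone sketch of the effect of the new branch: 4-bit bnb quantization with an
# automatic device map for multi-GPU inference. Model path and dtype are placeholders.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
)

model = AutoModelForCausalLM.from_pretrained(
    "path_to_llama_model",            # placeholder
    quantization_config=quant_config,
    device_map="auto",                # shard the quantized weights across available GPUs
    torch_dtype=torch.bfloat16,
)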