Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-12-20 22:00:36 +08:00)
Former-commit-id: 337ce5272b81f5561162beb08814b0e5abf23703
@@ -92,7 +92,7 @@ def load_model_and_tokenizer(
             )

         is_mergeable = False
-        config_kwargs["device_map"] = {"": int(os.environ.get("LOCAL_RANK", "0"))}
+        config_kwargs["device_map"] = {"": int(os.environ.get("LOCAL_RANK", "0"))} if is_trainable else "auto"
         logger.info("Quantizing model to {} bit.".format(model_args.quantization_bit))

     # Load and prepare pretrained models (without valuehead).
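In effect, the patch keeps the per-rank device map only while training and falls back to "auto" dispatch otherwise. A minimal sketch of that selection logic, assuming the is_trainable flag and config_kwargs dict from the hunk above (the helper name select_device_map is illustrative, not part of the original code):

    import os

    def select_device_map(is_trainable: bool):
        """Return the device_map used when loading a quantized model."""
        if is_trainable:
            # Training: pin the entire model ("" = root module) to this
            # process's GPU, as given by the launcher's LOCAL_RANK.
            return {"": int(os.environ.get("LOCAL_RANK", "0"))}
        # Inference: let the loader place the quantized weights automatically
        # across the visible devices.
        return "auto"

    config_kwargs = {"device_map": select_device_map(is_trainable=True)}

During distributed training each process gets its own LOCAL_RANK, so every rank loads a full copy of the quantized model on its own GPU; at inference time "auto" instead lets the weights be spread over whatever devices are available.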