From c8eff09c7c44fb7aa94c0d8593e88eb1ef8b4150 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Fri, 1 Dec 2023 23:37:10 +0800
Subject: [PATCH] tiny fix

Former-commit-id: a973ce6e890d3f384fd225334f53a49907fff10d
---
 src/llmtuner/model/utils.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/src/llmtuner/model/utils.py b/src/llmtuner/model/utils.py
index 1eab538d..12a45445 100644
--- a/src/llmtuner/model/utils.py
+++ b/src/llmtuner/model/utils.py
@@ -22,11 +22,7 @@ def dispatch_model(model: "PreTrainedModel") -> "PreTrainedModel":
     Dispatches a pre-trained model to GPUs with balanced memory.
     Borrowed from: https://github.com/huggingface/transformers/blob/v4.31.0/src/transformers/modeling_utils.py#L2803
     """
-    if (
-        getattr(model, "is_loaded_in_8bit", False)  # bnb
-        or getattr(model, "is_loaded_in_4bit", False)  # bnb
-        or getattr(model.config, "quantization_config", None)  # gptq or awq
-    ):  # already set on current device
+    if getattr(model, "quantization_method", None):  # already set on current device
         return model
 
     if torch.cuda.device_count() > 1:
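
Note (not part of the original patch): the three removed checks are subsumed by a single attribute because transformers releases contemporaneous with this commit (roughly v4.35 and later) record the quantization backend on the model object as `quantization_method` during `from_pretrained`, whether the checkpoint was loaded with bitsandbytes, GPTQ, or AWQ. Below is a minimal sketch of that behavior, assuming such a transformers version with a CUDA device and bitsandbytes installed; the model id and 4-bit config are purely illustrative.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Load a checkpoint with bitsandbytes 4-bit quantization; GPTQ or AWQ
# checkpoints are stamped the same way inside from_pretrained.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",  # illustrative model id
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    torch_dtype=torch.float16,
)

# transformers sets this attribute for every quantization backend it supports,
# so the simplified guard in dispatch_model() short-circuits for all of them,
# replacing the separate is_loaded_in_8bit / is_loaded_in_4bit /
# config.quantization_config checks.
print(getattr(model, "quantization_method", None))
# -> QuantizationMethod.BITS_AND_BYTES (a str-valued enum)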