From 92248f9cb2bd0b6f0b6e19014e8deb184e824ad3 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Sun, 24 Mar 2024 00:43:21 +0800
Subject: [PATCH] fix #2936

Former-commit-id: 9ae646fbbd809057a9c54fe41e1ae5a07a674556
---
 src/llmtuner/extras/misc.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/llmtuner/extras/misc.py b/src/llmtuner/extras/misc.py
index 6998b76e..cd2ff5bc 100644
--- a/src/llmtuner/extras/misc.py
+++ b/src/llmtuner/extras/misc.py
@@ -81,7 +81,11 @@ def count_parameters(model: torch.nn.Module) -> Tuple[int, int]:
 
         # Due to the design of 4bit linear layers from bitsandbytes, multiply the number of parameters by 2
         if param.__class__.__name__ == "Params4bit":
-            num_bytes = param.quant_storage.itemsize if hasattr(param, "quant_storage") else 1
+            if hasattr(param, "quant_storage") and hasattr(param.quant_storage, "itemsize"):
+                num_bytes = param.quant_storage.itemsize
+            else:
+                num_bytes = 1
+
             num_params = num_params * 2 * num_bytes
 
         all_param += num_params
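
For illustration, a minimal, self-contained sketch of the guarded fallback this patch introduces. FakeParams4bit and packed_num_bytes are hypothetical names invented for the example; the real object is bitsandbytes' Params4bit, whose quant_storage attribute holds a torch.dtype, and torch.dtype only exposes an `itemsize` property on newer PyTorch releases. The second hasattr check is what prevents an AttributeError on older PyTorch.

    # Minimal sketch of the guarded fallback added by this patch.
    # FakeParams4bit is a hypothetical stand-in for bitsandbytes' Params4bit;
    # its quant_storage attribute mimics the real one, which holds a
    # torch.dtype, and torch.dtype only has an `itemsize` property on
    # newer PyTorch releases.
    import torch


    class FakeParams4bit:
        """Illustrative stand-in; not the real bitsandbytes class."""

        def __init__(self, quant_storage=None):
            if quant_storage is not None:
                self.quant_storage = quant_storage


    def packed_num_bytes(param) -> int:
        # Same logic as the patched count_parameters(): fall back to
        # 1 byte when quant_storage is absent or lacks `itemsize`.
        if hasattr(param, "quant_storage") and hasattr(param.quant_storage, "itemsize"):
            return param.quant_storage.itemsize
        return 1


    print(packed_num_bytes(FakeParams4bit()))               # 1: no quant_storage at all
    print(packed_num_bytes(FakeParams4bit(torch.float16)))  # 2 where dtype.itemsize exists, else 1

On PyTorch versions that implement torch.dtype.itemsize the second call reports the true storage width (2 bytes for float16); on versions that do not, both calls fall back to 1 instead of raising, so count_parameters() still returns a result rather than crashing.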