From 0328c0e07c062e8452899a80b6451f8874531388 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 3 Aug 2023 19:29:02 +0800
Subject: [PATCH] fix mtloader

Former-commit-id: a0173c427dacd96fac2fcffc23639d270721fdef
---
 src/llmtuner/tuner/core/loader.py | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/src/llmtuner/tuner/core/loader.py b/src/llmtuner/tuner/core/loader.py
index eed7892b..59afbf38 100644
--- a/src/llmtuner/tuner/core/loader.py
+++ b/src/llmtuner/tuner/core/loader.py
@@ -95,13 +95,8 @@ def load_model_and_tokenizer(
             )

         is_mergeable = False
-        logger.info("Quantizing model to {} bit.".format(model_args.quantization_bit))
-
-    if (
-        model_args.quantization_bit is not None
-        or (os.environ.get('LOCAL_RANK') is not None and not is_deepspeed_zero3_enabled())
-    ):
         config_kwargs["device_map"] = {"": int(os.environ.get("LOCAL_RANK", "0"))}
+        logger.info("Quantizing model to {} bit.".format(model_args.quantization_bit))

     if model_args.checkpoint_dir is not None and finetuning_args.finetuning_type == "full":
         model_to_load = model_args.checkpoint_dir[0]
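
Note (not part of the patch): the sketch below is a minimal, hypothetical reconstruction of the
quantization branch in load_model_and_tokenizer() after this change, reusing only names that
appear in the diff (model_args, config_kwargs, logger, is_mergeable). The helper name
configure_quantization is made up for illustration. Per the diff, the device_map override is no
longer forced whenever LOCAL_RANK is set (and DeepSpeed ZeRO-3 is disabled); it now applies only
when model_args.quantization_bit is given, with the log line emitted right after it.

import os

def configure_quantization(model_args, config_kwargs, logger):
    # Hypothetical helper mirroring the post-patch control flow; not the project's actual API.
    is_mergeable = True
    if model_args.quantization_bit is not None:
        # ... BitsAndBytesConfig setup elided ...
        is_mergeable = False
        # Pin the quantized model to the local device so each rank loads its own copy.
        config_kwargs["device_map"] = {"": int(os.environ.get("LOCAL_RANK", "0"))}
        logger.info("Quantizing model to {} bit.".format(model_args.quantization_bit))
    return is_mergeable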