From 2cf95d4efee6e1bedd8a6aaa237b3f409f763dd6 Mon Sep 17 00:00:00 2001
From: hiyouga
Date: Thu, 14 Mar 2024 18:17:01 +0800
Subject: [PATCH] fix export

Former-commit-id: 3b4a59bfb1866a270b9934a4a2303197ffdab531
---
 src/llmtuner/model/patcher.py | 9 +++++----
 src/llmtuner/train/tuner.py   | 3 +--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/llmtuner/model/patcher.py b/src/llmtuner/model/patcher.py
index a5788a7c..bd484052 100644
--- a/src/llmtuner/model/patcher.py
+++ b/src/llmtuner/model/patcher.py
@@ -294,11 +294,12 @@ def patch_config(
     init_kwargs["torch_dtype"] = model_args.compute_dtype
     if not is_deepspeed_zero3_enabled():
         init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage
-        if "device_map" not in init_kwargs:  # quant models cannot use auto device map
-            init_kwargs["device_map"] = model_args.device_map or {"": get_current_device()}
+        if model_args.low_cpu_mem_usage:
+            if "device_map" not in init_kwargs:  # quant models cannot use auto device map
+                init_kwargs["device_map"] = model_args.device_map or {"": get_current_device()}
 
-        if init_kwargs["device_map"] == "auto":
-            init_kwargs["offload_folder"] = model_args.offload_folder
+            if init_kwargs["device_map"] == "auto":
+                init_kwargs["offload_folder"] = model_args.offload_folder
 
 
 def patch_model(
diff --git a/src/llmtuner/train/tuner.py b/src/llmtuner/train/tuner.py
index 43b76bef..5e027b73 100644
--- a/src/llmtuner/train/tuner.py
+++ b/src/llmtuner/train/tuner.py
@@ -42,8 +42,7 @@ def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["Tra
 
 def export_model(args: Optional[Dict[str, Any]] = None):
     model_args, data_args, finetuning_args, _ = get_infer_args(args)
-
-    model_args.device_map = {"": "cpu"}
+    model_args.low_cpu_mem_usage = False
 
     if model_args.export_dir is None:
         raise ValueError("Please specify `export_dir` to save model.")
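
Note on the change (a minimal, self-contained sketch, not part of the patch): the export path previously forced `model_args.device_map = {"": "cpu"}`; after this commit it disables `low_cpu_mem_usage` instead, and `patch_config` only fills in a `device_map` when `low_cpu_mem_usage` is enabled, so the export path falls back to a plain CPU load. The snippet below mirrors that branch; `ModelArgs`, `get_current_device`, and `build_init_kwargs` are hypothetical stand-ins for llmtuner's `ModelArguments`, its device helper, and the relevant part of `patch_config`.

# Sketch only: ModelArgs and get_current_device are simplified stand-ins for
# llmtuner's ModelArguments and its device helper; build_init_kwargs mirrors the
# patched branch of patch_config shown above.
from dataclasses import dataclass
from typing import Any, Dict, Optional, Union


@dataclass
class ModelArgs:
    low_cpu_mem_usage: bool = True
    device_map: Optional[Union[str, Dict[str, Any]]] = None
    offload_folder: str = "offload"


def get_current_device() -> str:
    return "cuda:0"  # assumption: the real helper returns the active accelerator


def build_init_kwargs(model_args: ModelArgs) -> Dict[str, Any]:
    init_kwargs: Dict[str, Any] = {"low_cpu_mem_usage": model_args.low_cpu_mem_usage}

    if model_args.low_cpu_mem_usage:
        if "device_map" not in init_kwargs:  # quant models cannot use auto device map
            init_kwargs["device_map"] = model_args.device_map or {"": get_current_device()}

        if init_kwargs["device_map"] == "auto":
            init_kwargs["offload_folder"] = model_args.offload_folder

    return init_kwargs


# Training / inference: a device_map is filled in as before.
print(build_init_kwargs(ModelArgs()))
# -> {'low_cpu_mem_usage': True, 'device_map': {'': 'cuda:0'}}

# Export (tuner.py now sets low_cpu_mem_usage = False): no device_map is passed,
# so the model is loaded on CPU, which the old explicit
# model_args.device_map = {"": "cpu"} override used to approximate.
print(build_init_kwargs(ModelArgs(low_cpu_mem_usage=False)))
# -> {'low_cpu_mem_usage': False}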