Mirror of https://github.com/hiyouga/LLaMA-Factory.git
Synced 2025-08-04 12:42:51 +08:00

Former-commit-id: 03d05991f81b51826d3a4d9da214504e19a301bd
parent c0df6b581c
commit 2279b1948e
@@ -68,18 +68,6 @@ def count_parameters(model: torch.nn.Module) -> Tuple[int, int]:
     return trainable_params, all_param


-def get_current_device() -> str:
-    import accelerate
-    if accelerate.utils.is_xpu_available():
-        return "xpu:{}".format(os.environ.get("LOCAL_RANK", "0"))
-    elif accelerate.utils.is_npu_available():
-        return "npu:{}".format(os.environ.get("LOCAL_RANK", "0"))
-    elif torch.cuda.is_available():
-        return "cuda:{}".format(os.environ.get("LOCAL_RANK", "0"))
-    else:
-        return "cpu"
-
-
 def get_logits_processor() -> "LogitsProcessorList":
     r"""
     Gets logits processor that removes NaN and Inf logits.
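The hunk above deletes the get_current_device() helper from the utilities module that also defines count_parameters and get_logits_processor; the hunks that follow revert the loader module (the one defining load_model_and_tokenizer) to reading the local rank directly. Both variants derive a per-process device from the LOCAL_RANK environment variable that multi-process launchers such as torchrun set for each worker. A minimal sketch of that selection logic, assuming a CUDA-only machine (the removed helper additionally handled XPU and NPU via accelerate):

```python
import os
import torch

# Under a multi-process launch (e.g. `torchrun --nproc_per_node=2 ...`) each
# worker sees its own LOCAL_RANK ("0", "1", ...); a plain single-process run
# falls back to "0".
local_rank = int(os.environ.get("LOCAL_RANK", "0"))

# Pin everything to that rank's GPU, or to the CPU when no GPU is available.
device = "cuda:{}".format(local_rank) if torch.cuda.is_available() else "cpu"

# An empty-string key means "place every module of the model on this device".
device_map = {"": device}
print(device_map)  # e.g. {'': 'cuda:0'}
```

After this commit the loader skips the device string and passes the integer local rank, which accelerate also accepts as a CUDA device index inside a device_map.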
@@ -1,3 +1,4 @@
+import os
 import math
 import torch
 from types import MethodType
@@ -22,7 +23,7 @@ except ImportError: # https://github.com/huggingface/transformers/releases/tag/v
     from transformers.deepspeed import is_deepspeed_zero3_enabled

 from llmtuner.extras.logging import get_logger
-from llmtuner.extras.misc import count_parameters, get_current_device, infer_optim_dtype, try_download_model_from_ms
+from llmtuner.extras.misc import count_parameters, infer_optim_dtype, try_download_model_from_ms
 from llmtuner.extras.packages import is_flash_attn2_available
 from llmtuner.extras.patches import llama_patch as LlamaPatches
 from llmtuner.hparams import FinetuningArguments
@@ -150,7 +151,7 @@ def load_model_and_tokenizer(
     if getattr(config, "quantization_config", None):
         if model_args.quantization_bit is not None: # remove bnb quantization
             model_args.quantization_bit = None
-        config_kwargs["device_map"] = {"": get_current_device()}
+        config_kwargs["device_map"] = {"": int(os.environ.get("LOCAL_RANK", "0"))}
         quantization_config = getattr(config, "quantization_config", None)
         logger.info("Loading {}-bit quantized model.".format(quantization_config.get("bits", -1)))

@@ -172,7 +173,7 @@ def load_model_and_tokenizer(
                 bnb_4bit_quant_type=model_args.quantization_type
             )

-        config_kwargs["device_map"] = {"": get_current_device()}
+        config_kwargs["device_map"] = {"": int(os.environ.get("LOCAL_RANK", "0"))}
         logger.info("Quantizing model to {} bit.".format(model_args.quantization_bit))

     # Load pre-trained models (without valuehead)
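The device_map assembled in the two hunks above travels with the rest of config_kwargs into the Hugging Face from_pretrained call that loads the base model. A minimal sketch of how that wiring looks for the 4-bit bitsandbytes path; the checkpoint name, compute dtype, double-quantization flag, and quant type below are placeholder assumptions, not values taken from the commit:

```python
import os
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Placeholder stand-ins for the model_args fields referenced in the diff.
model_name = "meta-llama/Llama-2-7b-hf"          # hypothetical checkpoint
config_kwargs = {
    "quantization_config": BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.bfloat16,   # model_args.compute_dtype
        bnb_4bit_use_double_quant=True,          # model_args.double_quantization
        bnb_4bit_quant_type="nf4",               # model_args.quantization_type
    ),
    # Same expression the commit switches to: one full copy per local rank.
    "device_map": {"": int(os.environ.get("LOCAL_RANK", "0"))},
}

model = AutoModelForCausalLM.from_pretrained(model_name, **config_kwargs)
```

Mapping the entire model onto a single device per process keeps the layout suited to data-parallel fine-tuning, as opposed to sharding the weights across GPUs with device_map="auto".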