mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-23 06:12:50 +08:00)

commit 37c6234126
parent c43d3f2460

tiny fix

Former-commit-id: edafb977330767b82b6c9591d9ec180046155632
@@ -279,6 +279,9 @@ def prepare_args(
     if training_args.do_train and (not training_args.fp16):
         logger.warning("We recommend enable fp16 mixed precision training.")
 
+    if data_args.prompt_template == "alpaca":
+        logger.warning("Please specify `prompt_template` if you are using other pre-trained models.")
+
     if training_args.local_rank != -1 and training_args.ddp_find_unused_parameters is None:
         logger.warning("`ddp_find_unused_parameters` needs to be set as False in DDP training.")
         training_args.ddp_find_unused_parameters = False
@@ -318,6 +321,9 @@ def prepare_infer_args() -> Tuple[ModelArguments, DataTrainingArguments, Finetun
     if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
         raise ValueError("Quantization is only compatible with the LoRA method.")
 
+    if data_args.prompt_template == "alpaca":
+        logger.warning("Please specify `prompt_template` if you are using other pre-trained models.")
+
     return model_args, data_args, finetuning_args
 
 
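
Both hunks add the same check. Below is a minimal runnable sketch of the resulting behavior; the helper name and logging setup are assumptions for illustration, not part of the commit. When `prompt_template` is left at the default value "alpaca", a warning reminds users of other pre-trained models to choose a template explicitly.

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

def check_prompt_template(prompt_template: str) -> None:
    # Mirrors the check added to prepare_args() and prepare_infer_args():
    # warn when the template is still the default "alpaca".
    if prompt_template == "alpaca":
        logger.warning("Please specify `prompt_template` if you are using other pre-trained models.")

check_prompt_template("alpaca")   # emits the warning
check_prompt_template("vicuna")   # no warning: a template was chosen explicitly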