Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-08-02 19:52:50 +08:00)
Fix Llama model save for full-parameter training
Former-commit-id: eb178eaff390a1dc342cc35ab8c7820d654f3717
Parent: 3bf6dde3a5
Commit: 67c05c2031
@@ -312,6 +312,15 @@ def patch_config(
 def patch_model(
     model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer", model_args: "ModelArguments", is_trainable: bool
 ) -> None:
+    # Config check and fix
+    gen_config = model.generation_config
+    if not gen_config.do_sample and (
+        (gen_config.temperature is not None and gen_config.temperature != 1.0)
+        or (gen_config.top_p is not None and gen_config.top_p != 1.0)
+        or (gen_config.typical_p is not None and gen_config.typical_p != 1.0)
+    ):
+        gen_config.do_sample = True
+
     if "GenerationMixin" not in str(model.generate.__func__):
         model.generate = MethodType(PreTrainedModel.generate, model)
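The added block prevents an inconsistent GenerationConfig (do_sample=False together with a non-default temperature, top_p, or typical_p), which recent transformers versions warn about and may refuse to save. A minimal standalone sketch of the same fix, assuming a placeholder checkpoint name:

    # Sketch only: apply the generation-config fix to a freshly loaded model.
    from types import MethodType

    from transformers import AutoModelForCausalLM, PreTrainedModel

    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder checkpoint

    gen_config = model.generation_config
    # Greedy decoding (do_sample=False) plus non-default sampling parameters is
    # inconsistent, so enable sampling instead of dropping the parameters.
    if not gen_config.do_sample and (
        (gen_config.temperature is not None and gen_config.temperature != 1.0)
        or (gen_config.top_p is not None and gen_config.top_p != 1.0)
        or (gen_config.typical_p is not None and gen_config.typical_p != 1.0)
    ):
        gen_config.do_sample = True

    # Some custom-code models override generate(); rebind the stock implementation.
    if "GenerationMixin" not in str(model.generate.__func__):
        model.generate = MethodType(PreTrainedModel.generate, model)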
@@ -64,14 +64,6 @@ def export_model(args: Optional[Dict[str, Any]] = None):
     for param in model.parameters():
         param.data = param.data.to(output_dtype)
 
-    gen_config = model.generation_config  # check and fix generation config
-    if not gen_config.do_sample and (
-        (gen_config.temperature is not None and gen_config.temperature != 1.0)
-        or (gen_config.top_p is not None and gen_config.top_p != 1.0)
-        or (gen_config.typical_p is not None and gen_config.typical_p != 1.0)
-    ):
-        gen_config.do_sample = True
-
     model.save_pretrained(
         save_directory=model_args.export_dir,
         max_shard_size="{}GB".format(model_args.export_size),
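With the check now applied in patch_model at load time, the duplicate block in export_model is redundant and removed; export keeps only the dtype cast and the sharded save. A minimal standalone sketch of that export step, assuming placeholder paths, dtype, and shard size:

    # Sketch only: cast weights to the export dtype and write a sharded checkpoint.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model = AutoModelForCausalLM.from_pretrained("path/to/trained_model")  # placeholder
    tokenizer = AutoTokenizer.from_pretrained("path/to/trained_model")     # placeholder

    output_dtype = torch.float16  # target precision for the exported weights
    for param in model.parameters():
        param.data = param.data.to(output_dtype)

    # max_shard_size caps the size of each weight file in the exported folder.
    model.save_pretrained(save_directory="exported_model", max_shard_size="2GB")
    tokenizer.save_pretrained("exported_model")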