[v1] support resume training from checkpoint (#10280)

Co-authored-by: frozenleaves <frozen@Mac.local>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
This commit is contained in:
浮梦
2026-04-20 20:28:08 +08:00
committed by GitHub
parent c5aecaf31d
commit c4bbac49b2
9 changed files with 577 additions and 10 deletions

76
scripts/dcp2hf.py Normal file
View File

@@ -0,0 +1,76 @@
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert a DCP checkpoint to HuggingFace model format.
Usage:
python scripts/dcp2hf.py convert --dcp_path=/path/to/dcp --hf_path=/path/to/hf --config_path=/path/to/config
Arguments:
dcp_path: Path to the DCP checkpoint directory.
hf_path: Output path (directory) for HuggingFace model.
config_path: Path to the HuggingFace model directory containing config.json.
"""
import fire
import torch
import torch.distributed.checkpoint as dcp
import transformers
from transformers import AutoConfig
def convert(dcp_path: str, hf_path: str, config_path: str) -> None:
    """Convert DCP model weights to HuggingFace format.

    Note: this script only converts the DCP checkpoint weights to a HuggingFace
    model directory; tokenizer files must be copied from the original model.

    Args:
        dcp_path: DCP checkpoint directory.
        hf_path: Output path (directory) for HuggingFace model.
        config_path: Path to the HuggingFace model directory containing config.json.

    Raises:
        ValueError: If any of the three paths is missing or empty.
    """
    if not dcp_path or not hf_path or not config_path:
        raise ValueError("All 'dcp_path', 'hf_path', and 'config_path' are required.")
    print(f"Loading config from {config_path}...")
    config = AutoConfig.from_pretrained(config_path)
    # Resolve the concrete model class from config.architectures when possible.
    architectures = getattr(config, "architectures", None) or []
    model_cls = getattr(transformers, architectures[0], None) if architectures else None
    print("Initializing model on CPU...")
    if model_cls is not None:
        model = model_cls(config).to(torch.bfloat16)
    else:
        # Auto* classes cannot be instantiated directly with a config
        # (AutoModelForCausalLM(config) raises); use from_config instead.
        model = transformers.AutoModelForCausalLM.from_config(config).to(torch.bfloat16)
    print(f"Loading DCP from {dcp_path}...")
    # dcp.load fills the state_dict tensors in place from the sharded checkpoint;
    # load_state_dict then makes the assignment to the module explicit.
    state_dict = model.state_dict()
    dcp.load(state_dict, checkpoint_id=dcp_path)
    model.load_state_dict(state_dict)
    print(f"Saving to HF format at {hf_path}...")
    model.save_pretrained(hf_path)
    config.save_pretrained(hf_path)
    print("Done!")
def help() -> None:
"""Show help message."""
print(__doc__)
if __name__ == "__main__":
    # Expose "convert" and "help" as Fire subcommands; the extra "--convert"
    # key tolerates users invoking the command flag-style (`--convert` instead
    # of `convert`).
    fire.Fire({"convert": convert, "help": help, "--convert": convert})

View File

@@ -25,7 +25,8 @@ Arguments:
import fire
import torch
import torch.distributed.checkpoint as dcp
from transformers import AutoModelForCausalLM
import transformers
from transformers import AutoConfig
def convert(hf_path: str, dcp_path: str) -> None:
@@ -39,7 +40,14 @@ def convert(hf_path: str, dcp_path: str) -> None:
raise ValueError("Both 'hf_path' and 'dcp_path' are required.")
print(f"Loading HF model from {hf_path}...")
model = AutoModelForCausalLM.from_pretrained(hf_path, device_map="cpu", torch_dtype=torch.bfloat16)
config = AutoConfig.from_pretrained(hf_path)
architectures = getattr(config, "architectures", [])
if architectures:
model_cls = getattr(transformers, architectures[0], transformers.AutoModelForCausalLM)
else:
model_cls = transformers.AutoModelForCausalLM
model = model_cls.from_pretrained(hf_path, device_map="cpu", torch_dtype=torch.bfloat16)
print(f"Saving to DCP format at {dcp_path}...")
dcp.save(model.state_dict(), checkpoint_id=dcp_path)