Mirror of https://github.com/hiyouga/LLaMA-Factory.git (synced 2025-07-31 10:42:50 +08:00)

[webui] fix abort finish (#8569)

This commit is contained in:
commit 6a8d88826e (parent 043103e1c9)
@@ -23,6 +23,7 @@ from typing import TYPE_CHECKING, Any, Literal, Union
 import torch
 import torch.distributed as dist
 import transformers.dynamic_module_utils
+from huggingface_hub.utils import WeakFileLock
 from transformers import InfNanRemoveLogitsProcessor, LogitsProcessorList
 from transformers.dynamic_module_utils import get_relative_imports
 from transformers.utils import (
@@ -277,21 +278,27 @@ def try_download_model_from_other_hub(model_args: "ModelArguments") -> str:
             api.login(model_args.ms_hub_token)

         revision = "master" if model_args.model_revision == "main" else model_args.model_revision
-        return snapshot_download(
-            model_args.model_name_or_path,
-            revision=revision,
-            cache_dir=model_args.cache_dir,
-        )
+        with WeakFileLock(os.path.abspath(os.path.expanduser("~/.cache/llamafactory/modelscope.lock"))):
+            model_path = snapshot_download(
+                model_args.model_name_or_path,
+                revision=revision,
+                cache_dir=model_args.cache_dir,
+            )
+
+        return model_path

     if use_openmind():
         check_version("openmind>=0.8.0", mandatory=True)
         from openmind.utils.hub import snapshot_download  # type: ignore

-        return snapshot_download(
-            model_args.model_name_or_path,
-            revision=model_args.model_revision,
-            cache_dir=model_args.cache_dir,
-        )
+        with WeakFileLock(os.path.abspath(os.path.expanduser("~/.cache/llamafactory/openmind.lock"))):
+            model_path = snapshot_download(
+                model_args.model_name_or_path,
+                revision=model_args.model_revision,
+                cache_dir=model_args.cache_dir,
+            )
+
+        return model_path


 def use_modelscope() -> bool:
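The lock exists so that several workers starting at once do not race to download the same checkpoint into a shared cache: whichever process acquires the lock file downloads first, and the others block until it is released. Below is a minimal sketch of that pattern; WeakFileLock is the real huggingface_hub utility imported above, while fake_snapshot_download is a placeholder standing in for the actual ModelScope/OpenMind call.

import os

from huggingface_hub.utils import WeakFileLock

LOCK_FILE = os.path.abspath(os.path.expanduser("~/.cache/llamafactory/modelscope.lock"))


def fake_snapshot_download(name: str) -> str:
    # Placeholder for modelscope/openmind snapshot_download; returns a cache path.
    return os.path.join("/tmp/model-cache", name)


def download(name: str) -> str:
    os.makedirs(os.path.dirname(LOCK_FILE), exist_ok=True)  # the lock file's directory must exist
    with WeakFileLock(LOCK_FILE):  # blocks until no other process holds the lock
        model_path = fake_snapshot_download(name)
    return model_path


if __name__ == "__main__":
    print(download("Qwen/Qwen2-0.5B"))

The "weak" variant is presumably chosen over a plain file lock because it degrades gracefully when the lock file cannot be released or removed cleanly, which suits a cache directory shared between processes.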
@@ -15,19 +15,19 @@
 LOCALES = {
     "title": {
         "en": {
-            "value": "<h1><center>LLaMA Factory: Unified Efficient Fine-Tuning of 100+ LLMs</center></h1>",
+            "value": "<h1><center>🦙🏭LLaMA Factory: Unified Efficient Fine-Tuning of 100+ LLMs</center></h1>",
         },
         "ru": {
-            "value": "<h1><center>LLaMA Factory: Унифицированная эффективная тонкая настройка 100+ LLMs</center></h1>",
+            "value": "<h1><center>🦙🏭LLaMA Factory: Унифицированная эффективная тонкая настройка 100+ LLMs</center></h1>",
         },
         "zh": {
-            "value": "<h1><center>LLaMA Factory: 一站式大模型高效微调平台</center></h1>",
+            "value": "<h1><center>🦙🏭LLaMA Factory: 一站式大模型高效微调平台</center></h1>",
         },
         "ko": {
-            "value": "<h1><center>LLaMA Factory: 100+ LLMs를 위한 통합 효율적인 튜닝</center></h1>",
+            "value": "<h1><center>🦙🏭LLaMA Factory: 100+ LLMs를 위한 통합 효율적인 튜닝</center></h1>",
         },
         "ja": {
-            "value": "<h1><center>LLaMA Factory: 100+ LLMs の統合効率的なチューニング</center></h1>",
+            "value": "<h1><center>🦙🏭LLaMA Factory: 100+ LLMs の統合効率的なチューニング</center></h1>",
         },
     },
     "subtitle": {
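The change here is cosmetic (a 🦙🏭 emoji prefixed to every localized title), but the shape of the table is worth noting: LOCALES nests component name, then language code, then the attribute to set on the component. A minimal lookup sketch; get_text is a hypothetical helper for illustration, not code from the repository.

LOCALES = {
    "title": {
        "en": {"value": "<h1><center>🦙🏭LLaMA Factory: Unified Efficient Fine-Tuning of 100+ LLMs</center></h1>"},
        "zh": {"value": "<h1><center>🦙🏭LLaMA Factory: 一站式大模型高效微调平台</center></h1>"},
    },
}


def get_text(component: str, lang: str, key: str = "value") -> str:
    # Hypothetical helper: resolve a component's localized attribute.
    return LOCALES[component][lang][key]


print(get_text("title", "zh"))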
@@ -444,7 +444,7 @@ class Runner:
             except TimeoutExpired:
                 continue

-        if return_code == 0:
+        if return_code == 0 or self.aborted:
             finish_info = ALERTS["info_finished"][lang]
             if self.do_train:
                 finish_log = ALERTS["info_finished"][lang] + "\n\n" + running_log
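This one-line change is the fix the commit title names: aborting a run terminates the trainer subprocess, which then exits with a non-zero return code, so checking return_code alone misreported a user-requested abort as an error. A minimal sketch of the fixed control flow; MiniRunner and its fields are hypothetical stand-ins for the webui Runner, not its actual implementation.

from subprocess import Popen, TimeoutExpired


class MiniRunner:
    def __init__(self, cmd: list[str]) -> None:
        self.aborted = False
        self.trainer = Popen(cmd)

    def abort(self) -> None:
        self.aborted = True
        self.trainer.terminate()  # terminated processes exit with a non-zero code

    def wait_and_report(self) -> str:
        while True:
            try:
                return_code = self.trainer.wait(2)
                break
            except TimeoutExpired:
                continue  # still running; poll again

        # The fixed condition: a zero exit code OR a user-requested abort
        # both count as a normal finish rather than an error.
        if return_code == 0 or self.aborted:
            return "finished"
        return "errored"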