diff --git a/Makefile b/Makefile
index 4c3a3216d..f30b15c1a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,23 +2,26 @@
check_dirs := scripts src tests tests_v1
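+# Prefer uv when it is installed; otherwise fall back to the tools on PATH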
+RUN := $(shell command -v uv >/dev/null 2>&1 && echo "uv run" || echo "")
+BUILD := $(shell command -v uv >/dev/null 2>&1 && echo "uv build" || echo "python -m build")
+
build:
- uv build
+ $(BUILD)
commit:
- uv run pre-commit install
- uv run pre-commit run --all-files
+ $(RUN) pre-commit install
+ $(RUN) pre-commit run --all-files
license:
- uv run python tests/check_license.py $(check_dirs)
+ $(RUN) python3 tests/check_license.py $(check_dirs)
quality:
- uv run ruff check $(check_dirs)
- uv run ruff format --check $(check_dirs)
+ $(RUN) ruff check $(check_dirs)
+ $(RUN) ruff format --check $(check_dirs)
style:
- uv run ruff check $(check_dirs) --fix
- uv run ruff format $(check_dirs)
+ $(RUN) ruff check $(check_dirs) --fix
+ $(RUN) ruff format $(check_dirs)
test:
- WANDB_DISABLED=true uv run pytest -vv --import-mode=importlib tests/ tests_v1/
+ WANDB_DISABLED=true $(RUN) pytest -vv --import-mode=importlib tests/ tests_v1/
diff --git a/README.md b/README.md
index 919cb2061..d1f0f1176 100644
--- a/README.md
+++ b/README.md
@@ -538,13 +538,7 @@ Please refer to [build docker](#build-docker) to build the image yourself.
Create an isolated Python environment with [uv](https://github.com/astral-sh/uv):
```bash
-uv sync --extra torch --extra metrics --prerelease=allow
-```
-
-Run LLaMA-Factory in the isolated environment:
-
-```bash
-uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
+uv run llamafactory-cli webui
```
@@ -581,7 +575,7 @@ To enable FlashAttention-2 on the Windows platform, please use the script from [
For Ascend NPU users
-To install LLaMA Factory on Ascend NPU devices, please upgrade Python to version 3.10 or higher: `pip install -e "."`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:
+To install LLaMA Factory on Ascend NPU devices, please upgrade Python to version 3.10 or higher and install the package together with torch-npu: `pip install -e . torch-npu==2.7.1`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:
```bash
# replace the url according to your CANN version and devices
@@ -600,8 +594,8 @@ source /usr/local/Ascend/ascend-toolkit/set_env.sh
| Requirement | Minimum | Recommend |
| ------------ | ------- | -------------- |
| CANN | 8.0.RC1 | 8.0.0.alpha002 |
-| torch | 2.1.0 | 2.4.0 |
-| torch-npu | 2.1.0 | 2.4.0.post2 |
+| torch | 2.1.0 | 2.7.1 |
+| torch-npu | 2.1.0 | 2.7.1 |
| deepspeed | 0.13.2 | 0.13.2 |
| vllm-ascend | - | 0.7.3 |
diff --git a/README_zh.md b/README_zh.md
index 7995c88ce..ca782885b 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -519,7 +519,9 @@ cd LLaMA-Factory
pip install -e ".[torch,metrics]" --no-build-isolation
```
-可选的额外依赖项:torch、torch-npu、metrics、deepspeed、liger-kernel、bitsandbytes、hqq、eetq、gptq、aqlm、vllm、sglang、galore、apollo、badam、adam-mini、qwen、minicpm_v、openmind、swanlab、dev
+可选的额外依赖项:`metrics`、`deepspeed`。使用 `pip install -e ".[metrics,deepspeed]"` 安装。
+
+其他可选依赖项请参考 `examples/requirements/` 目录下的文件。
#### 从镜像安装
@@ -538,13 +540,7 @@ docker run -it --rm --gpus=all --ipc=host hiyouga/llamafactory:latest
使用 [uv](https://github.com/astral-sh/uv) 创建隔离的 Python 环境:
```bash
-uv sync --extra torch --extra metrics --prerelease=allow
-```
-
-在环境中运行 LLaMA-Factory:
-
-```bash
-uv run --prerelease=allow llamafactory-cli train examples/train_lora/llama3_lora_pretrain.yaml
+uv run llamafactory-cli webui
```
@@ -581,7 +577,7 @@ pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/downl
昇腾 NPU 用户指南
-在昇腾 NPU 设备上安装 LLaMA Factory 时,请升级 Python 到 3.10 及以上,并需要指定额外依赖项,使用 `pip install -e ".[torch-npu,metrics]"` 命令安装。此外,还需要安装 **[Ascend CANN Toolkit 与 Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**,安装方法请参考[安装教程](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html)或使用以下命令:
+在昇腾 NPU 设备上安装 LLaMA Factory 时,请升级 Python 到 3.10 及以上,并使用 `pip install -e . torch-npu==2.7.1` 命令安装本项目及 torch-npu。此外,还需要安装 **[Ascend CANN Toolkit 与 Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**,安装方法请参考[安装教程](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html)或使用以下命令:
```bash
# 请替换 URL 为 CANN 版本和设备型号对应的 URL
@@ -600,8 +596,8 @@ source /usr/local/Ascend/ascend-toolkit/set_env.sh
| 依赖项 | 至少 | 推荐 |
| ------------ | ------- | -------------- |
| CANN | 8.0.RC1 | 8.0.0.alpha002 |
-| torch | 2.1.0 | 2.4.0 |
-| torch-npu | 2.1.0 | 2.4.0.post2 |
+| torch | 2.1.0 | 2.7.1 |
+| torch-npu | 2.1.0 | 2.7.1 |
| deepspeed | 0.13.2 | 0.13.2 |
| vllm-ascend | - | 0.7.3 |
diff --git a/docker/docker-cuda/Dockerfile b/docker/docker-cuda/Dockerfile
index 94184e791..2315a85b7 100644
--- a/docker/docker-cuda/Dockerfile
+++ b/docker/docker-cuda/Dockerfile
@@ -32,7 +32,7 @@ RUN pip config set global.index-url "${PIP_INDEX}" && \
COPY . /app
# Install LLaMA Factory
-RUN pip install --no-cache-dir -e "." --no-build-isolation
+RUN pip install --no-cache-dir -e ".[metrics,deepspeed]" --no-build-isolation
# Rebuild flash attention
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
diff --git a/docker/docker-cuda/Dockerfile.megatron b/docker/docker-cuda/Dockerfile.megatron
index ee6913192..7f457fefd 100644
--- a/docker/docker-cuda/Dockerfile.megatron
+++ b/docker/docker-cuda/Dockerfile.megatron
@@ -60,7 +60,7 @@ WORKDIR /app
COPY . /app
# Install LLaMA Factory
-RUN pip install --no-cache-dir -e "." --no-build-isolation
+RUN pip install --no-cache-dir -e ".[metrics]" --no-build-isolation
RUN pip install "git+https://github.com/alibaba/roll.git#subdirectory=mcore_adapter"
diff --git a/docker/docker-cuda/docker-compose.yml b/docker/docker-cuda/docker-compose.yml
index ab0da4d87..eb4250ce6 100644
--- a/docker/docker-cuda/docker-compose.yml
+++ b/docker/docker-cuda/docker-compose.yml
@@ -5,7 +5,6 @@ services:
context: ../..
args:
PIP_INDEX: https://pypi.org/simple
- EXTRAS: metrics
container_name: llamafactory
ports:
- "7860:7860"
diff --git a/docker/docker-npu/Dockerfile b/docker/docker-npu/Dockerfile
index 95da49715..ef4e2627c 100644
--- a/docker/docker-npu/Dockerfile
+++ b/docker/docker-npu/Dockerfile
@@ -37,7 +37,7 @@ RUN pip uninstall -y torch torchvision torchaudio && \
COPY . /app
# Install LLaMA Factory
-RUN pip install --no-cache-dir -e "." --no-build-isolation
+RUN pip install --no-cache-dir -e ".[metrics,deepspeed]" --no-build-isolation
# Set up volumes
# VOLUME [ "/root/.cache/huggingface", "/app/shared_data", "/app/output" ]
diff --git a/docker/docker-npu/docker-compose.yml b/docker/docker-npu/docker-compose.yml
index 8530efafd..621d7177e 100644
--- a/docker/docker-npu/docker-compose.yml
+++ b/docker/docker-npu/docker-compose.yml
@@ -5,7 +5,6 @@ services:
context: ../..
args:
PIP_INDEX: https://pypi.org/simple
- EXTRAS: torch-npu,metrics
container_name: llamafactory-a2
image: llamafactory:npu-a2
volumes:
diff --git a/docker/docker-rocm/Dockerfile b/docker/docker-rocm/Dockerfile
index c5af6aa27..93e44e535 100644
--- a/docker/docker-rocm/Dockerfile
+++ b/docker/docker-rocm/Dockerfile
@@ -37,7 +37,7 @@ RUN pip uninstall -y torch torchvision torchaudio && \
COPY . /app
# Install LLaMA Factory
-RUN pip install --no-cache-dir -e "." --no-build-isolation
+RUN pip install --no-cache-dir -e ".[metrics,deepspeed]" --no-build-isolation
# Rebuild flash attention
RUN if [ "${INSTALL_FLASHATTN}" == "true" ]; then \
diff --git a/docker/docker-rocm/docker-compose.yml b/docker/docker-rocm/docker-compose.yml
index 32cdf5633..7e6c83bfb 100644
--- a/docker/docker-rocm/docker-compose.yml
+++ b/docker/docker-rocm/docker-compose.yml
@@ -5,7 +5,6 @@ services:
context: ../..
args:
PIP_INDEX: https://pypi.org/simple
- EXTRAS: metrics
container_name: llamafactory
ports:
- "7860:7860"
diff --git a/examples/requirements/dev.txt b/examples/requirements/dev.txt
deleted file mode 100644
index f0fc17fcc..000000000
--- a/examples/requirements/dev.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-pre-commit
-ruff
-pytest
-build
diff --git a/pyproject.toml b/pyproject.toml
index ec8473eb1..6cdd1b289 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,50 +38,48 @@ classifiers = [
]
dependencies = [
# core deps
+ "torch>=2.4.0",
+ "torchvision>=0.19.0",
"transformers>=4.49.0,<=4.56.2,!=4.52.0; python_version < '3.10'",
"transformers>=4.49.0,<=4.57.1,!=4.52.0,!=4.57.0; python_version >= '3.10'",
"datasets>=2.16.0,<=4.0.0",
"accelerate>=1.3.0,<=1.11.0",
"peft>=0.14.0,<=0.17.1",
"trl>=0.8.6,<=0.9.6",
- "torchdata",
- # torch
- "torch>=2.0.0",
- "torchvision>=0.15.0",
+ "torchdata>=0.10.0,<=0.11.0",
# gui
- "gradio>=4.38.0,<=5.45.0",
+ "gradio>=4.38.0,<=6.2.0",
"matplotlib>=3.7.0",
"tyro<0.9.0",
# ops
"einops",
- "numpy<2.0.0",
- "pandas>=2.0.0",
+ "numpy",
+ "pandas",
"scipy",
# model and tokenizer
"sentencepiece",
"tiktoken",
- "modelscope>=1.14.0",
+ "modelscope",
"hf-transfer",
- "safetensors<=0.5.3",
+ "safetensors",
# python
"fire",
"omegaconf",
"packaging",
"protobuf",
"pyyaml",
- "pydantic<=2.10.6",
+ "pydantic",
# api
"uvicorn",
"fastapi",
"sse-starlette",
# media
"av",
- "librosa",
- # yanked
- "propcache!=0.4.0"
+ "librosa"
]
[project.optional-dependencies]
+dev = ["pre-commit", "ruff", "pytest", "build"]
metrics = ["nltk", "jieba", "rouge-chinese"]
deepspeed = ["deepspeed>=0.10.0,<=0.16.9"]
diff --git a/src/llamafactory/data/mm_plugin.py b/src/llamafactory/data/mm_plugin.py
index 291554021..44df263ab 100644
--- a/src/llamafactory/data/mm_plugin.py
+++ b/src/llamafactory/data/mm_plugin.py
@@ -500,13 +500,17 @@ class ErnieVLPlugin(BasePlugin):
while IMAGE_PLACEHOLDER in content:
image_seqlen = image_grid_thw[image_idx].prod() // merge_length if self.expand_mm_tokens else 1
content = content.replace(
- IMAGE_PLACEHOLDER, f"Picture {image_idx + 1}:<|IMAGE_START|>{image_token * image_seqlen}<|IMAGE_END|>", 1
+ IMAGE_PLACEHOLDER,
+ f"Picture {image_idx + 1}:<|IMAGE_START|>{image_token * image_seqlen}<|IMAGE_END|>",
+ 1,
)
image_idx += 1
while VIDEO_PLACEHOLDER in content:
video_seqlen = video_grid_thw[video_idx].prod() // merge_length if self.expand_mm_tokens else 1
content = content.replace(
- VIDEO_PLACEHOLDER, f"Video {video_idx + 1}:<|VIDEO_START|>{video_token * video_seqlen}<|VIDEO_END|>", 1
+ VIDEO_PLACEHOLDER,
+ f"Video {video_idx + 1}:<|VIDEO_START|>{video_token * video_seqlen}<|VIDEO_END|>",
+ 1,
)
video_idx += 1
message["content"] = content
diff --git a/src/llamafactory/extras/misc.py b/src/llamafactory/extras/misc.py
index 5c4c24787..ba35bafb2 100644
--- a/src/llamafactory/extras/misc.py
+++ b/src/llamafactory/extras/misc.py
@@ -332,3 +332,7 @@ def fix_proxy(ipv6_enabled: bool = False) -> None:
if ipv6_enabled:
os.environ.pop("http_proxy", None)
os.environ.pop("HTTP_PROXY", None)
+ os.environ.pop("https_proxy", None)
+ os.environ.pop("HTTPS_PROXY", None)
+ os.environ.pop("all_proxy", None)
+ os.environ.pop("ALL_PROXY", None)
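For reference, a minimal sketch of the proxy cleanup this hunk arrives at, assuming `fix_proxy` keeps the signature shown in the surrounding context (a sketch, not the project's exact code):

```python
import os


def fix_proxy(ipv6_enabled: bool = False) -> None:
    """Sketch: drop proxy variables that commonly break local IPv6 connections."""
    if ipv6_enabled:
        # clear the lower- and upper-case variants of every proxy variable
        for key in ("http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "all_proxy", "ALL_PROXY"):
            os.environ.pop(key, None)
```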
diff --git a/src/llamafactory/model/loader.py b/src/llamafactory/model/loader.py
index 72f510a44..ef4f3f134 100644
--- a/src/llamafactory/model/loader.py
+++ b/src/llamafactory/model/loader.py
@@ -15,7 +15,6 @@
import os
from typing import TYPE_CHECKING, Any, Optional, TypedDict
-import torch
from transformers import (
AutoConfig,
AutoModelForCausalLM,
@@ -158,6 +157,7 @@ def load_model(
if model is None and not lazy_load:
init_kwargs["config"] = config
init_kwargs["pretrained_model_name_or_path"] = model_args.model_name_or_path
+ init_kwargs["torch_dtype"] = "auto"
if model_args.mixture_of_depths == "load":
model = load_mod_pretrained_model(**init_kwargs)
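Together with the `patcher.py` hunk below, this moves dtype selection into `load_model` and always requests `"auto"`. A minimal sketch of the effect once these kwargs reach `from_pretrained`, using an illustrative model id that is not part of this diff:

```python
from transformers import AutoModelForCausalLM

# torch_dtype="auto" makes transformers load the weights in the dtype recorded
# in the checkpoint config (e.g. bfloat16) instead of upcasting them to float32.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B",  # illustrative model id
    torch_dtype="auto",
)
print(model.dtype)  # e.g. torch.bfloat16, taken from config.json
```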
diff --git a/src/llamafactory/model/patcher.py b/src/llamafactory/model/patcher.py
index 7401641aa..67eee886b 100644
--- a/src/llamafactory/model/patcher.py
+++ b/src/llamafactory/model/patcher.py
@@ -156,16 +156,13 @@ def patch_config(
# deepspeed zero3 is not compatible with low_cpu_mem_usage
init_kwargs["low_cpu_mem_usage"] = model_args.low_cpu_mem_usage and (not is_deepspeed_zero3_enabled())
- # do not cast data type of the model deepspeed zero3 without qlora
- if not (is_deepspeed_zero3_enabled() and model_args.quantization_bit is None):
- init_kwargs["torch_dtype"] = "auto"
+ # fsdp/deepspeed zero3 does not need device map
+ if not (is_deepspeed_zero3_enabled() or is_fsdp_enabled()) and init_kwargs["low_cpu_mem_usage"]:
+ if "device_map" not in init_kwargs and model_args.device_map:
+ init_kwargs["device_map"] = model_args.device_map # device map requires low_cpu_mem_usage=True
- if init_kwargs["low_cpu_mem_usage"] and not is_fsdp_enabled(): # fsdp does not need device map
- if "device_map" not in init_kwargs and model_args.device_map:
- init_kwargs["device_map"] = model_args.device_map # device map requires low_cpu_mem_usage=True
-
- if init_kwargs.get("device_map", None) == "auto":
- init_kwargs["offload_folder"] = model_args.offload_folder
+ if init_kwargs.get("device_map", None) == "auto":
+ init_kwargs["offload_folder"] = model_args.offload_folder
def patch_model(
diff --git a/src/llamafactory/train/test_utils.py b/src/llamafactory/train/test_utils.py
index 0f73d1c5e..f31b3d2fc 100644
--- a/src/llamafactory/train/test_utils.py
+++ b/src/llamafactory/train/test_utils.py
@@ -84,7 +84,7 @@ def load_reference_model(
model: AutoModelForCausalLMWithValueHead = AutoModelForCausalLMWithValueHead.from_pretrained(
model_path, torch_dtype=torch.float16, device_map="auto"
)
-
+
return model
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, device_map="auto")
diff --git a/src/llamafactory/webui/locales.py b/src/llamafactory/webui/locales.py
index 7051b30e8..06e64e4a7 100644
--- a/src/llamafactory/webui/locales.py
+++ b/src/llamafactory/webui/locales.py
@@ -35,35 +35,40 @@ LOCALES = {
"value": (
""
+ "Documentation "
+ "Blog"
),
},
"ru": {
"value": (
""
+ "Документацию "
+ "Блог"
),
},
"zh": {
"value": (
""
+ "官方文档 "
+ "博客"
),
},
"ko": {
"value": (
""
+ "공식 문서 "
+ "블로그를 방문하세요."
),
},
"ja": {
"value": (
""
+ "ドキュメント "
+ "ブログにアクセスする"
),
},
},